]>
git.saurik.com Git - apple/xnu.git/blob - bsd/sys/queue.h
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1991, 1993
30 * The Regents of the University of California. All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 4. Neither the name of the University nor the names of its contributors
41 * may be used to endorse or promote products derived from this software
42 * without specific prior written permission.
44 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
45 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
46 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
47 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
48 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
49 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
50 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
51 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
53 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * @(#)queue.h 8.5 (Berkeley) 8/20/94
/*
 * Kernel builds get panic() and the real __improbable() branch hint;
 * userspace gets a no-op fallback so the consistency-check macros
 * below always expand to valid code.
 */
#ifdef KERNEL_PRIVATE
#include <kern/debug.h>         /* panic function call */
#include <sys/cdefs.h>          /* __improbable in kernelspace */
#else
#ifndef __improbable
#define __improbable(x) (x)     /* noop in userspace */
#endif /* __improbable */
#endif /* KERNEL_PRIVATE */
/*
 * This file defines five types of data structures: singly-linked lists,
 * singly-linked tail queues, lists, tail queues, and circular queues.
 *
 * A singly-linked list is headed by a single forward pointer.  The elements
 * are singly linked for minimum space and pointer manipulation overhead at
 * the expense of O(n) removal for arbitrary elements.  New elements can be
 * added to the list after an existing element or at the head of the list.
 * Elements being removed from the head of the list should use the explicit
 * macro for this purpose for optimum efficiency.  A singly-linked list may
 * only be traversed in the forward direction.  Singly-linked lists are ideal
 * for applications with large datasets and few or no removals or for
 * implementing a LIFO queue.
 *
 * A singly-linked tail queue is headed by a pair of pointers, one to the
 * head of the list and the other to the tail of the list.  The elements are
 * singly linked for minimum space and pointer manipulation overhead at the
 * expense of O(n) removal for arbitrary elements.  New elements can be added
 * to the list after an existing element, at the head of the list, or at the
 * end of the list.  Elements being removed from the head of the tail queue
 * should use the explicit macro for this purpose for optimum efficiency.
 * A singly-linked tail queue may only be traversed in the forward direction.
 * Singly-linked tail queues are ideal for applications with large datasets
 * and few or no removals or for implementing a FIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header).  The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before
 * or after an existing element or at the head of the list.  A list
 * may only be traversed in the forward direction.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or
 * after an existing element, at the head of the list, or at the end of
 * the list.  A tail queue may be traversed in either direction.
 *
 * A circle queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or after
 * an existing element, at the head of the list, or at the end of the list.
 * A circle queue may be traversed in either direction, but has a more
 * complex end of list detection.
 * Note that circle queues are deprecated, because, as the removal log
 * in FreeBSD states, "CIRCLEQs are a disgrace to everything Knuth taught
 * us in Volume 1 Chapter 2. [...] Use TAILQ instead, it provides the same
 * functionality." Code using them will continue to compile, but they
 * are no longer documented on the man page.
 *
 * For details on the use of these macros, see the queue(3) manual page.
 *
 *				SLIST	LIST	STAILQ	TAILQ	CIRCLEQ
 * _HEAD_INITIALIZER		+	+	+	+	-
 * _FOREACH_SAFE		+	+	+	+	-
 * _FOREACH_REVERSE		-	-	-	+	-
 * _FOREACH_REVERSE_SAFE	-	-	-	+	-
 * _INSERT_HEAD			+	+	+	+	+
 * _INSERT_BEFORE		-	+	-	+	+
 * _INSERT_AFTER		+	+	+	+	+
 * _INSERT_TAIL			-	-	+	+	+
 * _REMOVE_AFTER		+	-	+	-	-
 * _REMOVE_HEAD			+	-	+	-	-
 * _REMOVE_HEAD_UNTIL		-	-	+	-	-
 */
/*
 * Debug instrumentation: with QUEUE_MACRO_DEBUG defined, every TAILQ head
 * and entry carries a qm_trace record of the last two source locations
 * that modified it, and removed links are poisoned to (void *)-1 so a
 * use-after-remove faults immediately.  In normal builds all of these
 * expand to nothing.
 */
#ifdef QUEUE_MACRO_DEBUG
/* Store the last 2 places the queue element or head was altered */
struct qm_trace {
	char * lastfile;
	int lastline;
	char * prevfile;
	int prevline;
};

#define TRACEBUF	struct qm_trace trace;
#define TRASHIT(x)	do {(x) = (void *)-1;} while (0)

#define QMD_TRACE_HEAD(head) do {					\
	(head)->trace.prevline = (head)->trace.lastline;		\
	(head)->trace.prevfile = (head)->trace.lastfile;		\
	(head)->trace.lastline = __LINE__;				\
	(head)->trace.lastfile = __FILE__;				\
} while (0)

#define QMD_TRACE_ELEM(elem) do {					\
	(elem)->trace.prevline = (elem)->trace.lastline;		\
	(elem)->trace.prevfile = (elem)->trace.lastfile;		\
	(elem)->trace.lastline = __LINE__;				\
	(elem)->trace.lastfile = __FILE__;				\
} while (0)

#else
#define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head)
#define TRACEBUF
#define TRASHIT(x)
#endif	/* QUEUE_MACRO_DEBUG */
/*
 * Horrible macros to enable use of code that was meant to be C-specific
 * (and which push struct onto type) in C++; without these, C++ code
 * that uses these macros in the context of a class will blow up
 * due to "struct" being prepended to "type" by the macros, causing
 * inconsistent use of tags.
 *
 * This approach is necessary because these are macros; we have to use
 * these on a per-macro basis (because the queues are implemented as
 * macros, disabling this warning in the scope of the header file is
 * insufficient), which means we can't use #pragma, and have to use
 * _Pragma.  We only need to use these for the queue macros that
 * prepend "struct" to "type" and will cause C++ to blow up.
 */
#if defined(__clang__) && defined(__cplusplus)
#define __MISMATCH_TAGS_PUSH						\
	_Pragma("clang diagnostic push")				\
	_Pragma("clang diagnostic ignored \"-Wmismatched-tags\"")
#define __MISMATCH_TAGS_POP						\
	_Pragma("clang diagnostic pop")
#else
#define __MISMATCH_TAGS_PUSH
#define __MISMATCH_TAGS_POP
#endif
/*
 * Ensures that these macros can safely be used in structs when compiling with
 * clang. The macros do not allow for nullability attributes to be specified due
 * to how they are expanded. For example:
 *
 *     SLIST_HEAD(, foo _Nullable) bar;
 *
 * expands to
 *
 *     struct {
 *         struct foo _Nullable *slh_first;
 *     } bar;
 *
 * which is not valid because the nullability specifier has to apply to the
 * pointer. So just ignore nullability completeness in all the places where this
 * is an issue.
 */
#if defined(__clang__)
#define __NULLABILITY_COMPLETENESS_PUSH					\
	_Pragma("clang diagnostic push")				\
	_Pragma("clang diagnostic ignored \"-Wnullability-completeness\"")
#define __NULLABILITY_COMPLETENESS_POP					\
	_Pragma("clang diagnostic pop")
#else
#define __NULLABILITY_COMPLETENESS_PUSH
#define __NULLABILITY_COMPLETENESS_POP
#endif
/*
 * Singly-linked List declarations.
 */
#define SLIST_HEAD(name, type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct name {								\
	struct type *slh_first;	/* first element */			\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define SLIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define SLIST_ENTRY(type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct {								\
	struct type *sle_next;	/* next element */			\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/*
 * Singly-linked List functions.
 */
#define SLIST_EMPTY(head)	((head)->slh_first == NULL)

#define SLIST_FIRST(head)	((head)->slh_first)

#define SLIST_FOREACH(var, head, field)					\
	for ((var) = SLIST_FIRST((head));				\
	    (var);							\
	    (var) = SLIST_NEXT((var), field))

#define SLIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = SLIST_FIRST((head));				\
	    (var) && ((tvar) = SLIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define SLIST_FOREACH_PREVPTR(var, varp, head, field)			\
	for ((varp) = &SLIST_FIRST((head));				\
	    ((var) = *(varp)) != NULL;					\
	    (varp) = &SLIST_NEXT((var), field))

#define SLIST_INIT(head) do {						\
	SLIST_FIRST((head)) = NULL;					\
} while (0)

#define SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
	SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field);	\
	SLIST_NEXT((slistelm), field) = (elm);				\
} while (0)

#define SLIST_INSERT_HEAD(head, elm, field) do {			\
	SLIST_NEXT((elm), field) = SLIST_FIRST((head));			\
	SLIST_FIRST((head)) = (elm);					\
} while (0)

#define SLIST_NEXT(elm, field)	((elm)->field.sle_next)

/* O(n): walks from the head to find the predecessor of elm. */
#define SLIST_REMOVE(head, elm, type, field)				\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
do {									\
	if (SLIST_FIRST((head)) == (elm)) {				\
		SLIST_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = SLIST_FIRST((head));		\
		while (SLIST_NEXT(curelm, field) != (elm))		\
			curelm = SLIST_NEXT(curelm, field);		\
		SLIST_REMOVE_AFTER(curelm, field);			\
	}								\
	TRASHIT((elm)->field.sle_next);					\
} while (0)								\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define SLIST_REMOVE_AFTER(elm, field) do {				\
	SLIST_NEXT(elm, field) =					\
	    SLIST_NEXT(SLIST_NEXT(elm, field), field);			\
} while (0)

#define SLIST_REMOVE_HEAD(head, field) do {				\
	SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field);	\
} while (0)
/*
 * Singly-linked Tail queue declarations.
 */
#define STAILQ_HEAD(name, type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct name {								\
	struct type *stqh_first;/* first element */			\
	struct type **stqh_last;/* addr of last next element */		\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

#define STAILQ_ENTRY(type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/*
 * Singly-linked Tail queue functions.
 */
#define STAILQ_CONCAT(head1, head2) do {				\
	if (!STAILQ_EMPTY((head2))) {					\
		*(head1)->stqh_last = (head2)->stqh_first;		\
		(head1)->stqh_last = (head2)->stqh_last;		\
		STAILQ_INIT((head2));					\
	}								\
} while (0)

#define STAILQ_EMPTY(head)	((head)->stqh_first == NULL)

#define STAILQ_FIRST(head)	((head)->stqh_first)

#define STAILQ_FOREACH(var, head, field)				\
	for((var) = STAILQ_FIRST((head));				\
	   (var);							\
	   (var) = STAILQ_NEXT((var), field))

#define STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST((head));				\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define STAILQ_INIT(head) do {						\
	STAILQ_FIRST((head)) = NULL;					\
	(head)->stqh_last = &STAILQ_FIRST((head));			\
} while (0)

#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {		\
	if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
	STAILQ_NEXT((tqelm), field) = (elm);				\
} while (0)

#define STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
	STAILQ_FIRST((head)) = (elm);					\
} while (0)

#define STAILQ_INSERT_TAIL(head, elm, field) do {			\
	STAILQ_NEXT((elm), field) = NULL;				\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &STAILQ_NEXT((elm), field);			\
} while (0)

/* Recover the last element from stqh_last via the entry's offset. */
#define STAILQ_LAST(head, type, field)					\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
	(STAILQ_EMPTY((head)) ?						\
	    NULL :							\
	    ((struct type *)(void *)					\
	    ((char *)((head)->stqh_last) - __offsetof(struct type, field))))\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)

/* O(n): walks from the head to find the predecessor of elm. */
#define STAILQ_REMOVE(head, elm, type, field)				\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
do {									\
	if (STAILQ_FIRST((head)) == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = STAILQ_FIRST((head));		\
		while (STAILQ_NEXT(curelm, field) != (elm))		\
			curelm = STAILQ_NEXT(curelm, field);		\
		STAILQ_REMOVE_AFTER(head, curelm, field);		\
	}								\
	TRASHIT((elm)->field.stqe_next);				\
} while (0)								\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define STAILQ_REMOVE_HEAD(head, field) do {				\
	if ((STAILQ_FIRST((head)) =					\
	     STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL)		\
		(head)->stqh_last = &STAILQ_FIRST((head));		\
} while (0)

#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do {			\
	if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL)	\
		(head)->stqh_last = &STAILQ_FIRST((head));		\
} while (0)

#define STAILQ_REMOVE_AFTER(head, elm, field) do {			\
	if ((STAILQ_NEXT(elm, field) =					\
	     STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
} while (0)

#define STAILQ_SWAP(head1, head2, type)					\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
do {									\
	struct type *swap_first = STAILQ_FIRST(head1);			\
	struct type **swap_last = (head1)->stqh_last;			\
	STAILQ_FIRST(head1) = STAILQ_FIRST(head2);			\
	(head1)->stqh_last = (head2)->stqh_last;			\
	STAILQ_FIRST(head2) = swap_first;				\
	(head2)->stqh_last = swap_last;					\
	if (STAILQ_EMPTY(head1))					\
		(head1)->stqh_last = &STAILQ_FIRST(head1);		\
	if (STAILQ_EMPTY(head2))					\
		(head2)->stqh_last = &STAILQ_FIRST(head2);		\
} while (0)								\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
/*
 * List declarations.
 */
#define LIST_HEAD(name, type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define LIST_ENTRY(type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/*
 * List functions.
 *
 * Kernel builds verify the doubly-linked invariants before mutating a
 * list and panic on corruption; elsewhere the checks compile away.
 */
#ifdef KERNEL_PRIVATE
#define LIST_CHECK_HEAD(head, field) do {				\
	if (__improbable(						\
	      LIST_FIRST((head)) != NULL &&				\
	      LIST_FIRST((head))->field.le_prev !=			\
	      &LIST_FIRST((head))))					\
		     panic("Bad list head %p first->prev != head", (head)); \
} while (0)

#define LIST_CHECK_NEXT(elm, field) do {				\
	if (__improbable(						\
	      LIST_NEXT((elm), field) != NULL &&			\
	      LIST_NEXT((elm), field)->field.le_prev !=			\
	      &((elm)->field.le_next)))					\
		     panic("Bad link elm %p next->prev != elm", (elm));	\
} while (0)

#define LIST_CHECK_PREV(elm, field) do {				\
	if (__improbable(*(elm)->field.le_prev != (elm)))		\
		panic("Bad link elm %p prev->next != elm", (elm));	\
} while (0)
#else
#define LIST_CHECK_HEAD(head, field)
#define LIST_CHECK_NEXT(elm, field)
#define LIST_CHECK_PREV(elm, field)
#endif /* KERNEL_PRIVATE */

#define LIST_EMPTY(head)	((head)->lh_first == NULL)

#define LIST_FIRST(head)	((head)->lh_first)

#define LIST_FOREACH(var, head, field)					\
	for ((var) = LIST_FIRST((head));				\
	    (var);							\
	    (var) = LIST_NEXT((var), field))

#define LIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = LIST_FIRST((head));				\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define LIST_INIT(head) do {						\
	LIST_FIRST((head)) = NULL;					\
} while (0)

#define LIST_INSERT_AFTER(listelm, elm, field) do {			\
	LIST_CHECK_NEXT(listelm, field);				\
	if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
		LIST_NEXT((listelm), field)->field.le_prev =		\
		    &LIST_NEXT((elm), field);				\
	LIST_NEXT((listelm), field) = (elm);				\
	(elm)->field.le_prev = &LIST_NEXT((listelm), field);		\
} while (0)

#define LIST_INSERT_BEFORE(listelm, elm, field) do {			\
	LIST_CHECK_PREV(listelm, field);				\
	(elm)->field.le_prev = (listelm)->field.le_prev;		\
	LIST_NEXT((elm), field) = (listelm);				\
	*(listelm)->field.le_prev = (elm);				\
	(listelm)->field.le_prev = &LIST_NEXT((elm), field);		\
} while (0)

#define LIST_INSERT_HEAD(head, elm, field) do {				\
	LIST_CHECK_HEAD((head), field);					\
	if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL)	\
		LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
	LIST_FIRST((head)) = (elm);					\
	(elm)->field.le_prev = &LIST_FIRST((head));			\
} while (0)

#define LIST_NEXT(elm, field)	((elm)->field.le_next)

#define LIST_REMOVE(elm, field) do {					\
	LIST_CHECK_NEXT(elm, field);					\
	LIST_CHECK_PREV(elm, field);					\
	if (LIST_NEXT((elm), field) != NULL)				\
		LIST_NEXT((elm), field)->field.le_prev =		\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = LIST_NEXT((elm), field);		\
	TRASHIT((elm)->field.le_next);					\
	TRASHIT((elm)->field.le_prev);					\
} while (0)

#define LIST_SWAP(head1, head2, type, field)				\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
do {									\
	struct type *swap_tmp = LIST_FIRST((head1));			\
	LIST_FIRST((head1)) = LIST_FIRST((head2));			\
	LIST_FIRST((head2)) = swap_tmp;					\
	if ((swap_tmp = LIST_FIRST((head1))) != NULL)			\
		swap_tmp->field.le_prev = &LIST_FIRST((head1));		\
	if ((swap_tmp = LIST_FIRST((head2))) != NULL)			\
		swap_tmp->field.le_prev = &LIST_FIRST((head2));		\
} while (0)								\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
/*
 * Tail queue declarations.
 */
#define TAILQ_HEAD(name, type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct name {								\
	struct type *tqh_first;	/* first element */			\
	struct type **tqh_last;	/* addr of last next element */		\
	TRACEBUF							\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

#define TAILQ_ENTRY(type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct {								\
	struct type *tqe_next;	/* next element */			\
	struct type **tqe_prev;	/* address of previous next element */	\
	TRACEBUF							\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/*
 * Tail queue functions.
 *
 * Kernel builds verify the doubly-linked invariants before mutating a
 * tailq and panic on corruption; elsewhere the checks compile away.
 */
#ifdef KERNEL_PRIVATE
#define TAILQ_CHECK_HEAD(head, field) do {				\
	if (__improbable(						\
	      TAILQ_FIRST((head)) != NULL &&				\
	      TAILQ_FIRST((head))->field.tqe_prev !=			\
	      &TAILQ_FIRST((head))))					\
		     panic("Bad tailq head %p first->prev != head", (head)); \
} while (0)

#define TAILQ_CHECK_NEXT(elm, field) do {				\
	if (__improbable(						\
	      TAILQ_NEXT((elm), field) != NULL &&			\
	      TAILQ_NEXT((elm), field)->field.tqe_prev !=		\
	      &((elm)->field.tqe_next)))				\
		     panic("Bad tailq elm %p next->prev != elm", (elm)); \
} while (0)

#define TAILQ_CHECK_PREV(elm, field) do {				\
	if (__improbable(*(elm)->field.tqe_prev != (elm)))		\
		panic("Bad tailq elm %p prev->next != elm", (elm));	\
} while (0)
#else
#define TAILQ_CHECK_HEAD(head, field)
#define TAILQ_CHECK_NEXT(elm, field)
#define TAILQ_CHECK_PREV(elm, field)
#endif /* KERNEL_PRIVATE */

#define TAILQ_CONCAT(head1, head2, field) do {				\
	if (!TAILQ_EMPTY(head2)) {					\
		*(head1)->tqh_last = (head2)->tqh_first;		\
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	\
		(head1)->tqh_last = (head2)->tqh_last;			\
		TAILQ_INIT((head2));					\
		QMD_TRACE_HEAD(head1);					\
		QMD_TRACE_HEAD(head2);					\
	}								\
} while (0)

#define TAILQ_EMPTY(head)	((head)->tqh_first == NULL)

#define TAILQ_FIRST(head)	((head)->tqh_first)

#define TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST((head));				\
	    (var);							\
	    (var) = TAILQ_NEXT((var), field))

#define TAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = TAILQ_FIRST((head));				\
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var);							\
	    (var) = TAILQ_PREV((var), headname, field))

#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)	\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);	\
	    (var) = (tvar))

#if XNU_KERNEL_PRIVATE
/*
 * Can be used when the initialized HEAD was just bzeroed
 * Works around deficiencies in clang analysis of initialization patterns.
 * See: <rdar://problem/47939050>
 */
#define TAILQ_INIT_AFTER_BZERO(head) do {				\
	(head)->tqh_last = &TAILQ_FIRST((head));			\
} while (0)
#endif /* XNU_KERNEL_PRIVATE */

#define TAILQ_INIT(head) do {						\
	TAILQ_FIRST((head)) = NULL;					\
	(head)->tqh_last = &TAILQ_FIRST((head));			\
	QMD_TRACE_HEAD(head);						\
} while (0)

#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	TAILQ_CHECK_NEXT(listelm, field);				\
	if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
		TAILQ_NEXT((elm), field)->field.tqe_prev =		\
		    &TAILQ_NEXT((elm), field);				\
	else {								\
		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
		QMD_TRACE_HEAD(head);					\
	}								\
	TAILQ_NEXT((listelm), field) = (elm);				\
	(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field);		\
	QMD_TRACE_ELEM(&(elm)->field);					\
	QMD_TRACE_ELEM(&listelm->field);				\
} while (0)

#define TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	TAILQ_CHECK_PREV(listelm, field);				\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	TAILQ_NEXT((elm), field) = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field);		\
	QMD_TRACE_ELEM(&(elm)->field);					\
	QMD_TRACE_ELEM(&listelm->field);				\
} while (0)

#define TAILQ_INSERT_HEAD(head, elm, field) do {			\
	TAILQ_CHECK_HEAD(head, field);					\
	if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)	\
		TAILQ_FIRST((head))->field.tqe_prev =			\
		    &TAILQ_NEXT((elm), field);				\
	else								\
		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
	TAILQ_FIRST((head)) = (elm);					\
	(elm)->field.tqe_prev = &TAILQ_FIRST((head));			\
	QMD_TRACE_HEAD(head);						\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

#define TAILQ_INSERT_TAIL(head, elm, field) do {			\
	TAILQ_NEXT((elm), field) = NULL;				\
	(elm)->field.tqe_prev = (head)->tqh_last;			\
	*(head)->tqh_last = (elm);					\
	(head)->tqh_last = &TAILQ_NEXT((elm), field);			\
	QMD_TRACE_HEAD(head);						\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

#define TAILQ_LAST(head, headname)					\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
	(*(((struct headname *)((head)->tqh_last))->tqh_last))		\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)

#define TAILQ_PREV(elm, headname, field)				\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))	\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define TAILQ_REMOVE(head, elm, field) do {				\
	TAILQ_CHECK_NEXT(elm, field);					\
	TAILQ_CHECK_PREV(elm, field);					\
	if ((TAILQ_NEXT((elm), field)) != NULL)				\
		TAILQ_NEXT((elm), field)->field.tqe_prev =		\
		    (elm)->field.tqe_prev;				\
	else {								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
		QMD_TRACE_HEAD(head);					\
	}								\
	*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);		\
	TRASHIT((elm)->field.tqe_next);					\
	TRASHIT((elm)->field.tqe_prev);					\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

/*
 * Why did they switch to spaces for this one macro?
 */
#define TAILQ_SWAP(head1, head2, type, field)				\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
do {									\
	struct type *swap_first = (head1)->tqh_first;			\
	struct type **swap_last = (head1)->tqh_last;			\
	(head1)->tqh_first = (head2)->tqh_first;			\
	(head1)->tqh_last = (head2)->tqh_last;				\
	(head2)->tqh_first = swap_first;				\
	(head2)->tqh_last = swap_last;					\
	if ((swap_first = (head1)->tqh_first) != NULL)			\
		swap_first->field.tqe_prev = &(head1)->tqh_first;	\
	else								\
		(head1)->tqh_last = &(head1)->tqh_first;		\
	if ((swap_first = (head2)->tqh_first) != NULL)			\
		swap_first->field.tqe_prev = &(head2)->tqh_first;	\
	else								\
		(head2)->tqh_last = &(head2)->tqh_first;		\
} while (0)								\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
/*
 * Circular queue definitions.  Deprecated: prefer TAILQ (see the note in
 * the overview comment above); the head itself terminates the ring, so
 * end-of-list tests compare against (void *)(head).
 */
#define CIRCLEQ_HEAD(name, type)					\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct name {								\
	struct type *cqh_first;		/* first element */		\
	struct type *cqh_last;		/* last element */		\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define CIRCLEQ_ENTRY(type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct {								\
	struct type *cqe_next;		/* next element */		\
	struct type *cqe_prev;		/* previous element */		\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/*
 * Circular queue functions.
 */
#ifdef KERNEL_PRIVATE
#define CIRCLEQ_CHECK_HEAD(head, field) do {				\
	if (__improbable(						\
	      CIRCLEQ_FIRST((head)) != ((void*)(head)) &&		\
	      CIRCLEQ_FIRST((head))->field.cqe_prev != ((void*)(head))))\
		     panic("Bad circleq head %p first->prev != head", (head)); \
} while (0)

#define CIRCLEQ_CHECK_NEXT(head, elm, field) do {			\
	if (__improbable(						\
	      CIRCLEQ_NEXT((elm), field) != ((void*)(head)) &&		\
	      CIRCLEQ_NEXT((elm), field)->field.cqe_prev != (elm)))	\
		     panic("Bad circleq elm %p next->prev != elm", (elm)); \
} while (0)

#define CIRCLEQ_CHECK_PREV(head, elm, field) do {			\
	if (__improbable(						\
	      CIRCLEQ_PREV((elm), field) != ((void*)(head)) &&		\
	      CIRCLEQ_PREV((elm), field)->field.cqe_next != (elm)))	\
		     panic("Bad circleq elm %p prev->next != elm", (elm)); \
} while (0)
#else
#define CIRCLEQ_CHECK_HEAD(head, field)
#define CIRCLEQ_CHECK_NEXT(head, elm, field)
#define CIRCLEQ_CHECK_PREV(head, elm, field)
#endif /* KERNEL_PRIVATE */

#define CIRCLEQ_EMPTY(head)	((head)->cqh_first == (void *)(head))

#define CIRCLEQ_FIRST(head)	((head)->cqh_first)

#define CIRCLEQ_FOREACH(var, head, field)				\
	for((var) = (head)->cqh_first;					\
	    (var) != (void *)(head);					\
	    (var) = (var)->field.cqe_next)

#define CIRCLEQ_INIT(head) do {						\
	(head)->cqh_first = (void *)(head);				\
	(head)->cqh_last = (void *)(head);				\
} while (0)

#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	CIRCLEQ_CHECK_NEXT(head, listelm, field);			\
	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
	(elm)->field.cqe_prev = (listelm);				\
	if ((listelm)->field.cqe_next == (void *)(head))		\
		(head)->cqh_last = (elm);				\
	else								\
		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
	(listelm)->field.cqe_next = (elm);				\
} while (0)

#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
	CIRCLEQ_CHECK_PREV(head, listelm, field);			\
	(elm)->field.cqe_next = (listelm);				\
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
	if ((listelm)->field.cqe_prev == (void *)(head))		\
		(head)->cqh_first = (elm);				\
	else								\
		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
	(listelm)->field.cqe_prev = (elm);				\
} while (0)

#define CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
	CIRCLEQ_CHECK_HEAD(head, field);				\
	(elm)->field.cqe_next = (head)->cqh_first;			\
	(elm)->field.cqe_prev = (void *)(head);				\
	if ((head)->cqh_last == (void *)(head))				\
		(head)->cqh_last = (elm);				\
	else								\
		(head)->cqh_first->field.cqe_prev = (elm);		\
	(head)->cqh_first = (elm);					\
} while (0)

#define CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.cqe_next = (void *)(head);				\
	(elm)->field.cqe_prev = (head)->cqh_last;			\
	if ((head)->cqh_first == (void *)(head))			\
		(head)->cqh_first = (elm);				\
	else								\
		(head)->cqh_last->field.cqe_next = (elm);		\
	(head)->cqh_last = (elm);					\
} while (0)

#define CIRCLEQ_LAST(head)	((head)->cqh_last)

#define CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)

#define CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)

#define CIRCLEQ_REMOVE(head, elm, field) do {				\
	CIRCLEQ_CHECK_NEXT(head, elm, field);				\
	CIRCLEQ_CHECK_PREV(head, elm, field);				\
	if ((elm)->field.cqe_next == (void *)(head))			\
		(head)->cqh_last = (elm)->field.cqe_prev;		\
	else								\
		(elm)->field.cqe_next->field.cqe_prev =			\
		    (elm)->field.cqe_prev;				\
	if ((elm)->field.cqe_prev == (void *)(head))			\
		(head)->cqh_first = (elm)->field.cqe_next;		\
	else								\
		(elm)->field.cqe_prev->field.cqe_next =			\
		    (elm)->field.cqe_next;				\
} while (0)
/*
 * XXX insque() and remque() are an old way of handling certain queues.
 * They bogusly assume that all queue heads look alike.
 */

struct quehead {
	struct quehead *qh_link;	/* forward link */
	struct quehead *qh_rlink;	/* backward link */
};

#ifdef __GNUC__
#ifdef KERNEL_PRIVATE
/* Verify forward-link consistency of a quehead node; panic on corruption. */
static __inline void
chkquenext(void *a)
{
	struct quehead *element = (struct quehead *)a;
	if (__improbable(element->qh_link != NULL &&
	    element->qh_link->qh_rlink != element)) {
		panic("Bad que elm %p next->prev != elm", a);
	}
}

/* Verify backward-link consistency of a quehead node; panic on corruption. */
static __inline void
chkqueprev(void *a)
{
	struct quehead *element = (struct quehead *)a;
	if (__improbable(element->qh_rlink != NULL &&
	    element->qh_rlink->qh_link != element)) {
		panic("Bad que elm %p prev->next != elm", a);
	}
}
#else /* !KERNEL_PRIVATE */
#define chkquenext(a)
#define chkqueprev(a)
#endif /* KERNEL_PRIVATE */

/* Insert element a immediately after head b in a circular quehead ring. */
static __inline void
insque(void *a, void *b)
{
	struct quehead *element = (struct quehead *)a,
	    *head = (struct quehead *)b;
	chkquenext(head);

	element->qh_link = head->qh_link;
	element->qh_rlink = head;
	head->qh_link = element;
	element->qh_link->qh_rlink = element;
}

/* Unlink element a from its ring; its backward link is cleared. */
static __inline void
remque(void *a)
{
	struct quehead *element = (struct quehead *)a;
	chkquenext(element);
	chkqueprev(element);

	element->qh_link->qh_rlink = element->qh_rlink;
	element->qh_rlink->qh_link = element->qh_link;
	element->qh_rlink = 0;
}

#else /* !__GNUC__ */

void	insque(void *a, void *b);
void	remque(void *a);

#endif /* __GNUC__ */
1010 #endif /* NOTFB31 */
1011 #endif /* _KERNEL */
1013 #endif /* !_SYS_QUEUE_H_ */