/* bsd/sys/queue.h — Apple xnu (release xnu-6153.61.1) */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*-
29 * Copyright (c) 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 4. Neither the name of the University nor the names of its contributors
41 * may be used to endorse or promote products derived from this software
42 * without specific prior written permission.
43 *
44 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
45 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
46 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
47 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
48 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
49 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
50 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
51 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
53 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
54 * SUCH DAMAGE.
55 *
56 * @(#)queue.h 8.5 (Berkeley) 8/20/94
57 */
58
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_

#ifdef KERNEL_PRIVATE
#include <kern/debug.h>         /* panic function call */
#include <sys/cdefs.h>          /* __improbable in kernelspace */
#else
/* Outside the kernel there is no branch-prediction hint available. */
#ifndef __improbable
#define __improbable(x) (x)     /* noop in userspace */
#endif /* __improbable */
#endif /* KERNEL_PRIVATE */
70
71 /*
72 * This file defines five types of data structures: singly-linked lists,
73 * singly-linked tail queues, lists, tail queues, and circular queues.
74 *
75 * A singly-linked list is headed by a single forward pointer. The elements
76 * are singly linked for minimum space and pointer manipulation overhead at
77 * the expense of O(n) removal for arbitrary elements. New elements can be
78 * added to the list after an existing element or at the head of the list.
79 * Elements being removed from the head of the list should use the explicit
80 * macro for this purpose for optimum efficiency. A singly-linked list may
81 * only be traversed in the forward direction. Singly-linked lists are ideal
82 * for applications with large datasets and few or no removals or for
83 * implementing a LIFO queue.
84 *
85 * A singly-linked tail queue is headed by a pair of pointers, one to the
86 * head of the list and the other to the tail of the list. The elements are
87 * singly linked for minimum space and pointer manipulation overhead at the
88 * expense of O(n) removal for arbitrary elements. New elements can be added
89 * to the list after an existing element, at the head of the list, or at the
90 * end of the list. Elements being removed from the head of the tail queue
91 * should use the explicit macro for this purpose for optimum efficiency.
92 * A singly-linked tail queue may only be traversed in the forward direction.
93 * Singly-linked tail queues are ideal for applications with large datasets
94 * and few or no removals or for implementing a FIFO queue.
95 *
96 * A list is headed by a single forward pointer (or an array of forward
97 * pointers for a hash table header). The elements are doubly linked
98 * so that an arbitrary element can be removed without a need to
99 * traverse the list. New elements can be added to the list before
100 * or after an existing element or at the head of the list. A list
101 * may only be traversed in the forward direction.
102 *
103 * A tail queue is headed by a pair of pointers, one to the head of the
104 * list and the other to the tail of the list. The elements are doubly
105 * linked so that an arbitrary element can be removed without a need to
106 * traverse the list. New elements can be added to the list before or
107 * after an existing element, at the head of the list, or at the end of
108 * the list. A tail queue may be traversed in either direction.
109 *
110 * A circle queue is headed by a pair of pointers, one to the head of the
111 * list and the other to the tail of the list. The elements are doubly
112 * linked so that an arbitrary element can be removed without a need to
113 * traverse the list. New elements can be added to the list before or after
114 * an existing element, at the head of the list, or at the end of the list.
115 * A circle queue may be traversed in either direction, but has a more
116 * complex end of list detection.
117 * Note that circle queues are deprecated, because, as the removal log
118 * in FreeBSD states, "CIRCLEQs are a disgrace to everything Knuth taught
119 * us in Volume 1 Chapter 2. [...] Use TAILQ instead, it provides the same
120 * functionality." Code using them will continue to compile, but they
121 * are no longer documented on the man page.
122 *
123 * For details on the use of these macros, see the queue(3) manual page.
124 *
125 *
126 * SLIST LIST STAILQ TAILQ CIRCLEQ
127 * _HEAD + + + + +
128 * _HEAD_INITIALIZER + + + + -
129 * _ENTRY + + + + +
130 * _INIT + + + + +
131 * _EMPTY + + + + +
132 * _FIRST + + + + +
133 * _NEXT + + + + +
134 * _PREV - - - + +
135 * _LAST - - + + +
136 * _FOREACH + + + + +
137 * _FOREACH_SAFE + + + + -
138 * _FOREACH_REVERSE - - - + -
139 * _FOREACH_REVERSE_SAFE - - - + -
140 * _INSERT_HEAD + + + + +
141 * _INSERT_BEFORE - + - + +
142 * _INSERT_AFTER + + + + +
143 * _INSERT_TAIL - - + + +
144 * _CONCAT - - + + -
145 * _REMOVE_AFTER + - + - -
146 * _REMOVE_HEAD + - + - -
147 * _REMOVE_HEAD_UNTIL - - + - -
148 * _REMOVE + + + + +
149 * _SWAP - + + + -
150 *
151 */
#ifdef QUEUE_MACRO_DEBUG
/* Store the last 2 places the queue element or head was altered */
struct qm_trace {
	char * lastfile;        /* most recent __FILE__ that touched this */
	int lastline;           /* most recent __LINE__ that touched this */
	char * prevfile;        /* previous file, shifted down on update */
	int prevline;           /* previous line, shifted down on update */
};

/* Embed a trace record in each head/entry when debugging is enabled. */
#define TRACEBUF struct qm_trace trace;
/* Poison a removed link pointer so a stale use faults immediately. */
#define TRASHIT(x) do {(x) = (void *)-1;} while (0)

/* Record the current file/line against a queue head, keeping one level
 * of history (last -> prev). */
#define QMD_TRACE_HEAD(head) do { \
	(head)->trace.prevline = (head)->trace.lastline; \
	(head)->trace.prevfile = (head)->trace.lastfile; \
	(head)->trace.lastline = __LINE__; \
	(head)->trace.lastfile = __FILE__; \
} while (0)

/* Record the current file/line against a queue element (same shifting
 * scheme as QMD_TRACE_HEAD). */
#define QMD_TRACE_ELEM(elem) do { \
	(elem)->trace.prevline = (elem)->trace.lastline; \
	(elem)->trace.prevfile = (elem)->trace.lastfile; \
	(elem)->trace.lastline = __LINE__; \
	(elem)->trace.lastfile = __FILE__; \
} while (0)

#else
/* Debugging disabled: the trace hooks compile to nothing. */
#define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head)
#define TRACEBUF
#define TRASHIT(x)
#endif /* QUEUE_MACRO_DEBUG */
184
/*
 * Horrible macros to enable use of code that was meant to be C-specific
 * (and which push struct onto type) in C++; without these, C++ code
 * that uses these macros in the context of a class will blow up
 * due to "struct" being prepended to "type" by the macros, causing
 * inconsistent use of tags.
 *
 * This approach is necessary because these are macros; we have to use
 * these on a per-macro basis (because the queues are implemented as
 * macros, disabling this warning in the scope of the header file is
 * insufficient), which means we can't use #pragma, and have to use
 * _Pragma.  We only need to use these for the queue macros that
 * prepend "struct" to "type" and will cause C++ to blow up.
 */
#if defined(__clang__) && defined(__cplusplus)
#define __MISMATCH_TAGS_PUSH \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Wmismatched-tags\"")
#define __MISMATCH_TAGS_POP \
	_Pragma("clang diagnostic pop")
#else
#define __MISMATCH_TAGS_PUSH
#define __MISMATCH_TAGS_POP
#endif
209
/*!
 * Ensures that these macros can safely be used in structs when compiling with
 * clang. The macros do not allow for nullability attributes to be specified due
 * to how they are expanded. For example:
 *
 *     SLIST_HEAD(, foo _Nullable) bar;
 *
 * expands to
 *
 *     struct {
 *         struct foo _Nullable *slh_first;
 *     }
 *
 * which is not valid because the nullability specifier has to apply to the
 * pointer. So just ignore nullability completeness in all the places where this
 * is an issue.
 */
#if defined(__clang__)
#define __NULLABILITY_COMPLETENESS_PUSH \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Wnullability-completeness\"")
#define __NULLABILITY_COMPLETENESS_POP \
	_Pragma("clang diagnostic pop")
#else
/* Non-clang compilers do not emit this diagnostic; expand to nothing. */
#define __NULLABILITY_COMPLETENESS_PUSH
#define __NULLABILITY_COMPLETENESS_POP
#endif
237
/*
 * Singly-linked List declarations.
 */
#define SLIST_HEAD(name, type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
struct name { \
	struct type *slh_first; /* first element */ \
} \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

#define SLIST_HEAD_INITIALIZER(head) \
	{ NULL }

#define SLIST_ENTRY(type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
struct { \
	struct type *sle_next;  /* next element */ \
} \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

/*
 * Singly-linked List functions.
 */
#define SLIST_EMPTY(head)       ((head)->slh_first == NULL)

#define SLIST_FIRST(head)       ((head)->slh_first)

#define SLIST_FOREACH(var, head, field) \
	for ((var) = SLIST_FIRST((head)); \
	    (var); \
	    (var) = SLIST_NEXT((var), field))

/* Traversal that remains valid if (var) is removed inside the loop body:
 * the successor is latched into (tvar) before the body runs. */
#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = SLIST_FIRST((head)); \
	    (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
	    (var) = (tvar))

/* Traversal that also tracks (varp), the address of the pointer that
 * references (var); useful for O(1) unlinking of the current element. */
#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
	for ((varp) = &SLIST_FIRST((head)); \
	    ((var) = *(varp)) != NULL; \
	    (varp) = &SLIST_NEXT((var), field))

#define SLIST_INIT(head) do { \
	SLIST_FIRST((head)) = NULL; \
} while (0)

#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
	SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
	SLIST_NEXT((slistelm), field) = (elm); \
} while (0)

#define SLIST_INSERT_HEAD(head, elm, field) do { \
	SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
	SLIST_FIRST((head)) = (elm); \
} while (0)

#define SLIST_NEXT(elm, field)  ((elm)->field.sle_next)

/* Remove (elm) from the list.  O(n): walks from the head to find the
 * predecessor.  (elm) must actually be on the list. */
#define SLIST_REMOVE(head, elm, type, field) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
do { \
	if (SLIST_FIRST((head)) == (elm)) { \
		SLIST_REMOVE_HEAD((head), field); \
	} \
	else { \
		struct type *curelm = SLIST_FIRST((head)); \
		while (SLIST_NEXT(curelm, field) != (elm)) \
			curelm = SLIST_NEXT(curelm, field); \
		SLIST_REMOVE_AFTER(curelm, field); \
	} \
	TRASHIT((elm)->field.sle_next); \
} while (0) \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

/* Unlink the element that follows (elm); O(1). */
#define SLIST_REMOVE_AFTER(elm, field) do { \
	SLIST_NEXT(elm, field) = \
	    SLIST_NEXT(SLIST_NEXT(elm, field), field); \
} while (0)

/* Unlink the first element; O(1).  The list must be non-empty. */
#define SLIST_REMOVE_HEAD(head, field) do { \
	SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
} while (0)
326
/*
 * Singly-linked Tail queue declarations.
 */
#define STAILQ_HEAD(name, type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
struct name { \
	struct type *stqh_first;/* first element */ \
	struct type **stqh_last;/* addr of last next element */ \
} \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

#define STAILQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).stqh_first }

#define STAILQ_ENTRY(type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
struct { \
	struct type *stqe_next; /* next element */ \
} \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

/*
 * Singly-linked Tail queue functions.
 */
/* Append all of head2 to the tail of head1 and leave head2 empty; O(1). */
#define STAILQ_CONCAT(head1, head2) do { \
	if (!STAILQ_EMPTY((head2))) { \
		*(head1)->stqh_last = (head2)->stqh_first; \
		(head1)->stqh_last = (head2)->stqh_last; \
		STAILQ_INIT((head2)); \
	} \
} while (0)

#define STAILQ_EMPTY(head)      ((head)->stqh_first == NULL)

#define STAILQ_FIRST(head)      ((head)->stqh_first)

#define STAILQ_FOREACH(var, head, field) \
	for((var) = STAILQ_FIRST((head)); \
	   (var); \
	   (var) = STAILQ_NEXT((var), field))


/* Removal-safe traversal: the successor is latched into (tvar) before
 * the loop body runs. */
#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = STAILQ_FIRST((head)); \
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
	    (var) = (tvar))

#define STAILQ_INIT(head) do { \
	STAILQ_FIRST((head)) = NULL; \
	(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)

/* Insert (elm) after (tqelm), updating stqh_last if (tqelm) was last. */
#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
	if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
		(head)->stqh_last = &STAILQ_NEXT((elm), field); \
	STAILQ_NEXT((tqelm), field) = (elm); \
} while (0)

#define STAILQ_INSERT_HEAD(head, elm, field) do { \
	if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
		(head)->stqh_last = &STAILQ_NEXT((elm), field); \
	STAILQ_FIRST((head)) = (elm); \
} while (0)

#define STAILQ_INSERT_TAIL(head, elm, field) do { \
	STAILQ_NEXT((elm), field) = NULL; \
	*(head)->stqh_last = (elm); \
	(head)->stqh_last = &STAILQ_NEXT((elm), field); \
} while (0)

/* Compute the last element.  stqh_last holds the address of the final
 * element's stqe_next field, so backing up by offsetof(field) recovers
 * the element itself; NULL when the queue is empty.  O(1). */
#define STAILQ_LAST(head, type, field) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
	(STAILQ_EMPTY((head)) ? \
	    NULL : \
	    ((struct type *)(void *) \
	    ((char *)((head)->stqh_last) - __offsetof(struct type, field))))\
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)

/* Remove (elm) from the queue.  O(n): walks from the head to find the
 * predecessor.  (elm) must actually be on the queue. */
#define STAILQ_REMOVE(head, elm, type, field) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
do { \
	if (STAILQ_FIRST((head)) == (elm)) { \
		STAILQ_REMOVE_HEAD((head), field); \
	} \
	else { \
		struct type *curelm = STAILQ_FIRST((head)); \
		while (STAILQ_NEXT(curelm, field) != (elm)) \
			curelm = STAILQ_NEXT(curelm, field); \
		STAILQ_REMOVE_AFTER(head, curelm, field); \
	} \
	TRASHIT((elm)->field.stqe_next); \
} while (0) \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

#define STAILQ_REMOVE_HEAD(head, field) do { \
	if ((STAILQ_FIRST((head)) = \
	    STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
		(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)

/* Drop every element from the head up to and including (elm); O(1). */
#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
	if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
		(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)

#define STAILQ_REMOVE_AFTER(head, elm, field) do { \
	if ((STAILQ_NEXT(elm, field) = \
	    STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \
		(head)->stqh_last = &STAILQ_NEXT((elm), field); \
} while (0)

/* Exchange the contents of two queues; O(1).  An emptied queue gets its
 * stqh_last repointed at its own stqh_first. */
#define STAILQ_SWAP(head1, head2, type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
do { \
	struct type *swap_first = STAILQ_FIRST(head1); \
	struct type **swap_last = (head1)->stqh_last; \
	STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \
	(head1)->stqh_last = (head2)->stqh_last; \
	STAILQ_FIRST(head2) = swap_first; \
	(head2)->stqh_last = swap_last; \
	if (STAILQ_EMPTY(head1)) \
		(head1)->stqh_last = &STAILQ_FIRST(head1); \
	if (STAILQ_EMPTY(head2)) \
		(head2)->stqh_last = &STAILQ_FIRST(head2); \
} while (0) \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP
465
466
/*
 * List declarations.
 */
#define LIST_HEAD(name, type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
struct name { \
	struct type *lh_first;  /* first element */ \
} \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

#define LIST_HEAD_INITIALIZER(head) \
	{ NULL }

#define LIST_ENTRY(type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
struct { \
	struct type *le_next;   /* next element */ \
	struct type **le_prev;  /* address of previous next element */ \
} \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

/*
 * List functions.
 */

#ifdef KERNEL_PRIVATE
/* Kernel-only consistency checks: panic if a link's back pointer does
 * not reference the forward pointer that leads to it. */
#define LIST_CHECK_HEAD(head, field) do { \
	if (__improbable( \
	      LIST_FIRST((head)) != NULL && \
	      LIST_FIRST((head))->field.le_prev != \
	      &LIST_FIRST((head)))) \
		panic("Bad list head %p first->prev != head", (head)); \
} while (0)

#define LIST_CHECK_NEXT(elm, field) do { \
	if (__improbable( \
	      LIST_NEXT((elm), field) != NULL && \
	      LIST_NEXT((elm), field)->field.le_prev != \
	      &((elm)->field.le_next))) \
		panic("Bad link elm %p next->prev != elm", (elm)); \
} while (0)

#define LIST_CHECK_PREV(elm, field) do { \
	if (__improbable(*(elm)->field.le_prev != (elm))) \
		panic("Bad link elm %p prev->next != elm", (elm)); \
} while (0)
#else
/* Userspace: the checks compile away. */
#define LIST_CHECK_HEAD(head, field)
#define LIST_CHECK_NEXT(elm, field)
#define LIST_CHECK_PREV(elm, field)
#endif /* KERNEL_PRIVATE */

#define LIST_EMPTY(head)        ((head)->lh_first == NULL)

#define LIST_FIRST(head)        ((head)->lh_first)

#define LIST_FOREACH(var, head, field) \
	for ((var) = LIST_FIRST((head)); \
	    (var); \
	    (var) = LIST_NEXT((var), field))

/* Removal-safe traversal: the successor is latched into (tvar) before
 * the loop body runs. */
#define LIST_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = LIST_FIRST((head)); \
	    (var) && ((tvar) = LIST_NEXT((var), field), 1); \
	    (var) = (tvar))

#define LIST_INIT(head) do { \
	LIST_FIRST((head)) = NULL; \
} while (0)

#define LIST_INSERT_AFTER(listelm, elm, field) do { \
	LIST_CHECK_NEXT(listelm, field); \
	if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
		LIST_NEXT((listelm), field)->field.le_prev = \
		    &LIST_NEXT((elm), field); \
	LIST_NEXT((listelm), field) = (elm); \
	(elm)->field.le_prev = &LIST_NEXT((listelm), field); \
} while (0)

/* Insert before (listelm); works even at the head because le_prev holds
 * the address of whichever pointer references (listelm). */
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
	LIST_CHECK_PREV(listelm, field); \
	(elm)->field.le_prev = (listelm)->field.le_prev; \
	LIST_NEXT((elm), field) = (listelm); \
	*(listelm)->field.le_prev = (elm); \
	(listelm)->field.le_prev = &LIST_NEXT((elm), field); \
} while (0)

#define LIST_INSERT_HEAD(head, elm, field) do { \
	LIST_CHECK_HEAD((head), field); \
	if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
		LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
	LIST_FIRST((head)) = (elm); \
	(elm)->field.le_prev = &LIST_FIRST((head)); \
} while (0)

#define LIST_NEXT(elm, field)   ((elm)->field.le_next)

/* Remove (elm) in O(1) via its le_prev back pointer; the links are then
 * poisoned (TRASHIT) so stale use faults under QUEUE_MACRO_DEBUG. */
#define LIST_REMOVE(elm, field) do { \
	LIST_CHECK_NEXT(elm, field); \
	LIST_CHECK_PREV(elm, field); \
	if (LIST_NEXT((elm), field) != NULL) \
		LIST_NEXT((elm), field)->field.le_prev = \
		    (elm)->field.le_prev; \
	*(elm)->field.le_prev = LIST_NEXT((elm), field); \
	TRASHIT((elm)->field.le_next); \
	TRASHIT((elm)->field.le_prev); \
} while (0)

/* Exchange the contents of two lists; each new first element (if any)
 * gets its le_prev repointed at its new head. */
#define LIST_SWAP(head1, head2, type, field) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
do { \
	struct type *swap_tmp = LIST_FIRST((head1)); \
	LIST_FIRST((head1)) = LIST_FIRST((head2)); \
	LIST_FIRST((head2)) = swap_tmp; \
	if ((swap_tmp = LIST_FIRST((head1))) != NULL) \
		swap_tmp->field.le_prev = &LIST_FIRST((head1)); \
	if ((swap_tmp = LIST_FIRST((head2))) != NULL) \
		swap_tmp->field.le_prev = &LIST_FIRST((head2)); \
} while (0) \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP
593
/*
 * Tail queue declarations.
 */
#define TAILQ_HEAD(name, type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
struct name { \
	struct type *tqh_first; /* first element */ \
	struct type **tqh_last; /* addr of last next element */ \
	TRACEBUF \
} \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

#define TAILQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).tqh_first }

#define TAILQ_ENTRY(type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
struct { \
	struct type *tqe_next;  /* next element */ \
	struct type **tqe_prev; /* address of previous next element */ \
	TRACEBUF \
} \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

/*
 * Tail queue functions.
 */
#ifdef KERNEL_PRIVATE
/* Kernel-only consistency checks: panic if a link's back pointer does
 * not reference the forward pointer that leads to it. */
#define TAILQ_CHECK_HEAD(head, field) do { \
	if (__improbable( \
	      TAILQ_FIRST((head)) != NULL && \
	      TAILQ_FIRST((head))->field.tqe_prev != \
	      &TAILQ_FIRST((head)))) \
		panic("Bad tailq head %p first->prev != head", (head)); \
} while (0)

#define TAILQ_CHECK_NEXT(elm, field) do { \
	if (__improbable( \
	      TAILQ_NEXT((elm), field) != NULL && \
	      TAILQ_NEXT((elm), field)->field.tqe_prev != \
	      &((elm)->field.tqe_next))) \
		panic("Bad tailq elm %p next->prev != elm", (elm)); \
} while(0)

#define TAILQ_CHECK_PREV(elm, field) do { \
	if (__improbable(*(elm)->field.tqe_prev != (elm))) \
		panic("Bad tailq elm %p prev->next != elm", (elm)); \
} while(0)
#else
/* Userspace: the checks compile away. */
#define TAILQ_CHECK_HEAD(head, field)
#define TAILQ_CHECK_NEXT(elm, field)
#define TAILQ_CHECK_PREV(elm, field)
#endif /* KERNEL_PRIVATE */

/* Append all of head2 to the tail of head1 and leave head2 empty; O(1). */
#define TAILQ_CONCAT(head1, head2, field) do { \
	if (!TAILQ_EMPTY(head2)) { \
		*(head1)->tqh_last = (head2)->tqh_first; \
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
		(head1)->tqh_last = (head2)->tqh_last; \
		TAILQ_INIT((head2)); \
		QMD_TRACE_HEAD(head1); \
		QMD_TRACE_HEAD(head2); \
	} \
} while (0)

#define TAILQ_EMPTY(head)       ((head)->tqh_first == NULL)

#define TAILQ_FIRST(head)       ((head)->tqh_first)

#define TAILQ_FOREACH(var, head, field) \
	for ((var) = TAILQ_FIRST((head)); \
	    (var); \
	    (var) = TAILQ_NEXT((var), field))

/* Removal-safe traversal: the successor is latched into (tvar) before
 * the loop body runs. */
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = TAILQ_FIRST((head)); \
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
	    (var) = (tvar))

#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
	for ((var) = TAILQ_LAST((head), headname); \
	    (var); \
	    (var) = TAILQ_PREV((var), headname, field))

#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
	for ((var) = TAILQ_LAST((head), headname); \
	    (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
	    (var) = (tvar))

#if XNU_KERNEL_PRIVATE
/*
 * Can be used when the initialized HEAD was just bzeroed
 * Works around deficiencies in clang analysis of initialization patterns.
 * See: <rdar://problem/47939050>
 */
#define TAILQ_INIT_AFTER_BZERO(head) do { \
	(head)->tqh_last = &TAILQ_FIRST((head)); \
} while (0)
#endif /* XNU_KERNEL_PRIVATE */

#define TAILQ_INIT(head) do { \
	TAILQ_FIRST((head)) = NULL; \
	(head)->tqh_last = &TAILQ_FIRST((head)); \
	QMD_TRACE_HEAD(head); \
} while (0)


/* Insert (elm) after (listelm), updating tqh_last if it becomes last. */
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
	TAILQ_CHECK_NEXT(listelm, field); \
	if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
		TAILQ_NEXT((elm), field)->field.tqe_prev = \
		    &TAILQ_NEXT((elm), field); \
	else { \
		(head)->tqh_last = &TAILQ_NEXT((elm), field); \
		QMD_TRACE_HEAD(head); \
	} \
	TAILQ_NEXT((listelm), field) = (elm); \
	(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
	QMD_TRACE_ELEM(&(elm)->field); \
	QMD_TRACE_ELEM(&listelm->field); \
} while (0)

/* Insert before (listelm); no head needed because tqe_prev holds the
 * address of whichever pointer references (listelm). */
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
	TAILQ_CHECK_PREV(listelm, field); \
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
	TAILQ_NEXT((elm), field) = (listelm); \
	*(listelm)->field.tqe_prev = (elm); \
	(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
	QMD_TRACE_ELEM(&(elm)->field); \
	QMD_TRACE_ELEM(&listelm->field); \
} while (0)

#define TAILQ_INSERT_HEAD(head, elm, field) do { \
	TAILQ_CHECK_HEAD(head, field); \
	if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
		TAILQ_FIRST((head))->field.tqe_prev = \
		    &TAILQ_NEXT((elm), field); \
	else \
		(head)->tqh_last = &TAILQ_NEXT((elm), field); \
	TAILQ_FIRST((head)) = (elm); \
	(elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
	QMD_TRACE_HEAD(head); \
	QMD_TRACE_ELEM(&(elm)->field); \
} while (0)

#define TAILQ_INSERT_TAIL(head, elm, field) do { \
	TAILQ_NEXT((elm), field) = NULL; \
	(elm)->field.tqe_prev = (head)->tqh_last; \
	*(head)->tqh_last = (elm); \
	(head)->tqh_last = &TAILQ_NEXT((elm), field); \
	QMD_TRACE_HEAD(head); \
	QMD_TRACE_ELEM(&(elm)->field); \
} while (0)

/* Last element in O(1): tqh_last points at the final element's tqe_next
 * field, and a TAILQ_ENTRY's layout matches a head, so casting lets us
 * read "the previous element's next pointer" (i.e., the last element). */
#define TAILQ_LAST(head, headname) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
	(*(((struct headname *)((head)->tqh_last))->tqh_last)) \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)

/* Previous element via the same head/entry layout-pun as TAILQ_LAST. */
#define TAILQ_PREV(elm, headname, field) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

/* Remove (elm) in O(1); the links are then poisoned (TRASHIT) so stale
 * use faults under QUEUE_MACRO_DEBUG. */
#define TAILQ_REMOVE(head, elm, field) do { \
	TAILQ_CHECK_NEXT(elm, field); \
	TAILQ_CHECK_PREV(elm, field); \
	if ((TAILQ_NEXT((elm), field)) != NULL) \
		TAILQ_NEXT((elm), field)->field.tqe_prev = \
		    (elm)->field.tqe_prev; \
	else { \
		(head)->tqh_last = (elm)->field.tqe_prev; \
		QMD_TRACE_HEAD(head); \
	} \
	*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
	TRASHIT((elm)->field.tqe_next); \
	TRASHIT((elm)->field.tqe_prev); \
	QMD_TRACE_ELEM(&(elm)->field); \
} while (0)

/*
 * Exchange the contents of two tail queues; O(1).  Each head's new first
 * element (if any) gets tqe_prev repointed at that head; an emptied
 * queue gets tqh_last repointed at its own tqh_first.
 */
#define TAILQ_SWAP(head1, head2, type, field) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
do { \
	struct type *swap_first = (head1)->tqh_first; \
	struct type **swap_last = (head1)->tqh_last; \
	(head1)->tqh_first = (head2)->tqh_first; \
	(head1)->tqh_last = (head2)->tqh_last; \
	(head2)->tqh_first = swap_first; \
	(head2)->tqh_last = swap_last; \
	if ((swap_first = (head1)->tqh_first) != NULL) \
		swap_first->field.tqe_prev = &(head1)->tqh_first; \
	else \
		(head1)->tqh_last = &(head1)->tqh_first; \
	if ((swap_first = (head2)->tqh_first) != NULL) \
		swap_first->field.tqe_prev = &(head2)->tqh_first; \
	else \
		(head2)->tqh_last = &(head2)->tqh_first; \
} while (0) \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP
808
/*
 * Circular queue definitions.
 * The head itself acts as the ring's sentinel: an empty queue has both
 * cqh_first and cqh_last pointing back at the head.  Deprecated; prefer
 * TAILQ (see the discussion at the top of this file).
 */
#define CIRCLEQ_HEAD(name, type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
struct name { \
	struct type *cqh_first;         /* first element */ \
	struct type *cqh_last;          /* last element */ \
} \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

#define CIRCLEQ_ENTRY(type) \
__MISMATCH_TAGS_PUSH \
__NULLABILITY_COMPLETENESS_PUSH \
struct { \
	struct type *cqe_next;          /* next element */ \
	struct type *cqe_prev;          /* previous element */ \
} \
__NULLABILITY_COMPLETENESS_POP \
__MISMATCH_TAGS_POP

/*
 * Circular queue functions.
 */
#ifdef KERNEL_PRIVATE
/* Kernel-only consistency checks: panic on a broken next/prev pairing.
 * The head pointer doubles as the end-of-ring sentinel value. */
#define CIRCLEQ_CHECK_HEAD(head, field) do { \
	if (__improbable( \
	      CIRCLEQ_FIRST((head)) != ((void*)(head)) && \
	      CIRCLEQ_FIRST((head))->field.cqe_prev != ((void*)(head))))\
		panic("Bad circleq head %p first->prev != head", (head)); \
} while(0)
#define CIRCLEQ_CHECK_NEXT(head, elm, field) do { \
	if (__improbable( \
	      CIRCLEQ_NEXT((elm), field) != ((void*)(head)) && \
	      CIRCLEQ_NEXT((elm), field)->field.cqe_prev != (elm))) \
		panic("Bad circleq elm %p next->prev != elm", (elm)); \
} while(0)
#define CIRCLEQ_CHECK_PREV(head, elm, field) do { \
	if (__improbable( \
	      CIRCLEQ_PREV((elm), field) != ((void*)(head)) && \
	      CIRCLEQ_PREV((elm), field)->field.cqe_next != (elm))) \
		panic("Bad circleq elm %p prev->next != elm", (elm)); \
} while(0)
#else
/* Userspace: the checks compile away. */
#define CIRCLEQ_CHECK_HEAD(head, field)
#define CIRCLEQ_CHECK_NEXT(head, elm, field)
#define CIRCLEQ_CHECK_PREV(head, elm, field)
#endif /* KERNEL_PRIVATE */

#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))

#define CIRCLEQ_FIRST(head) ((head)->cqh_first)

/* Traversal ends when the walk returns to the head sentinel. */
#define CIRCLEQ_FOREACH(var, head, field) \
	for((var) = (head)->cqh_first; \
	    (var) != (void *)(head); \
	    (var) = (var)->field.cqe_next)

#define CIRCLEQ_INIT(head) do { \
	(head)->cqh_first = (void *)(head); \
	(head)->cqh_last = (void *)(head); \
} while (0)

#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
	CIRCLEQ_CHECK_NEXT(head, listelm, field); \
	(elm)->field.cqe_next = (listelm)->field.cqe_next; \
	(elm)->field.cqe_prev = (listelm); \
	if ((listelm)->field.cqe_next == (void *)(head)) \
		(head)->cqh_last = (elm); \
	else \
		(listelm)->field.cqe_next->field.cqe_prev = (elm); \
	(listelm)->field.cqe_next = (elm); \
} while (0)

#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
	CIRCLEQ_CHECK_PREV(head, listelm, field); \
	(elm)->field.cqe_next = (listelm); \
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
	if ((listelm)->field.cqe_prev == (void *)(head)) \
		(head)->cqh_first = (elm); \
	else \
		(listelm)->field.cqe_prev->field.cqe_next = (elm); \
	(listelm)->field.cqe_prev = (elm); \
} while (0)

#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
	CIRCLEQ_CHECK_HEAD(head, field); \
	(elm)->field.cqe_next = (head)->cqh_first; \
	(elm)->field.cqe_prev = (void *)(head); \
	if ((head)->cqh_last == (void *)(head)) \
		(head)->cqh_last = (elm); \
	else \
		(head)->cqh_first->field.cqe_prev = (elm); \
	(head)->cqh_first = (elm); \
} while (0)

#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
	(elm)->field.cqe_next = (void *)(head); \
	(elm)->field.cqe_prev = (head)->cqh_last; \
	if ((head)->cqh_first == (void *)(head)) \
		(head)->cqh_first = (elm); \
	else \
		(head)->cqh_last->field.cqe_next = (elm); \
	(head)->cqh_last = (elm); \
} while (0)

#define CIRCLEQ_LAST(head) ((head)->cqh_last)

#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)

#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)

/* Remove (elm) in O(1); either neighbor may be the head sentinel. */
#define CIRCLEQ_REMOVE(head, elm, field) do { \
	CIRCLEQ_CHECK_NEXT(head, elm, field); \
	CIRCLEQ_CHECK_PREV(head, elm, field); \
	if ((elm)->field.cqe_next == (void *)(head)) \
		(head)->cqh_last = (elm)->field.cqe_prev; \
	else \
		(elm)->field.cqe_next->field.cqe_prev = \
		    (elm)->field.cqe_prev; \
	if ((elm)->field.cqe_prev == (void *)(head)) \
		(head)->cqh_first = (elm)->field.cqe_next; \
	else \
		(elm)->field.cqe_prev->field.cqe_next = \
		    (elm)->field.cqe_next; \
} while (0)
937
#ifdef _KERNEL

#if NOTFB31

/*
 * XXX insque() and remque() are an old way of handling certain queues.
 * They bogusly assume that all queue heads look alike.
 */

/* Minimal node layout shared by insque()/remque(): a doubly linked ring
 * whose first two pointer fields must match every participating object. */
struct quehead {
	struct quehead *qh_link;        /* next element in the ring */
	struct quehead *qh_rlink;       /* previous element in the ring */
};

#ifdef __GNUC__
#ifdef KERNEL_PRIVATE
/*
 * Sanity-check the forward link of queue element "a": if a successor
 * exists, its back pointer must reference "a".  Panics on corruption.
 */
static __inline void
chkquenext(void *a)
{
	struct quehead *element = (struct quehead *)a;
	/* __improbable: corruption is the cold path */
	if (__improbable(element->qh_link != NULL &&
	    element->qh_link->qh_rlink != element)) {
		panic("Bad que elm %p next->prev != elm", a);
	}
}
963
/*
 * Sanity-check the backward link of queue element "a": if a predecessor
 * exists, its forward pointer must reference "a".  Panics on corruption.
 */
static __inline void
chkqueprev(void *a)
{
	struct quehead *element = (struct quehead *)a;
	/* __improbable: corruption is the cold path */
	if (__improbable(element->qh_rlink != NULL &&
	    element->qh_rlink->qh_link != element)) {
		panic("Bad que elm %p prev->next != elm", a);
	}
}
#else /* !KERNEL_PRIVATE */
/* Outside the kernel the consistency checks compile away. */
#define chkquenext(a)
#define chkqueprev(a)
#endif /* KERNEL_PRIVATE */
978 static __inline void
979 insque(void *a, void *b)
980 {
981 struct quehead *element = (struct quehead *)a,
982 *head = (struct quehead *)b;
983 chkquenext(head);
984
985 element->qh_link = head->qh_link;
986 element->qh_rlink = head;
987 head->qh_link = element;
988 element->qh_link->qh_rlink = element;
989 }
990
991 static __inline void
992 remque(void *a)
993 {
994 struct quehead *element = (struct quehead *)a;
995 chkquenext(element);
996 chkqueprev(element);
997
998 element->qh_link->qh_rlink = element->qh_rlink;
999 element->qh_rlink->qh_link = element->qh_link;
1000 element->qh_rlink = 0;
1001 }
1002
#else /* !__GNUC__ */

/* Non-GNU compilers get out-of-line implementations instead. */
void    insque(void *a, void *b);
void    remque(void *a);

#endif /* __GNUC__ */

#endif /* NOTFB31 */
#endif /* _KERNEL */

#endif /* !_SYS_QUEUE_H_ */