/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/zalloc.h>
#include <kern/queue.h>

#include <mach/sync_policy.h>
#include <kern/mach_param.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
#include <vm/vm_kern.h>
/* forward declarations */
static boolean_t wait_queue_member_locked(
			wait_queue_t wq,
			wait_queue_set_t wq_set);

static void wait_queues_init(void);
#define WAIT_QUEUE_MAX thread_max
#define WAIT_QUEUE_SET_MAX task_max * 3
#define WAIT_QUEUE_LINK_MAX PORT_MAX / 2 + (WAIT_QUEUE_MAX * WAIT_QUEUE_SET_MAX) / 64
static zone_t _wait_queue_link_zone;
static zone_t _wait_queue_set_zone;
static zone_t _wait_queue_zone;
/* see rdar://6737748&5561610; we need an unshadowed
 * definition of a WaitQueueLink for debugging,
 * but it needs to be used somewhere to wind up in
 * the dSYM file. */
volatile WaitQueueLink *unused_except_for_debugging;
/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 *
 *	The wait event hash table declarations are as follows:
 */
struct wait_queue boot_wait_queue[1];
__private_extern__ struct wait_queue *wait_queues = &boot_wait_queue[0];
__private_extern__ uint32_t num_wait_queues = 1;
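
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * a global wakeup conceptually resolves its event to one of the hash
 * buckets declared above.  The mask form assumes num_wait_queues is a
 * power of 2, which wait_queues_init() below guarantees; the bit-fold
 * used here is a hypothetical stand-in, not the kernel's actual event
 * hash.
 */
#if 0	/* example only */
static inline wait_queue_t
example_event_to_bucket(event64_t event)
{
	uint32_t hash = (uint32_t)event ^ (uint32_t)(event >> 6);

	/* a power-of-2 table lets us mask instead of dividing */
	return &wait_queues[hash & (num_wait_queues - 1)];
}
#endif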
#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
#define ROUNDDOWN(x,y) (((x)/(y))*(y))
static uint32_t
compute_wait_hash_size(void)
{
	uint32_t hsize, queues;

	if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize)))
		return (hsize);

	queues = thread_max / 11;
	hsize = P2ROUNDUP(queues * sizeof(struct wait_queue), PAGE_SIZE);

	return hsize;
}
static void
wait_queues_init(void)
{
	uint32_t i, whsize, qsz;
	kern_return_t kret;

	/*
	 * Determine the amount of memory we're willing to reserve for
	 * the waitqueue hash table
	 */
	whsize = compute_wait_hash_size();

	/* Determine the number of waitqueues we can fit. */
	qsz = sizeof(struct wait_queue);
	whsize = ROUNDDOWN(whsize, qsz);
	num_wait_queues = whsize / qsz;

	/*
	 * The hash algorithm requires that this be a power of 2, so we
	 * just mask off all the low-order bits.
	 */
	for (i = 0; i < 31; i++) {
		uint32_t bit = (1 << i);
		if ((num_wait_queues & bit) == num_wait_queues)
			break;
		num_wait_queues &= ~bit;
	}
	assert(num_wait_queues > 0);

	/* Now determine how much memory we really need. */
	whsize = P2ROUNDUP(num_wait_queues * qsz, PAGE_SIZE);

	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues,
	    whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);

	if (kret != KERN_SUCCESS || wait_queues == NULL)
		panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);

	for (i = 0; i < num_wait_queues; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}
void
wait_queue_bootstrap(void)
{
	wait_queues_init();
	_wait_queue_zone = zinit(sizeof(struct wait_queue),
				 WAIT_QUEUE_MAX * sizeof(struct wait_queue),
				 sizeof(struct wait_queue),
				 "wait queues");
	zone_change(_wait_queue_zone, Z_NOENCRYPT, TRUE);

	_wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
				     WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
				     sizeof(struct wait_queue_set),
				     "wait queue sets");
	zone_change(_wait_queue_set_zone, Z_NOENCRYPT, TRUE);

	_wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
				      WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
				      sizeof(struct _wait_queue_link),
				      "wait queue links");
	zone_change(_wait_queue_link_zone, Z_NOENCRYPT, TRUE);
}
/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	/* only FIFO and LIFO for now */
	if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
	wq->wq_type = _WAIT_QUEUE_inited;
	wq->wq_eventmask = 0;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside
 *		of the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) zalloc(_wait_queue_zone);
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			zfree(_wait_queue_zone, wq);
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	zfree(_wait_queue_zone, wq);
	return KERN_SUCCESS;
}
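
/*
 * Usage sketch (illustrative only, compiled out): the expected lifecycle
 * for a wait queue obtained from wait_queue_alloc().  The caller and the
 * "example" names are hypothetical.
 */
#if 0	/* example only */
static void
example_wait_queue_lifecycle(void)
{
	wait_queue_t wq;

	wq = wait_queue_alloc(SYNC_POLICY_FIFO);
	if (wq == WAIT_QUEUE_NULL)
		return;		/* resource shortage */

	/* ... threads assert waits on wq and are woken here ... */

	/* succeeds only if no threads are still queued */
	(void) wait_queue_free(wq);
}
#endif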
/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_prepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_prepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	queue_init(&wqset->wqs_preposts);
	return KERN_SUCCESS;
}
/* legacy API */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}
kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	s = splsched();
	wqs_lock(wq_set);
	q = &wq_set->wqs_preposts;
	while (!queue_empty(q)) {
		queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
		assert(!wql_is_preposted(wql));
	}
	wqs_unlock(wq_set);
	splx(s);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Conditions:
 *		May block.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) zalloc(_wait_queue_set_zone);
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			zfree(_wait_queue_set_zone, wq_set);
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}
/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	zfree(_wait_queue_set_zone, wq_set);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_link_noalloc;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_LINK_NOALLOC ((void *)&_wait_queue_link_noalloc)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
		queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
		"wait queue element list corruption: wq=%#x, wqe=%#x", \
		(wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT(((((wql)->wql_type == WAIT_QUEUE_LINK) || \
			((wql)->wql_type == WAIT_QUEUE_LINK_NOALLOC)) && \
		((wql)->wql_setqueue == (wqs)) && \
		(((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) || \
			((wql)->wql_queue->wq_type == _WAIT_QUEUE_SET_inited)) && \
		(WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
		"wait queue set links corruption: wqs=%#x, wql=%#x", \
		(wqs), (wql))
#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */
/*
 *	Routine:	wait_queue_global
 *	Purpose:
 *		Indicate if this wait queue is a global wait queue or not.
 */
static boolean_t
wait_queue_global(
	wait_queue_t wq)
{
	if ((wq >= wait_queues) && (wq <= (wait_queues + num_wait_queues))) {
		return TRUE;
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK) ||
		    (wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC)) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
/*
 *	Routine:	wait_queue_link_internal
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that was provided.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 *		The wait_queue_link structure must already be properly typed
 */
static kern_return_t
wait_queue_link_internal(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
		     wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	assert(wql->wql_type == WAIT_QUEUE_LINK ||
	       wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);

	wql->wql_queue = wq;
	wql_clear_prepost(wql);
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that the caller provided.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wql->wql_type = WAIT_QUEUE_LINK_NOALLOC;
	return wait_queue_link_internal(wq, wq_set, wql);
}
/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	wql->wql_type = WAIT_QUEUE_LINK;
	ret = wait_queue_link_internal(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		zfree(_wait_queue_link_zone, wql);

	return ret;
}
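
/*
 * Usage sketch (illustrative only, compiled out): tying a wait queue into
 * a set with an internally allocated link, then undoing the linkage.  The
 * function name is hypothetical and error handling is abbreviated.
 */
#if 0	/* example only */
static void
example_link_queue_to_set(wait_queue_t wq, wait_queue_set_t wq_set)
{
	/* allocates the connecting link from _wait_queue_link_zone */
	if (wait_queue_link(wq, wq_set) != KERN_SUCCESS)
		return;

	/* ... wakeups posted to wq may now also select waiters on the set ... */

	/* finds the WAIT_QUEUE_LINK-typed link and frees it */
	(void) wait_queue_unlink(wq, wq_set);
}
#endif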
wait_queue_link_t
wait_queue_link_allocate(void)
{
	wait_queue_link_t wql;

	wql = zalloc(_wait_queue_link_zone); /* Can't fail */
	bzero(wql, sizeof(*wql));
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	return wql;
}

kern_return_t
wait_queue_link_free(wait_queue_link_t wql)
{
	zfree(_wait_queue_link_zone, wql);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	if (wql_is_preposted(wql)) {
		queue_t ppq = &wq_set->wqs_preposts;
		queue_remove(ppq, wql, wait_queue_link_t, wql_preposts);
	}
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *	Routine:	wait_queue_unlink_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		returning the linkage structure to the caller to
 *		free later.
 *	Conditions:
 *		The set queue must be linked to the wait queue.
 */
kern_return_t
wait_queue_unlink_nofree(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t *wqlp)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {

				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				*wqlp = wql;
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The set queue must be linked to the wait queue.
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				boolean_t alloced;

				alloced = (wql->wql_type == WAIT_QUEUE_LINK);
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				if (alloced)
					zfree(_wait_queue_link_zone, wql);
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlink_all_nofree_locked
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are returned to the caller for
 *		later freeing.
 *	Conditions:
 *		Wait queue locked.
 */
static void
wait_queue_unlink_all_nofree_locked(
	wait_queue_t wq,
	queue_t links)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
}
/*
 *	Routine:	wait_queue_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are returned to the caller for
 *		later freeing.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all_nofree(
	wait_queue_t wq,
	queue_t links)
{
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	wait_queue_unlink_all_nofree_locked(wq, links);
	wait_queue_unlock(wq);
	splx(s);

	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_all_locked
 *	Purpose:
 *		Remove the linkage between a locked wait queue and all its
 *		sets and enqueue the allocated ones onto the links queue
 *		provided.
 *	Conditions:
 *		Wait queue locked.
 */
static void
wait_queue_unlink_all_locked(
	wait_queue_t wq,
	queue_t links)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		boolean_t alloced;

		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
		if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			if (alloced)
				enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures that were allocated internally
 *		are freed.  The others are the caller's responsibility.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);
	wait_queue_unlink_all_locked(wq, links);
	wait_queue_unlock(wq);
	splx(s);

	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}

	return(KERN_SUCCESS);
}
/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all(wq_set);
}
/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues and all the sets it may be a member of.
 *		The link structures are returned for later freeing by the
 *		caller.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set,
	queue_t links)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	/* remove the wait queues that are members of our set */
	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}

	/* remove this set from sets it belongs to */
	wait_queue_unlink_all_nofree_locked(&wq_set->wqs_wait_queue, links);

	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues and all the sets it may be a member of.
 *		The link structures are freed for those links which were
 *		dynamically allocated.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	/* remove the wait queues that are members of our set */
	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			boolean_t alloced;

			alloced = (wql->wql_type == WAIT_QUEUE_LINK);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			if (alloced)
				enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}

	/* remove this set from sets it belongs to */
	wait_queue_unlink_all_locked(&wq_set->wqs_wait_queue, links);

	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty (links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}
	return(KERN_SUCCESS);
}
kern_return_t
wait_queue_set_unlink_one(
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_t wq;
	spl_t s;

	assert(wait_queue_is_set(wq_set));

retry:
	s = splsched();
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	/* Already unlinked, e.g. by selclearthread() */
	if (wql->wql_type == WAIT_QUEUE_UNLINKED) {
		goto out;
	}

	WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);

	/* On a wait queue, and we hold set queue lock ... */
	wq = wql->wql_queue;
	if (wait_queue_lock_try(wq)) {
		wait_queue_unlink_locked(wq, wq_set, wql);
		wait_queue_unlock(wq);
	} else {
		wqs_unlock(wq_set);
		splx(s);
		delay(1);
		goto retry;
	}

out:
	wqs_unlock(wq_set);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint64_t deadline,
	uint64_t leeway,
	thread_t thread)
{
	wait_result_t wait_result;
	boolean_t realtime;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (event == NO_EVENT64 && wqs_is_preposted(wqs))
			return(THREAD_AWAKENED);
	}

	/*
	 * Realtime threads get priority for wait queue placements.
	 * This allows wait_queue_wakeup_one to prefer a waiting
	 * realtime thread, similar in principle to performing
	 * a wait_queue_wakeup_all and allowing scheduler prioritization
	 * to run the realtime thread, but without causing the
	 * lock contention of that scenario.
	 */
	realtime = (thread->sched_pri >= BASEPRI_REALTIME);

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (!wq->wq_fifo
		    || (thread->options & TH_OPT_VMPRIV)
		    || realtime)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			if (!timer_call_enter_with_leeway(&thread->wait_timer, NULL,
			    deadline, leeway, urgency, FALSE))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
		if (wait_queue_global(wq)) {
			wq->wq_eventmask = wq->wq_eventmask | CAST_TO_EVENT_MASK(event);
		}
	}
	return(wait_result);
}
/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
					      interruptible,
					      TIMEOUT_URGENCY_SYS_NORMAL,
					      deadline, 0,
					      thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
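
/*
 * Usage sketch (illustrative only, compiled out): the conventional
 * assert-wait/block pattern this routine supports.  thread_block() and
 * THREAD_CONTINUE_NULL are the standard Mach blocking primitives; the
 * caller and its queue/event are hypothetical.
 */
#if 0	/* example only */
static wait_result_t
example_wait_for_event(wait_queue_t wq, event_t ev)
{
	wait_result_t wres;

	wres = wait_queue_assert_wait(wq, ev, THREAD_UNINT, 0);
	if (wres == THREAD_WAITING)
		wres = thread_block(THREAD_CONTINUE_NULL);
	return wres;
}
#endif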
/*
 *	Routine:	wait_queue_assert_wait_with_leeway
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *		Deadline values are specified with urgency and leeway.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait_with_leeway(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint64_t deadline,
	uint64_t leeway)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
					      interruptible,
					      urgency, deadline, leeway,
					      thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible,
					      TIMEOUT_URGENCY_SYS_NORMAL,
					      deadline, 0,
					      thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
/*
 *	Routine:	wait_queue_assert_wait64_with_leeway
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *		Deadline values are specified with urgency and leeway.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64_with_leeway(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	wait_timeout_urgency_t urgency,
	uint64_t deadline,
	uint64_t leeway)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible,
					      urgency, deadline, leeway,
					      thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	unsigned long eventmask = 0;
	boolean_t is_queue_global = FALSE;
	queue_t q;

	is_queue_global = wait_queue_global(wq);
	if (is_queue_global) {
		eventmask = CAST_TO_EVENT_MASK(event);
		if ((wq->wq_eventmask & eventmask) != eventmask) {
			return;
		}
		eventmask = 0;
	}
	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue. If it is marked
			 * as pre-post, and it is the "generic event" then mark
			 * it pre-posted now (if not already).
			 */
			wqs_lock(set_queue);
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			if (! wait_queue_empty(&set_queue->wqs_wait_queue))
				_wait_queue_select64_all(&set_queue->wqs_wait_queue, event, wake_queue);
			wqs_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)(void *)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue((queue_entry_t) t);
				enqueue (wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			} else {
				if (is_queue_global) {
					eventmask = eventmask |
						CAST_TO_EVENT_MASK(t->wait_event);
				}
			}
		}
		wq_element = wqe_next;
	}
	/* Update event mask if global wait queue */
	if (is_queue_global) {
		wq->wq_eventmask = eventmask;
	}
}
/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

//	assert(wait_queue_held(wq));
//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
//		panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq);	/* (BRINGUP) */
//	}

	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty (q)) {
		thread_t thread = (thread_t)(void *) dequeue(q);
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
//		panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq);	/* (BRINGUP) */
//	}
	ret = wait_queue_wakeup64_all_locked(
				wq, CAST_DOWN(event64_t,event),
				result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO/LIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	thread_t fifo_thread = THREAD_NULL;
	boolean_t is_queue_fifo = TRUE;
	boolean_t is_queue_global = FALSE;
	boolean_t thread_imp_donor = FALSE;
	boolean_t realtime = FALSE;
	unsigned long eventmask = 0;
	queue_t q;

	if (wait_queue_global(wq)) {
		eventmask = CAST_TO_EVENT_MASK(event);
		if ((wq->wq_eventmask & eventmask) != eventmask) {
			return THREAD_NULL;
		}
		eventmask = 0;
		is_queue_global = TRUE;
#if IMPORTANCE_INHERITANCE
		is_queue_fifo = FALSE;
#endif /* IMPORTANCE_INHERITANCE */
	}

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue. If the set
			 * supports pre-posting, it isn't already preposted,
			 * and we didn't find a thread in the set, then mark it.
			 *
			 * If we later find a thread, there may be a spurious
			 * pre-post here on this set. The wait side has to check
			 * for that either pre- or post-wait.
			 */
			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
			}
			if (t != THREAD_NULL) {
				wqs_unlock(set_queue);
				return t;
			}
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			wqs_unlock(set_queue);

		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			t = (thread_t)(void *)wq_element;
			if (t->wait_event == event) {
				if (fifo_thread == THREAD_NULL) {
					fifo_thread = t;
				}
#if IMPORTANCE_INHERITANCE
				/*
				 * Checking the imp donor bit does not need the thread
				 * or task lock since we hold the wait queue lock and
				 * the thread can not be removed from the queue without
				 * acquiring that lock. The imp donor bit may change
				 * once we read its value, but it is ok to wake
				 * a thread while someone drops an importance assertion
				 * on that thread.
				 */
				thread_imp_donor = task_is_importance_donor(t->task);
#endif /* IMPORTANCE_INHERITANCE */
				realtime = (t->sched_pri >= BASEPRI_REALTIME);
				if (is_queue_fifo || thread_imp_donor || realtime ||
				    (t->options & TH_OPT_VMPRIV)) {
					thread_lock(t);
					remqueue((queue_entry_t) t);
					t->wait_queue = WAIT_QUEUE_NULL;
					t->wait_event = NO_EVENT64;
					t->at_safe_point = FALSE;
					return t;	/* still locked */
				}
			}
			if (is_queue_global) {
				eventmask = eventmask | CAST_TO_EVENT_MASK(t->wait_event);
			}
			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}

	if (is_queue_global) {
		wq->wq_eventmask = eventmask;
	}
#if IMPORTANCE_INHERITANCE
	if (fifo_thread != THREAD_NULL) {
		thread_lock(fifo_thread);
		remqueue((queue_entry_t) fifo_thread);
		fifo_thread->wait_queue = WAIT_QUEUE_NULL;
		fifo_thread->wait_event = NO_EVENT64;
		fifo_thread->at_safe_point = FALSE;
		return fifo_thread;	/* still locked */
	}
#endif /* IMPORTANCE_INHERITANCE */
	return THREAD_NULL;
}
/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread off its wait queue and (possibly) unlock
 *		the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue((queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;

	if (unlock)
		wait_queue_unlock(waitq);
}
/*
 *	Routine:	wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue((queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
								  event,
								  thread);
			}
			wqs_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}
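
/*
 * Usage sketch (illustrative only, compiled out): callers of the identity
 * variant receive the awakened thread still locked and must drop the
 * thread lock themselves.  The caller context here is hypothetical.
 */
#if 0	/* example only */
static void
example_wakeup_identity(wait_queue_t wq, event64_t ev)
{
	spl_t s = splsched();
	thread_t t;

	wait_queue_lock(wq);
	/* TRUE: the routine drops the wait queue lock for us */
	t = wait_queue_wakeup64_identity_locked(wq, ev, THREAD_AWAKENED, TRUE);
	if (t != THREAD_NULL) {
		/* ... inspect t while it is still locked ... */
		thread_unlock(t);
	}
	splx(s);
}
#endif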
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS: It was, and is, now removed.
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result,
	int priority)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		if (thread->sched_pri < priority) {
			if (priority <= MAXPRI) {
				set_sched_pri(thread, priority);

				thread->was_promoted_on_wakeup = 1;
				thread->sched_flags |= TH_SFLAG_PROMOTED;
			}
		}
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, CAST_DOWN(event64_t,event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set's queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}