/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/zalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/mach_param.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
#include <vm/vm_kern.h>
/* forward declarations */
static boolean_t wait_queue_member_locked(
			wait_queue_t		wq,
			wait_queue_set_t	wq_set);

static void wait_queues_init(void) __attribute__((section("__TEXT, initcode")));
#define WAIT_QUEUE_MAX		thread_max
#define WAIT_QUEUE_SET_MAX	task_max * 3
#define WAIT_QUEUE_LINK_MAX	PORT_MAX / 2 + (WAIT_QUEUE_MAX * WAIT_QUEUE_SET_MAX) / 64
static zone_t _wait_queue_link_zone;
static zone_t _wait_queue_set_zone;
static zone_t _wait_queue_zone;
/* see rdar://6737748&5561610; we need an unshadowed
 * definition of a WaitQueueLink for debugging,
 * but it needs to be used somewhere to wind up in
 * the dSYM file. */
volatile WaitQueueLink *unused_except_for_debugging;
/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 */
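/*
 * A minimal sketch of the protocol described above, using the public
 * assert_wait()/thread_block()/thread_wakeup() interfaces.  This is an
 * editorial illustration (the "example_" names are hypothetical, and the
 * block is not compiled); the event is only a hash key that both sides
 * must agree on.
 */
#if 0	/* illustration only */
static volatile boolean_t example_done;

static void
example_waiter(void)
{
	wait_result_t wres;

	wres = assert_wait((event_t)&example_done, THREAD_UNINT);
	if (wres == THREAD_WAITING)
		wres = thread_block(THREAD_CONTINUE_NULL);
	/* example_done is now set (or the wait was otherwise ended) */
}

static void
example_poster(void)
{
	example_done = TRUE;
	thread_wakeup((event_t)&example_done);
}
#endif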
/*
 *	The wait event hash table declarations are as follows:
 */

struct wait_queue boot_wait_queue[1];
__private_extern__ struct wait_queue *wait_queues = &boot_wait_queue[0];
__private_extern__ uint32_t num_wait_queues = 1;
#define	P2ROUNDUP(x, align)	(-(-((uint32_t)(x)) & -(align)))
#define ROUNDDOWN(x,y)		(((x)/(y))*(y))
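/*
 * Worked examples (editorial comments, not from the original source):
 * P2ROUNDUP(5000, 4096) == 8192 -- two's-complement masking rounds up to
 * the next power-of-2 boundary, since -(-5000 & -4096) == 8192.
 * ROUNDDOWN(10000, 4096) == 8192 -- integer division truncates, and
 * multiplying back snaps the value to the next-lower multiple.
 */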
static uint32_t
compute_wait_hash_size(void)
{
	uint32_t hsize, queues;

	if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize)))
		return (hsize);

	queues = thread_max / 11;
	hsize = P2ROUNDUP(queues * sizeof(struct wait_queue), PAGE_SIZE);

	return hsize;
}
static void
wait_queues_init(void)
{
	uint32_t	i, whsize, qsz;
	kern_return_t	kret;

	/*
	 * Determine the amount of memory we're willing to reserve for
	 * the waitqueue hash table.
	 */
	whsize = compute_wait_hash_size();

	/* Determine the number of waitqueues we can fit. */
	qsz = sizeof(struct wait_queue);
	whsize = ROUNDDOWN(whsize, qsz);
	num_wait_queues = whsize / qsz;

	/*
	 * The hash algorithm requires that this be a power of 2, so we
	 * just mask off all the low-order bits.
	 */
	for (i = 0; i < 31; i++) {
		uint32_t bit = (1 << i);
		if ((num_wait_queues & bit) == num_wait_queues)
			break;
		num_wait_queues &= ~bit;
	}
	assert(num_wait_queues > 0);

	/* Now determine how much memory we really need. */
	whsize = P2ROUNDUP(num_wait_queues * qsz, PAGE_SIZE);

	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues,
	    whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);

	if (kret != KERN_SUCCESS || wait_queues == NULL)
		panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);

	for (i = 0; i < num_wait_queues; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}
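/*
 * Illustrative sketch (editorial; the kernel's real event hash lives in
 * wait_queue.h, and the truncating cast here is only for illustration):
 * because num_wait_queues was forced to a power of 2 above, folding an
 * event into a bucket index needs only a mask.
 */
#if 0	/* illustration only */
static inline wait_queue_t
example_event_to_waitq(event64_t event)
{
	uint32_t idx = ((uint32_t)event) & (num_wait_queues - 1);
	return &wait_queues[idx];
}
#endif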
void
wait_queue_bootstrap(void)
{
	wait_queues_init();

	_wait_queue_zone = zinit(sizeof(struct wait_queue),
	    WAIT_QUEUE_MAX * sizeof(struct wait_queue),
	    sizeof(struct wait_queue),
	    "wait queues");
	zone_change(_wait_queue_zone, Z_NOENCRYPT, TRUE);

	_wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
	    WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
	    sizeof(struct wait_queue_set),
	    "wait queue sets");
	zone_change(_wait_queue_set_zone, Z_NOENCRYPT, TRUE);

	_wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
	    WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
	    sizeof(struct _wait_queue_link),
	    "wait queue links");
	zone_change(_wait_queue_link_zone, Z_NOENCRYPT, TRUE);
}
/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	/* only FIFO and LIFO for now */
	if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside of
 *		the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) zalloc(_wait_queue_zone);
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			zfree(_wait_queue_zone, wq);
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
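/*
 * A minimal usage sketch (editorial; hypothetical caller): allocate a
 * FIFO wait queue, wait/wake through it, and free it once it is known
 * to be empty.
 */
#if 0	/* illustration only */
static void
example_alloc_free(void)
{
	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);

	if (wq == WAIT_QUEUE_NULL)
		return;			/* resource shortage */
	/* ... wait_queue_assert_wait() / wait_queue_wakeup_all() ... */
	(void) wait_queue_free(wq);	/* fails if threads are still queued */
}
#endif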
/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	zfree(_wait_queue_zone, wq);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_prepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_prepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	queue_init(&wqset->wqs_preposts);
	return KERN_SUCCESS;
}
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}
kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	s = splsched();
	wqs_lock(wq_set);
	q = &wq_set->wqs_preposts;
	while (!queue_empty(q)) {
		queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
		assert(!wql_is_preposted(wql));
	}
	wqs_unlock(wq_set);
	splx(s);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) zalloc(_wait_queue_set_zone);
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			zfree(_wait_queue_set_zone, wq_set);
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}
/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;
	zfree(_wait_queue_set_zone, wq_set);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_link_noalloc;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK		((void *)&_wait_queue_link)
#define WAIT_QUEUE_LINK_NOALLOC	((void *)&_wait_queue_link_noalloc)
#define WAIT_QUEUE_UNLINKED	((void *)&_wait_queue_unlinked)
#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
	  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
	  "wait queue element list corruption: wq=%#x, wqe=%#x", \
	  (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT(((((wql)->wql_type == WAIT_QUEUE_LINK) || \
	    ((wql)->wql_type == WAIT_QUEUE_LINK_NOALLOC)) && \
	    ((wql)->wql_setqueue == (wqs)) && \
	    (((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) || \
	     ((wql)->wql_queue->wq_type == _WAIT_QUEUE_SET_inited)) && \
	    (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
	    "wait queue set links corruption: wqs=%#x, wql=%#x", \
	    (wqs), (wql))
#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */
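/*
 * With _WAIT_QUEUE_DEBUG_ defined, WAIT_QUEUE_CHECK/WAIT_QUEUE_SET_CHECK
 * walk the whole queue or set-link chain and panic on corruption; in
 * normal builds they compile away and WQASSERT reduces to assert().
 */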
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK) ||
		    (wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC)) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
/*
 *	Routine:	wait_queue_link_internal
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that was provided.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 *		The wait_queue_link structure must already be properly typed
 */
static kern_return_t
wait_queue_link_internal(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
		     wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	assert(wql->wql_type == WAIT_QUEUE_LINK ||
	       wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);

	wql->wql_queue = wq;
	wql_clear_prepost(wql);
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that the caller provides.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wql->wql_type = WAIT_QUEUE_LINK_NOALLOC;
	return wait_queue_link_internal(wq, wq_set, wql);
}
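/*
 * A minimal usage sketch (editorial; "example_" names are hypothetical):
 * a client that cannot block for zalloc() embeds raw storage of
 * wait_queue_link_size() bytes -- the opaque WaitQueueLink above -- and
 * hands it to wait_queue_link_noalloc().
 */
#if 0	/* illustration only */
struct example_client {
	WaitQueueLink	ec_link;	/* opaque embedded link storage */
};

static kern_return_t
example_link_noalloc(struct example_client *ec, wait_queue_t wq,
    wait_queue_set_t wq_set)
{
	return wait_queue_link_noalloc(wq, wq_set, &ec->ec_link);
}
#endif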
/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	wql->wql_type = WAIT_QUEUE_LINK;
	ret = wait_queue_link_internal(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		zfree(_wait_queue_link_zone, wql);

	return ret;
}
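/*
 * A minimal usage sketch (editorial; hypothetical caller): join a wait
 * queue to a set with an internally allocated link, then sever it; the
 * link allocated here is freed by wait_queue_unlink().
 */
#if 0	/* illustration only */
static void
example_link_unlink(wait_queue_t wq, wait_queue_set_t wq_set)
{
	if (wait_queue_link(wq, wq_set) != KERN_SUCCESS)
		return;
	assert(wait_queue_member(wq, wq_set));
	(void) wait_queue_unlink(wq, wq_set);
}
#endif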
wait_queue_link_t
wait_queue_link_allocate(void)
{
	wait_queue_link_t wql;

	wql = zalloc(_wait_queue_link_zone); /* Can't fail */
	bzero(wql, sizeof(*wql));
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	return wql;
}

void
wait_queue_link_free(wait_queue_link_t wql)
{
	zfree(_wait_queue_link_zone, wql);
}
/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	if (wql_is_preposted(wql)) {
		queue_t ppq = &wq_set->wqs_preposts;
		queue_remove(ppq, wql, wait_queue_link_t, wql_preposts);
	}
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *	Routine:	wait_queue_unlink_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		returning the linkage structure to the caller to
 *		free later.
 *	Conditions:
 *		The wait queue must be a member of the set queue
 */
kern_return_t
wait_queue_unlink_nofree(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t *wqlp)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {

				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				*wqlp = wql;
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue must be a member of the set queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				boolean_t alloced;

				alloced = (wql->wql_type == WAIT_QUEUE_LINK);
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				if (alloced)
					zfree(_wait_queue_link_zone, wql);
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlink_all_nofree_locked
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are returned to the caller for
 *		later freeing.
 *	Conditions:
 *		Wait queue locked.
 */
static void
wait_queue_unlink_all_nofree_locked(
	wait_queue_t wq,
	queue_t links)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
}
/*
 *	Routine:	wait_queue_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are returned to the caller for
 *		later freeing.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all_nofree(
	wait_queue_t wq,
	queue_t links)
{
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	wait_queue_unlink_all_nofree_locked(wq, links);
	wait_queue_unlock(wq);
	splx(s);

	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_all_locked
 *	Purpose:
 *		Remove the linkage between a locked wait queue and all its
 *		sets and enqueue the allocated ones onto the links queue
 *		provided.
 *	Conditions:
 *		Wait queue locked.
 */
static void
wait_queue_unlink_all_locked(
	wait_queue_t wq,
	queue_t links)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		boolean_t alloced;

		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
		if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			if (alloced)
				enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures that were allocated internally
 *		are freed.  The others are the caller's responsibility.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);
	wait_queue_unlink_all_locked(wq, links);
	wait_queue_unlock(wq);
	splx(s);

	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}

	return(KERN_SUCCESS);
}
/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all(wq_set);
}
/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues and all the sets it may be a member of.
 *		The link structures are returned for later freeing by the
 *		caller.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set,
	queue_t links)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wqs_lock(wq_set);

	/* remove the wait queues that are members of our set */
	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			s = splsched();
			wqs_lock(wq_set);
		}
	}

	/* remove this set from sets it belongs to */
	wait_queue_unlink_all_nofree_locked(&wq_set->wqs_wait_queue, links);

	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues and all the sets it may be a member of.
 *		The link structures are freed for those links which were
 *		dynamically allocated.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wqs_lock(wq_set);

	/* remove the wait queues that are members of our set */
	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			boolean_t alloced;

			alloced = (wql->wql_type == WAIT_QUEUE_LINK);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			if (alloced)
				enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			s = splsched();
			wqs_lock(wq_set);
		}
	}

	/* remove this set from sets it belongs to */
	wait_queue_unlink_all_locked(&wq_set->wqs_wait_queue, links);

	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty (links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}
	return(KERN_SUCCESS);
}
kern_return_t
wait_queue_set_unlink_one(
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_t wq;
	spl_t s;

	assert(wait_queue_is_set(wq_set));

retry:
	s = splsched();
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	/* Already unlinked, e.g. by selclearthread() */
	if (wql->wql_type == WAIT_QUEUE_UNLINKED) {
		goto out;
	}

	WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);

	/* On a wait queue, and we hold set queue lock ... */
	wq = wql->wql_queue;
	if (wait_queue_lock_try(wq)) {
		wait_queue_unlink_locked(wq, wq_set, wql);
		wait_queue_unlock(wq);
	} else {
		wqs_unlock(wq_set);
		splx(s);
		delay(1);
		goto retry;
	}

out:
	wqs_unlock(wq_set);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;
	boolean_t realtime;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (event == NO_EVENT64 && wqs_is_preposted(wqs))
			return(THREAD_AWAKENED);
	}

	/*
	 * Realtime threads get priority for wait queue placements.
	 * This allows wait_queue_wakeup_one to prefer a waiting
	 * realtime thread, similar in principle to performing
	 * a wait_queue_wakeup_all and allowing scheduler prioritization
	 * to run the realtime thread, but without causing the
	 * lock contention of that scenario.
	 */
	realtime = (thread->sched_pri >= BASEPRI_REALTIME);

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (!wq->wq_fifo
			|| (thread->options & TH_OPT_VMPRIV)
			|| realtime)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			uint32_t flags;

			flags = realtime ? TIMER_CALL_CRITICAL : 0;

			if (!timer_call_enter(&thread->wait_timer, deadline, flags))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}
/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
	    interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
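/*
 * A minimal sketch (editorial; hypothetical names) of waiting on a
 * private wait queue, paired with wait_queue_wakeup_one() from the
 * waking side.  A priority argument of -1 requests no wakeup-time
 * priority floor (see the sched_pri check in wait_queue_wakeup_one).
 */
#if 0	/* illustration only */
static void
example_private_wait(wait_queue_t wq, event_t ev)
{
	if (wait_queue_assert_wait(wq, ev, THREAD_UNINT, 0) == THREAD_WAITING)
		(void) thread_block(THREAD_CONTINUE_NULL);
}

static void
example_private_wake(wait_queue_t wq, event_t ev)
{
	(void) wait_queue_wakeup_one(wq, ev, THREAD_AWAKENED, -1);
}
#endif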
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue. If it is marked
			 * as pre-post, and it is the "generic event" then mark
			 * it pre-posted now (if not already).
			 */
			wqs_lock(set_queue);
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			if (! wait_queue_empty(&set_queue->wqs_wait_queue))
				_wait_queue_select64_all(&set_queue->wqs_wait_queue, event, wake_queue);
			wqs_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue((queue_entry_t) t);
				enqueue (wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

//	assert(wait_queue_held(wq));
//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
//		panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq);	/* (BRINGUP) */
//	}

	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty (q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
//		panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq);	/* (BRINGUP) */
//	}
	ret = wait_queue_wakeup64_all_locked(
	    wq, CAST_DOWN(event64_t,event),
	    result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO/LIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue. If the set
			 * supports pre-posting, it isn't already preposted,
			 * and we didn't find a thread in the set, then mark it.
			 *
			 * If we later find a thread, there may be a spurious
			 * pre-post here on this set. The wait side has to check
			 * for that either pre- or post-wait.
			 */
			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
			}
			if (t != THREAD_NULL) {
				wqs_unlock(set_queue);
				return t;
			}
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			wqs_unlock(set_queue);

		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			t = (thread_t)wq_element;
			if (t->wait_event == event) {
				thread_lock(t);
				remqueue((queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}

			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread off its wait queue and (possibly) unlock
 *		the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue((queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
	if (unlock)
		wait_queue_unlock(waitq);
}
/*
 *	Routine:	wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue((queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
								event,
								thread);
			}
			wqs_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;  /* still locked if not NULL */
}
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS: It was, and is, now removed.
 *		KERN_NOT_WAITING - No thread was waiting <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result,
	int priority)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		if (thread->sched_pri < priority) {
			if (priority <= MAXPRI) {
				set_sched_pri(thread, priority);

				thread->was_promoted_on_wakeup = 1;
				thread->sched_flags |= TH_SFLAG_PROMOTED;
			}
		}
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, CAST_DOWN(event64_t,event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its sets' queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}