/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */

#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/zalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/mach_param.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
#include <vm/vm_kern.h>

/* forward declarations */
static boolean_t wait_queue_member_locked(
			wait_queue_t wq,
			wait_queue_set_t wq_set);

static void wait_queues_init(void) __attribute__((section("__TEXT, initcode")));

#define WAIT_QUEUE_MAX		thread_max
#define WAIT_QUEUE_SET_MAX	(task_max * 3)
#define WAIT_QUEUE_LINK_MAX	(PORT_MAX / 2 + (WAIT_QUEUE_MAX * WAIT_QUEUE_SET_MAX) / 64)

static zone_t _wait_queue_link_zone;
static zone_t _wait_queue_set_zone;
static zone_t _wait_queue_zone;

/* see rdar://6737748&5561610; we need an unshadowed
 * definition of a WaitQueueLink for debugging,
 * but it needs to be used somewhere to wind up in
 * the dSYM file */
volatile WaitQueueLink *unused_except_for_debugging;

/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 *
 *	The wait event hash table declarations are as follows:
 */

struct wait_queue boot_wait_queue[1];
__private_extern__ struct wait_queue *wait_queues = &boot_wait_queue[0];

__private_extern__ uint32_t num_wait_queues = 1;

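/*
 * Illustrative sketch (not part of the original source): a global wakeup is
 * routed to one of these buckets by hashing the event, along the lines of
 * the snippet below.  The "hash_of" helper is hypothetical; the real hash
 * macro lives in the wait queue/scheduler headers.
 *
 *	wait_queue_t wq = &wait_queues[hash_of(event) % num_wait_queues];
 *	wait_queue_wakeup_all(wq, event, THREAD_AWAKENED);
 */
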
static uint32_t
compute_wait_hash_size(__unused unsigned cpu_count, __unused uint64_t memsize)
{
	uint32_t hsize = (uint32_t)round_page_64((thread_max / 11) * sizeof(struct wait_queue));
	uint32_t bhsize;

	/* allow the table size to be overridden with the "wqsize" boot-arg */
	if (PE_parse_boot_argn("wqsize", &bhsize, sizeof(bhsize)))
		hsize = bhsize;

	return hsize;
}

static void
wait_queues_init(void)
{
	uint32_t	i, whsize;
	kern_return_t	kret;

	whsize = compute_wait_hash_size(processor_avail_count, machine_info.max_mem);
	num_wait_queues = (whsize / ((uint32_t)sizeof(struct wait_queue))) - 1;

	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues,
				      whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);

	if (kret != KERN_SUCCESS || wait_queues == NULL)
		panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);

	for (i = 0; i < num_wait_queues; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}

void
wait_queue_bootstrap(void)
{
	wait_queues_init();

	_wait_queue_zone = zinit(sizeof(struct wait_queue),
				 WAIT_QUEUE_MAX * sizeof(struct wait_queue),
				 sizeof(struct wait_queue),
				 "wait queues");
	zone_change(_wait_queue_zone, Z_NOENCRYPT, TRUE);

	_wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
				     WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
				     sizeof(struct wait_queue_set),
				     "wait queue sets");
	zone_change(_wait_queue_set_zone, Z_NOENCRYPT, TRUE);

	_wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
				      WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
				      sizeof(struct _wait_queue_link),
				      "wait queue links");
	zone_change(_wait_queue_link_zone, Z_NOENCRYPT, TRUE);
}

/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	/* only FIFO and LIFO for now */
	if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}

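/*
 * Usage sketch (not part of the original source): a typical client embeds a
 * wait queue in its own structure and initializes it in place.  The
 * "my_object" structure is hypothetical.
 *
 *	struct my_object {
 *		struct wait_queue	wq;
 *		int			state;
 *	};
 *
 *	kern_return_t kr = wait_queue_init(&obj->wq, SYNC_POLICY_FIFO);
 *	assert(kr == KERN_SUCCESS);
 */
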
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside
 *		of the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) zalloc(_wait_queue_zone);
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			zfree(_wait_queue_zone, wq);
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}

/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	zfree(_wait_queue_zone, wq);
	return KERN_SUCCESS;
}

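/*
 * Lifecycle sketch (not part of the original source): callers outside Mach
 * pair wait_queue_alloc() with wait_queue_free(); the free is refused while
 * any element is still queued.
 *
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	...
 *	kr = wait_queue_free(wq);	(KERN_FAILURE if the queue is non-empty)
 */
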
/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_prepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_prepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	queue_init(&wqset->wqs_preposts);
	return KERN_SUCCESS;
}

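/*
 * Behavior note (not part of the original source): initializing a set with
 * SYNC_POLICY_PREPOST lets a wakeup that finds no waiter "stick" on the set,
 * so a later NO_EVENT64 wait on the set returns THREAD_AWAKENED immediately
 * (see wait_queue_assert_wait64_locked below).
 *
 *	wait_queue_set_t wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
 */
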
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}

kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	s = splsched();
	wqs_lock(wq_set);
	q = &wq_set->wqs_preposts;
	while (!queue_empty(q)) {
		queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
		assert(!wql_is_preposted(wql));
	}
	wqs_unlock(wq_set);
	splx(s);
	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Conditions:
 *		May block.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) zalloc(_wait_queue_set_zone);
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			zfree(_wait_queue_set_zone, wq_set);
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}

/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	zfree(_wait_queue_set_zone, wq_set);
	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }

/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_link_noalloc;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK		((void *)&_wait_queue_link)
#define WAIT_QUEUE_LINK_NOALLOC	((void *)&_wait_queue_link_noalloc)
#define WAIT_QUEUE_UNLINKED	((void *)&_wait_queue_unlinked)

#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
	  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
	  "wait queue element list corruption: wq=%#x, wqe=%#x", \
	  (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT(((((wql)->wql_type == WAIT_QUEUE_LINK) || \
			((wql)->wql_type == WAIT_QUEUE_LINK_NOALLOC)) && \
		  ((wql)->wql_setqueue == (wqs)) && \
		  (((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) || \
			((wql)->wql_queue->wq_type == _WAIT_QUEUE_SET_inited)) && \
		  (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
		  "wait queue set links corruption: wqs=%#x, wql=%#x", \
		  (wqs), (wql))

#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */

/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK) ||
		    (wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC)) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}

/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}

/*
 *	Routine:	wait_queue_link_internal
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that was provided.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 *		The wait_queue_link structure must already be properly typed
 */
static kern_return_t
wait_queue_link_internal(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
		     wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	assert(wql->wql_type == WAIT_QUEUE_LINK ||
	       wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);

	wql->wql_queue = wq;
	wql_clear_prepost(wql);
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that the caller provided.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wql->wql_type = WAIT_QUEUE_LINK_NOALLOC;
	return wait_queue_link_internal(wq, wq_set, wql);
}

/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	wql->wql_type = WAIT_QUEUE_LINK;
	ret = wait_queue_link_internal(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		zfree(_wait_queue_link_zone, wql);

	return ret;
}

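/*
 * Usage sketch (not part of the original source): linking a wait queue into
 * a set lets a single wait on the set cover events posted to any member
 * queue; this is the mechanism port sets are built on above this layer.
 *
 *	wait_queue_set_t wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO);
 *	kern_return_t kr = wait_queue_link(wq, wqs);
 *	(kr may be KERN_ALREADY_IN_SET or KERN_RESOURCE_SHORTAGE)
 */
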
/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	if (wql_is_preposted(wql)) {
		queue_t ppq = &wq_set->wqs_preposts;
		queue_remove(ppq, wql, wait_queue_link_t, wql_preposts);
	}
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}

/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The set queue being unlinked must be a member of this wait queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				boolean_t alloced;

				alloced = (wql->wql_type == WAIT_QUEUE_LINK);
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				if (alloced)
					zfree(_wait_queue_link_zone, wql);
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}

/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures that were allocated internally
 *		are freed.  The others are the caller's responsibility.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		boolean_t alloced;

		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
				  queue_next((queue_t) wq_element);

		alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
		if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			if (alloced)
				enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}

	return KERN_SUCCESS;
}

/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all(wq_set);
}

/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are freed for those
 *		links which were dynamically allocated.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			boolean_t alloced;

			alloced = (wql->wql_type == WAIT_QUEUE_LINK);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			if (alloced)
				enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			/* couldn't get the member queue's lock; back off and retry */
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}
	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (event == NO_EVENT64 && wqs_is_preposted(wqs))
			return THREAD_AWAKENED;
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (!wq->wq_fifo || (thread->options & TH_OPT_VMPRIV))
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			if (!timer_call_enter(&thread->wait_timer, deadline))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return wait_result;
}

/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
					      interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return ret;
}

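/*
 * Wait-side sketch (not part of the original source): a caller asserts the
 * wait and then blocks; a wakeup on the same <wq,event> pair makes
 * thread_block() return with the waker's wait_result.
 *
 *	wait_result_t wr = wait_queue_assert_wait(wq, event, THREAD_UNINT, 0);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 */
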
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return ret;
}

/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue. If it is marked
			 * as pre-post, and it is the "generic event" then mark
			 * it pre-posted now (if not already).
			 */
			wqs_lock(set_queue);
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			if (! wait_queue_empty(&set_queue->wqs_wait_queue))
				_wait_queue_select64_all(&set_queue->wqs_wait_queue, event, wake_queue);
			wqs_unlock(set_queue);
		} else {
			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue(wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}

/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));

	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty(q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}

/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(
				wq, CAST_DOWN(event64_t,event),
				result, TRUE);
	/* lock released */
	splx(s);

	return ret;
}

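/*
 * Wakeup-side sketch (not part of the original source): pairs with the
 * wait_queue_assert_wait() pattern shown earlier; every thread waiting on
 * this <wq,event> pair is set running with the given wait result.
 *
 *	(void) wait_queue_wakeup_all(wq, event, THREAD_AWAKENED);
 */
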
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);

	return ret;
}

/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO/LIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue. If the set
			 * supports pre-posting, it isn't already preposted,
			 * and we didn't find a thread in the set, then mark it.
			 *
			 * If we later find a thread, there may be a spurious
			 * pre-post here on this set. The wait side has to check
			 * for that either pre- or post-wait.
			 */
			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
			}
			if (t != THREAD_NULL) {
				wqs_unlock(set_queue);
				return t;
			}
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			wqs_unlock(set_queue);
		} else {
			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			t = (thread_t)wq_element;
			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}

			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}

/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread off its wait queue and (possibly) unlock
 *		the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;

	if (unlock)
		wait_queue_unlock(waitq);
}

/*
 *	Routine:	_wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
								  event,
								  thread);
			}
			wqs_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}

/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running, but return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}

/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS - a thread was found waiting and awakened
 *		KERN_NOT_WAITING - no thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}

/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}

/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}

/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}

/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, CAST_DOWN(event64_t,event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}

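/*
 * Usage sketch (not part of the original source): waking one specific thread,
 * but only if it is still waiting on this exact <wq,event> pair; a thread
 * that has already been awakened and moved on is left undisturbed.
 *
 *	kern_return_t kr = wait_queue_wakeup_thread(wq, event, thread,
 *						    THREAD_INTERRUPTED);
 *	if (kr == KERN_NOT_WAITING)
 *		... the thread had already left this wait ...
 */
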
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}