/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
/* forward declarations */
static boolean_t wait_queue_member_locked(
			wait_queue_t wq,
			wait_queue_set_t wq_set);

void wait_queue_unlink_one(
			wait_queue_t wq,
			wait_queue_set_t *wq_setp);

kern_return_t wait_queue_set_unlink_all_nofree(
			wait_queue_set_t wq_set);
/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside
 *		of the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree(wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	kfree(wq, sizeof(struct wait_queue));
	return KERN_SUCCESS;
}
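/*
 * Usage sketch (illustrative only, not a kernel entry point): the
 * intended alloc/use/free lifecycle of a private wait queue. Assumes
 * a blockable kernel context; the policy value is an example choice.
 */
#if 0
static kern_return_t
wait_queue_lifecycle_sketch(void)
{
	/* Allocate a FIFO wait queue for use outside of Mach proper. */
	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);

	if (wq == WAIT_QUEUE_NULL)
		return KERN_RESOURCE_SHORTAGE;

	/* ... threads may assert waits on wq here ... */

	/* Fails unless the queue is empty of waiters and set links. */
	return wait_queue_free(wq);
}
#endif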
/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_isprepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_isprepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}
/* legacy interface naming */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}
kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	s = splsched();
	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);
	splx(s);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Conditions:
 *		May block.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			kfree(wq_set, sizeof(struct wait_queue_set));
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}
/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	kfree(wq_set, sizeof(struct wait_queue_set));
	return KERN_SUCCESS;
}
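/*
 * Usage sketch (illustrative only): allocating a wait queue set with
 * the prepost policy, in the style of a port-set multiplexor. The
 * policy combination shown is an example choice.
 */
#if 0
static wait_queue_set_t
wait_queue_set_sketch(void)
{
	/* A preposting set remembers wakeups that arrive with no waiter. */
	wait_queue_set_t wq_set =
		wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);

	/* Member queues are attached later via wait_queue_link(). */
	return wq_set;
}
#endif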
/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
	  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
	  "wait queue element list corruption: wq=%#x, wqe=%#x", \
	  (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			 (queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			 (queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
	  ((wql)->wql_setqueue == (wqs)) && \
	  ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
	  (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
	  "wait queue set links corruption: wqs=%#x, wql=%#x", \
	  (wqs), (wql))
#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a
 *		wait_queue_link structure supplied by the caller.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) kalloc(sizeof(struct _wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	ret = wait_queue_link_noalloc(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		kfree(wql, sizeof(struct _wait_queue_link));

	return ret;
}
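/*
 * Usage sketch (illustrative only): linking a wait queue into a set,
 * checking membership, and undoing the link. Error handling is elided;
 * wq and wq_set are assumed to be valid, initialized queues.
 */
#if 0
static void
wait_queue_link_sketch(wait_queue_t wq, wait_queue_set_t wq_set)
{
	/* Allocates and installs the connecting wait_queue_link. */
	if (wait_queue_link(wq, wq_set) == KERN_SUCCESS) {
		assert(wait_queue_member(wq, wq_set));

		/* Remove the linkage and free the link structure. */
		(void) wait_queue_unlink(wq, wq_set);
	}
}
#endif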
/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The set queue must be a member of the wait queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree(wql, sizeof(struct _wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlinkall_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and all its
 *		sets. The caller is responsible for freeing
 *		the wait queue link structures.
 */
kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are freed.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree(wql, sizeof(struct _wait_queue_link));
	}

	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are not freed, nor
 *		returned. It is the caller's responsibility to track and free
 *		them.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}
/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are freed.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree(wql, sizeof(struct _wait_queue_link));
	}
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_one
 *	Purpose:
 *		Find and unlink one set wait queue
 *	Conditions:
 *		Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree(wql, sizeof(struct _wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->options & TH_OPT_VMPRIV)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			if (!timer_call_enter(&thread->wait_timer, deadline))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}
/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
					      interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
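/*
 * Usage sketch (illustrative only): the wait side of a typical
 * handshake. A thread asserts a wait on a <queue, event> pair and then
 * blocks; a matching wakeup sketch follows the wakeup routines below.
 * The interruptible and deadline values are example choices.
 */
#if 0
static void
wait_side_sketch(wait_queue_t wq, event_t event)
{
	wait_result_t wres;

	/* Queue the current thread on <wq, event> with no deadline. */
	wres = wait_queue_assert_wait(wq, event, THREAD_UNINT, 0);
	if (wres == THREAD_WAITING)
		wres = thread_block(THREAD_CONTINUE_NULL);

	/* wres is THREAD_AWAKENED once a wakeup posts the event. */
}
#endif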
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets and wait queue
				 * is the first element of set
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue(wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

//	assert(wait_queue_held(wq));
//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
//		panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq);	/* (BRINGUP) */
//	}

	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty(q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
//		panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq);	/* (BRINGUP) */
//	}
	ret = wait_queue_wakeup64_all_locked(
				wq, (event64_t)((uint32_t)event),
				result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
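/*
 * Usage sketch (illustrative only): the wake side of the handshake
 * begun in the wait-side sketch above. THREAD_AWAKENED is the
 * conventional result handed to the woken threads.
 */
#if 0
static void
wake_all_sketch(wait_queue_t wq, event_t event)
{
	/* Unblock every thread currently waiting on <wq, event>. */
	kern_return_t kr = wait_queue_wakeup_all(wq, event, THREAD_AWAKENED);

	if (kr == KERN_NOT_WAITING) {
		/* No thread was waiting on the pair; nothing was woken. */
	}
}
#endif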
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			t = (thread_t)wq_element;
			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}

			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread off its wait queue and (possibly) unlock
 *		the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
	if (unlock)
		wait_queue_unlock(waitq);
}
/*
 *	Routine:	_wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								  event,
								  thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS - It was, and is, now removed.
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
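/*
 * Usage sketch (illustrative only): waking a single waiter, the usual
 * pattern for a producer/consumer handoff where exactly one consumer
 * should proceed per posted item.
 */
#if 0
static void
wake_one_sketch(wait_queue_t wq, event_t event)
{
	/* At most one thread waiting on <wq, event> is set running. */
	kern_return_t kr = wait_queue_wakeup_one(wq, event, THREAD_AWAKENED);

	if (kr == KERN_NOT_WAITING) {
		/*
		 * Nobody was waiting; on a prepost set the wakeup would
		 * have been remembered rather than lost.
		 */
	}
}
#endif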
/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
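/*
 * Usage sketch (illustrative only): a targeted wakeup, used to
 * interrupt one specific thread's wait (e.g. during termination)
 * without disturbing other waiters on the same <wq, event> pair.
 */
#if 0
static void
wake_thread_sketch(wait_queue_t wq, event_t event, thread_t thread)
{
	/* Wakes thread only if it is still waiting on <wq, event>. */
	kern_return_t kr =
		wait_queue_wakeup_thread(wq, event, thread, THREAD_INTERRUPTED);

	if (kr == KERN_NOT_WAITING) {
		/* The thread had already been awakened or moved on. */
	}
}
#endif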