/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
/* forward declarations */
static boolean_t wait_queue_member_locked(
			wait_queue_t		wq,
			wait_queue_set_t	wq_set);

void wait_queue_unlink_one(
			wait_queue_t		wq,
			wait_queue_set_t	*wq_setp);

kern_return_t wait_queue_set_unlink_all_nofree(
			wait_queue_set_t	wq_set);
/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	/* only FIFO queue ordering is supported at present */
	if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside of
 *		the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree(wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	kfree(wq, sizeof(struct wait_queue));
	return KERN_SUCCESS;
}
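/*
 * Usage sketch (illustrative only, not an API defined here; the
 * example_wq name is hypothetical): the typical allocate/free pairing
 * for a caller outside the Mach core.  The queue must be empty again
 * before it can be freed.
 *
 *	wait_queue_t example_wq;
 *
 *	example_wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (example_wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	... wait on and wake up example_wq ...
 *	(void) wait_queue_free(example_wq);
 */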
/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_isprepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_isprepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}
/* legacy interface naming */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}
kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Conditions:
 *		May block.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			kfree(wq_set, sizeof(struct wait_queue_set));
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}
/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	kfree(wq_set, sizeof(struct wait_queue_set));
	return KERN_SUCCESS;
}
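/*
 * Usage sketch (illustrative only; example_set is hypothetical):
 * allocating a set whose wakeups are preposted while no thread is
 * waiting, then tearing it down once its member queues are unlinked.
 *
 *	wait_queue_set_t example_set;
 *
 *	example_set = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
 *	if (example_set != WAIT_QUEUE_SET_NULL) {
 *		... link member queues, wait, wake ...
 *		(void) wait_queue_set_unlink_all(example_set);
 *		(void) wait_queue_set_free(example_set);
 *	}
 */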
/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
	  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
	  "wait queue element list corruption: wq=%#x, wqe=%#x", \
	  (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
			((wql)->wql_setqueue == (wqs)) && \
			((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
			(WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
			"wait queue set links corruption: wqs=%#x, wql=%#x", \
			(wqs), (wql))
#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
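/*
 * Usage sketch (illustrative only): probing membership before linking,
 * to avoid the KERN_ALREADY_IN_SET path.  Note the answer is only
 * advisory - the queue is unlocked again by the time the caller acts.
 *
 *	if (!wait_queue_member(example_wq, example_set))
 *		(void) wait_queue_link(example_wq, example_set);
 */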
/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure supplied by the caller.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	ret = wait_queue_link_noalloc(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		kfree(wql, sizeof(struct wait_queue_link));

	return ret;
}
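/*
 * Usage sketch (illustrative only): making a wait queue a member of a
 * set.  wait_queue_link() allocates the link structure itself; a caller
 * that cannot block may preallocate one and use wait_queue_link_noalloc()
 * instead.  KERN_ALREADY_IN_SET is usually benign.
 *
 *	kern_return_t kr;
 *
 *	kr = wait_queue_link(example_wq, example_set);
 *	if (kr == KERN_ALREADY_IN_SET)
 *		kr = KERN_SUCCESS;
 */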
/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 *	Conditions:
 *		Both the wait queue and the set are locked.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue must be a member of the set queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree(wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlinkall_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and all its
 *		sets. The caller is responsible for freeing
 *		the wait queue link structures.
 */
kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are freed.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	/* free the links outside the interlock and splsched */
	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree(wql, sizeof(struct wait_queue_link));
	}

	return(KERN_SUCCESS);
}
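/*
 * Teardown sketch (illustrative only): before freeing a wait queue that
 * may have been linked into sets, sever all linkages first so the link
 * structures are reclaimed and wait_queue_free() sees an empty queue.
 *
 *	(void) wait_queue_unlink_all(example_wq);	(frees the links)
 *	(void) wait_queue_free(example_wq);
 */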
/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are not freed, nor
 *		returned. It is the caller's responsibility to track and free
 *		them.
 *	Conditions:
 *		The wait queue must be a set queue
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			/*
			 * We hold the set lock but could not get the
			 * queue lock; back off and retry to avoid a
			 * lock-ordering deadlock.
			 */
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}
/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are freed.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree(wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_one
 *	Purpose:
 *		Find and unlink one set wait queue
 *	Conditions:
 *		Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree(wql, sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->options & TH_OPT_VMPRIV)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			if (!timer_call_enter(&thread->wait_timer, deadline))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}
/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
					      interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
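/*
 * Wait pattern sketch (illustrative only): the canonical pairing of
 * wait_queue_assert_wait() with thread_block().  The event is any
 * unique address; example_wq and example_event are hypothetical.
 *
 *	wait_result_t wr;
 *
 *	wr = wait_queue_assert_wait(example_wq, (event_t)&example_event,
 *				    THREAD_UNINT, 0);	(0 = no deadline)
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	if (wr == THREAD_AWAKENED)
 *		... the event was posted ...
 */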
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets and wait queue
				 * is the first element of set
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue(wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));
	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty(q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(
				wq, (event64_t)((uint32_t)event),
				result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
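/*
 * Wakeup sketch (illustrative only): the producer side matching the
 * assert_wait pattern shown earlier.  Waking with THREAD_AWAKENED
 * releases every thread waiting on this <queue, event> pair.
 *
 *	(void) wait_queue_wakeup_all(example_wq,
 *				     (event_t)&example_event,
 *				     THREAD_AWAKENED);
 */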
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			t = (thread_t)wq_element;
			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}

			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Routine:	wait_queue_peek64_locked
 *	Purpose:
 *		Select the best thread from a wait queue that meets the
 *		supplied criteria, but leave it on the queue it was
 *		found on.  The thread, and the actual wait_queue the
 *		thread was found on are identified.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *		a locked waitq - the one the thread was found on
 *	Note:
 *		Both the waitq the thread was actually found on, and
 *		the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL) {
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;		/* thread and its waitq locked */
			}

			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread that was previously "peeked" off the wait
 *		queue and (possibly) unlock the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;

	if (unlock)
		wait_queue_unlock(waitq);
}
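/*
 * Pairing sketch (illustrative only): wait_queue_peek64_locked() and
 * wait_queue_pull_thread_locked() are intended to be used together by
 * locked internal callers - peek to identify the thread and the waitq
 * it actually sits on, then pull it from that waitq.  Passing
 * (real_wq != wq) drops the extra set-queue lock while keeping the
 * supplied queue locked.
 *
 *	thread_t t;
 *	wait_queue_t real_wq;
 *
 *	wait_queue_peek64_locked(wq, event, &t, &real_wq);
 *	if (t != THREAD_NULL) {
 *		wait_queue_pull_thread_locked(real_wq, t, real_wq != wq);
 *		... t is still locked here ...
 *	}
 */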
/*
 *	Routine:	_wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								  event,
								  thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;  /* still locked if not NULL */
}
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS: It was, and is, now removed.
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
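/*
 * Handoff sketch (illustrative only): when each posted event can be
 * consumed by exactly one waiter (e.g. one queued work item), waking a
 * single thread avoids a thundering herd.  KERN_NOT_WAITING tells the
 * caller no one was waiting, so the posted state should simply remain
 * for the next waiter to find.
 *
 *	if (wait_queue_wakeup_one(example_wq, (event_t)&example_event,
 *				  THREAD_AWAKENED) == KERN_NOT_WAITING)
 *		... leave the item pending for the next waiter ...
 */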
/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
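/*
 * Targeted wakeup sketch (illustrative only): releasing one specific
 * thread, but only if it is still waiting on this <queue, event> pair;
 * a thread that has already moved on to another wait is left alone.
 *
 *	if (wait_queue_wakeup_thread(example_wq, (event_t)&example_event,
 *				     example_thread, THREAD_INTERRUPTED)
 *	    == KERN_NOT_WAITING)
 *		... the thread was no longer waiting here ...
 */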
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set's queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}