2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 * @OSF_FREE_COPYRIGHT@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 * File: wait_queue.c (adapted from sched_prim.c)
54 * Author: Avadis Tevanian, Jr.
57 * Primitives for manipulating wait queues: either global
58 * ones from sched_prim.c, or private ones associated with
59 * particular structures(pots, semaphores, etc..).
62 #include <kern/kern_types.h>
63 #include <kern/simple_lock.h>
64 #include <kern/kalloc.h>
65 #include <kern/queue.h>
67 #include <mach/sync_policy.h>
69 #include <kern/sched_prim.h>
70 #include <kern/wait_queue.h>
77 wq
->wq_fifo
= (policy
== SYNC_POLICY_FIFO
);
79 queue_init(&wq
->wq_queue
);
80 hw_lock_init(&wq
->wq_interlock
);
85 wait_queue_sub_t wqsub
,
88 wait_queue_init(&wqsub
->wqs_wait_queue
, policy
);
89 wqsub
->wqs_wait_queue
.wq_issub
= TRUE
;
90 if ( policy
& SYNC_POLICY_PREPOST
) {
91 wqsub
->wqs_wait_queue
.wq_isprepost
= TRUE
;
92 wqsub
->wqs_refcount
= 0;
94 wqsub
->wqs_wait_queue
.wq_isprepost
= FALSE
;
95 queue_init(&wqsub
->wqs_sublinks
);
99 wait_queue_sub_clearrefs(
100 wait_queue_sub_t wq_sub
)
102 assert(wait_queue_is_sub(wq_sub
));
106 wq_sub
->wqs_refcount
= 0;
113 wait_queue_link_init(
114 wait_queue_link_t wql
)
116 queue_init(&wql
->wql_links
);
117 queue_init(&wql
->wql_sublinks
);
118 wql
->wql_queue
= WAIT_QUEUE_NULL
;
119 wql
->wql_subqueue
= WAIT_QUEUE_SUB_NULL
;
120 wql
->wql_event
= NO_EVENT
;
124 * Routine: wait_queue_alloc
126 * Allocate and initialize a wait queue for use outside of
127 * of the mach part of the kernel.
130 * Nothing locked - can block.
133 * The allocated and initialized wait queue
134 * WAIT_QUEUE_NULL if there is a resource shortage
142 wq
= (wait_queue_t
) kalloc(sizeof(struct wait_queue
));
143 if (wq
!= WAIT_QUEUE_NULL
)
144 wait_queue_init(wq
, policy
);
149 * Routine: wait_queue_free
151 * Free an allocated wait queue.
154 * Nothing locked - can block.
160 assert(queue_empty(&wq
->wq_queue
));
161 kfree((vm_offset_t
)wq
, sizeof(struct wait_queue
));
166 * Routine: wait_queue_lock
168 * Lock the wait queue.
170 * the appropriate spl level (if any) is already raised.
180 * Double the standard lock timeout, because wait queues tend
181 * to iterate over a number of threads - locking each. If there is
182 * a problem with a thread lock, it normally times out at the wait
183 * queue level first, hiding the real problem.
185 pc
= GET_RETURN_PC(&wq
);
186 if (!hw_lock_to(&wq
->wq_interlock
, LockTimeOut
* 2)) {
187 panic("wait queue deadlock detection - wq=0x%x, cpu=%d, ret=0x%x\n", wq
, cpu_number(), pc
);
190 hw_lock_lock(&wq
->wq_interlock
);
195 * Routine: wait_queue_lock_try
197 * Try to lock the wait queue without waiting
199 * the appropriate spl level (if any) is already raised.
201 * TRUE if the lock was acquired
202 * FALSE if we would have needed to wait
208 return hw_lock_try(&wq
->wq_interlock
);
212 * Routine: wait_queue_unlock
214 * unlock the wait queue
216 * The wait queue is assumed locked.
217 * appropriate spl level is still maintained
223 assert(hw_lock_held(&wq
->wq_interlock
));
225 hw_lock_unlock(&wq
->wq_interlock
);
/*
 * Sentinel "event" for link elements: a queue entry whose wqe_event is
 * WAIT_QUEUE_SUBORDINATE (presumably the address of this variable, per
 * the header - TODO confirm) is a wait_queue_link, not a waiting thread.
 */
228 int _wait_queue_subordinate
; /* phoney event for subordinate wait q elements */
232 * Routine: wait_queue_member_locked
234 * Indicate if this sub queue is a member of the queue
236 * The wait queue is locked
237 * The sub queue is just that, a sub queue
240 wait_queue_member_locked(
242 wait_queue_sub_t wq_sub
)
244 wait_queue_element_t wq_element
;
247 assert(wait_queue_held(wq
));
248 assert(wait_queue_is_sub(wq_sub
));
252 wq_element
= (wait_queue_element_t
) queue_first(q
);
253 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
255 if ((wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
)) {
256 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
258 if (wql
->wql_subqueue
== wq_sub
)
261 wq_element
= (wait_queue_element_t
)
262 queue_next((queue_t
) wq_element
);
269 * Routine: wait_queue_member
271 * Indicate if this sub queue is a member of the queue
273 * The sub queue is just that, a sub queue
278 wait_queue_sub_t wq_sub
)
283 assert(wait_queue_is_sub(wq_sub
));
287 ret
= wait_queue_member_locked(wq
, wq_sub
);
288 wait_queue_unlock(wq
);
295 * Routine: wait_queue_link
297 * Insert a subordinate wait queue into a wait queue. This
298 * requires us to link the two together using a wait_queue_link
299 * structure that we allocate.
301 * The wait queue being inserted must be inited as a sub queue
302 * The sub waitq is not already linked
308 wait_queue_sub_t wq_sub
)
310 wait_queue_link_t wql
;
313 assert(wait_queue_is_sub(wq_sub
));
314 assert(!wait_queue_member(wq
, wq_sub
));
316 wql
= (wait_queue_link_t
) kalloc(sizeof(struct wait_queue_link
));
317 if (wql
== WAIT_QUEUE_LINK_NULL
)
318 return KERN_RESOURCE_SHORTAGE
;
320 wait_queue_link_init(wql
);
327 wql
->wql_subqueue
= wq_sub
;
328 wql
->wql_event
= WAIT_QUEUE_SUBORDINATE
;
329 queue_enter(&wq
->wq_queue
, wql
, wait_queue_link_t
, wql_links
);
330 queue_enter(&wq_sub
->wqs_sublinks
, wql
, wait_queue_link_t
, wql_sublinks
);
333 wait_queue_unlock(wq
);
339 * Routine: wait_queue_link_noalloc
341 * Insert a subordinate wait queue into a wait queue. This
342 * requires us to link the two together using a wait_queue_link
343 * structure that we allocate.
345 * The wait queue being inserted must be inited as a sub queue
346 * The sub waitq is not already linked
350 wait_queue_link_noalloc(
352 wait_queue_sub_t wq_sub
,
353 wait_queue_link_t wql
)
357 assert(wait_queue_is_sub(wq_sub
));
358 assert(!wait_queue_member(wq
, wq_sub
));
360 wait_queue_link_init(wql
);
367 wql
->wql_subqueue
= wq_sub
;
368 wql
->wql_event
= WAIT_QUEUE_SUBORDINATE
;
369 queue_enter(&wq
->wq_queue
, wql
, wait_queue_link_t
, wql_links
);
370 queue_enter(&wq_sub
->wqs_sublinks
, wql
, wait_queue_link_t
, wql_sublinks
);
373 wait_queue_unlock(wq
);
380 * Routine: wait_queue_unlink
382 * Remove the linkage between a wait queue and its subordinate.
384 * The wait queue being must be a member sub queue
389 wait_queue_sub_t wq_sub
)
391 wait_queue_element_t wq_element
;
395 assert(wait_queue_is_sub(wq_sub
));
396 assert(wait_queue_member(wq
, wq_sub
));
404 wq_element
= (wait_queue_element_t
) queue_first(q
);
405 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
407 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
408 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
411 if (wql
->wql_subqueue
== wq_sub
) {
412 sq
= &wq_sub
->wqs_sublinks
;
413 queue_remove(q
, wql
, wait_queue_link_t
, wql_links
);
414 queue_remove(sq
, wql
, wait_queue_link_t
, wql_sublinks
);
416 wait_queue_unlock(wq
);
418 kfree((vm_offset_t
)wql
,sizeof(struct wait_queue_link
));
423 wq_element
= (wait_queue_element_t
)
424 queue_next((queue_t
) wq_element
);
426 panic("wait_queue_unlink");
430 * Routine: wait_queue_unlink_nofree
432 * Remove the linkage between a wait queue and its subordinate. Do not deallcoate the wql
434 * The wait queue being must be a member sub queue
437 wait_queue_unlink_nofree(
439 wait_queue_sub_t wq_sub
)
441 wait_queue_element_t wq_element
;
444 assert(wait_queue_is_sub(wq_sub
));
448 wq_element
= (wait_queue_element_t
) queue_first(q
);
449 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
451 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
452 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
455 if (wql
->wql_subqueue
== wq_sub
) {
456 sq
= &wq_sub
->wqs_sublinks
;
457 queue_remove(q
, wql
, wait_queue_link_t
, wql_links
);
458 queue_remove(sq
, wql
, wait_queue_link_t
, wql_sublinks
);
459 return(KERN_SUCCESS
);
463 wq_element
= (wait_queue_element_t
)
464 queue_next((queue_t
) wq_element
);
466 /* due to dropping the sub's lock to get to this routine we can see
467 * no entries in waitqueue. It is valid case, so we should just return
469 return(KERN_FAILURE
);
/*
 * NOTE(review): this routine is incomplete in this copy of the file -
 * the embedded original line numbers skip (482-488, 499-505, 508,
 * 511-513, 516-524 missing), so the declarations of wq, q1 and kret,
 * the else-path of the lock_try, and the loop exit are not visible.
 * The comments below describe only what the surviving lines show.
 */
473 * Routine: wait_subqueue_unlink_all
475 * Remove the linkage between a wait queue and its subordinate.
477 * The wait queue being must be a member sub queue
480 wait_subqueue_unlink_all(
481 wait_queue_sub_t wq_sub
)
483 wait_queue_link_t wql
;
489 assert(wait_queue_is_sub(wq_sub
));
/* walk every link hanging off the subordinate's sublink chain */
495 q
= &wq_sub
->wqs_sublinks
;
497 wql
= (wait_queue_link_t
)queue_first(q
);
498 while (!queue_end(q
, (queue_entry_t
)wql
)) {
/* only unlink from a queue we can lock without blocking */
500 if (wait_queue_lock_try(wq
)) {
/* remove the link from the wait queue side (q1) ... */
506 queue_remove(q1
, wql
, wait_queue_link_t
, wql_links
);
/* ... and from the subordinate side (q) */
507 queue_remove(q
, wql
, wait_queue_link_t
, wql_sublinks
);
/* NOTE(review): an alternate removal path via unlink_nofree with
 * rollback on failure - branch structure lost in the source */
509 if ((kret
= wait_queue_unlink_nofree(wq
, wq_sub
)) != KERN_SUCCESS
) {
510 queue_remove(q
, wql
, wait_queue_link_t
, wql_sublinks
);
514 wait_queue_unlock(wq
);
/* restart from the head: removal invalidated the cursor */
515 wql
= (wait_queue_link_t
)queue_first(q
);
525 return(KERN_SUCCESS
);
530 * Routine: wait_queue_unlinkall_nofree
532 * Remove the linkage between a wait queue and all subordinates.
536 wait_queue_unlinkall_nofree(
539 wait_queue_element_t wq_element
;
540 wait_queue_sub_t wq_sub
;
550 wq_element
= (wait_queue_element_t
) queue_first(q
);
551 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
553 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
554 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
557 wq_sub
= wql
->wql_subqueue
;
559 sq
= &wq_sub
->wqs_sublinks
;
560 queue_remove(q
, wql
, wait_queue_link_t
, wql_links
);
561 queue_remove(sq
, wql
, wait_queue_link_t
, wql_sublinks
);
563 wq_element
= (wait_queue_element_t
) queue_first(q
);
565 wq_element
= (wait_queue_element_t
)
566 queue_next((queue_t
) wq_element
);
570 wait_queue_unlock(wq
);
573 return(KERN_SUCCESS
);
576 * Routine: wait_queue_unlink_one
578 * Find and unlink one subordinate wait queue
580 * Nothing of interest locked.
583 wait_queue_unlink_one(
585 wait_queue_sub_t
*wq_subp
)
587 wait_queue_element_t wq_element
;
596 wq_element
= (wait_queue_element_t
) queue_first(q
);
597 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
599 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
600 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
601 wait_queue_sub_t wq_sub
= wql
->wql_subqueue
;
605 sq
= &wq_sub
->wqs_sublinks
;
606 queue_remove(q
, wql
, wait_queue_link_t
, wql_links
);
607 queue_remove(sq
, wql
, wait_queue_link_t
, wql_sublinks
);
609 wait_queue_unlock(wq
);
611 kfree((vm_offset_t
)wql
,sizeof(struct wait_queue_link
));
616 wq_element
= (wait_queue_element_t
)
617 queue_next((queue_t
) wq_element
);
619 wait_queue_unlock(wq
);
621 *wq_subp
= WAIT_QUEUE_SUB_NULL
;
/*
 * NOTE(review): incomplete in this copy - the parameter list (original
 * lines 636-640), the body of the prepost early-exit (649, 651-656) and
 * the final return are lost.  From the call at wait_queue_assert_wait
 * the signature appears to be (wq, event, interruptible, unlock-flag);
 * confirm against wait_queue.h.
 */
625 * Routine: wait_queue_assert_wait_locked
627 * Insert the current thread into the supplied wait queue
628 * waiting for a particular event to be posted to that queue.
631 * The wait queue is assumed locked.
635 wait_queue_assert_wait_locked(
641 thread_t thread
= current_thread();
/* preposting sub queue: if an event was already posted (refcount > 0),
 * consume it instead of blocking - the missing lines handle that */
645 if (wq
->wq_issub
&& wq
->wq_isprepost
) {
646 wait_queue_sub_t wqs
= (wait_queue_sub_t
)wq
;
648 if (wqs
->wqs_refcount
> 0) {
650 wait_queue_unlock(wq
);
658 * This is the extent to which we currently take scheduling attributes
659 * into account. If the thread is vm priviledged, we stick it at
660 * the front of the queue. Later, these queues will honor the policy
661 * value set at wait_queue_init time.
663 if (thread
->vm_privilege
)
664 enqueue_head(&wq
->wq_queue
, (queue_entry_t
) thread
);
666 enqueue_tail(&wq
->wq_queue
, (queue_entry_t
) thread
);
/* record what the thread is waiting for and mark it waiting */
667 thread
->wait_event
= event
;
668 thread
->wait_queue
= wq
;
669 thread_mark_wait_locked(thread
, interruptible
);
670 thread_unlock(thread
);
672 wait_queue_unlock(wq
);
677 * Routine: wait_queue_assert_wait
679 * Insert the current thread into the supplied wait queue
680 * waiting for a particular event to be posted to that queue.
683 * nothing of interest locked.
686 wait_queue_assert_wait(
696 ret
= wait_queue_assert_wait_locked(wq
, event
, interruptible
, TRUE
);
697 /* wait queue unlocked */
/*
 * NOTE(review): incomplete in this copy - the parameter list (original
 * lines 720-723), the prepost bookkeeping inside the wq_isprepost branch
 * (748-755), the else keyword(s), and the thread_lock presumably taken
 * before "returned locked" are lost.  Comments describe visible lines.
 */
704 * Routine: wait_queue_select_all
706 * Select all threads off a wait queue that meet the
712 * wake_queue initialized and ready for insertion
716 * a queue of locked threads
719 _wait_queue_select_all(
724 wait_queue_element_t wq_element
;
725 wait_queue_element_t wqe_next
;
/* walk every element currently on the wait queue */
730 wq_element
= (wait_queue_element_t
) queue_first(q
);
731 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
/* capture the successor first: a match removes wq_element below */
732 wqe_next
= (wait_queue_element_t
)
733 queue_next((queue_t
) wq_element
);
736 * We may have to recurse if this is a compound wait queue.
738 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
739 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
740 wait_queue_t sub_queue
;
743 * We have to check the subordinate wait queue.
745 sub_queue
= (wait_queue_t
)wql
->wql_subqueue
;
746 wait_queue_lock(sub_queue
);
747 if (sub_queue
->wq_isprepost
) {
748 wait_queue_sub_t wqs
= (wait_queue_sub_t
)sub_queue
;
751 * Preposting is only for subordinates and wait queue
752 * is the first element of subordinate
/* NOTE(review): wqs refcount update lost here (lines 753-755) */
756 if (! wait_queue_empty(sub_queue
))
757 _wait_queue_select_all(sub_queue
, event
, wake_queue
);
758 wait_queue_unlock(sub_queue
);
762 * Otherwise, its a thread. If it is waiting on
763 * the event we are posting to this queue, pull
764 * it off the queue and stick it in out wake_queue.
766 thread_t t
= (thread_t
)wq_element
;
768 if (t
->wait_event
== event
) {
770 remqueue(q
, (queue_entry_t
) t
);
771 enqueue (wake_queue
, (queue_entry_t
) t
);
/* clear the thread's wait state now that it is selected */
772 t
->wait_queue
= WAIT_QUEUE_NULL
;
773 t
->wait_event
= NO_EVENT
;
774 t
->at_safe_point
= FALSE
;
775 /* returned locked */
778 wq_element
= wqe_next
;
783 * Routine: wait_queue_wakeup_all_locked
785 * Wakeup some number of threads that are in the specified
786 * wait queue and waiting on the specified event.
788 * wait queue already locked (may be released).
790 * KERN_SUCCESS - Threads were woken up
791 * KERN_NOT_WAITING - No threads were waiting <wq,event> pair
794 wait_queue_wakeup_all_locked(
800 queue_head_t wake_queue_head
;
801 queue_t q
= &wake_queue_head
;
802 kern_return_t ret
= KERN_NOT_WAITING
;
804 assert(wait_queue_held(wq
));
809 * Select the threads that we will wake up. The threads
810 * are returned to us locked and cleanly removed from the
813 _wait_queue_select_all(wq
, event
, q
);
815 wait_queue_unlock(wq
);
818 * For each thread, set it running.
820 while (!queue_empty (q
)) {
821 thread_t thread
= (thread_t
) dequeue(q
);
822 thread_go_locked(thread
, result
);
823 thread_unlock(thread
);
831 * Routine: wait_queue_wakeup_all
833 * Wakeup some number of threads that are in the specified
834 * wait queue and waiting on the specified event.
840 * KERN_SUCCESS - Threads were woken up
841 * KERN_NOT_WAITING - No threads were waiting <wq,event> pair
844 wait_queue_wakeup_all(
854 ret
= wait_queue_wakeup_all_locked(wq
, event
, result
, TRUE
);
862 * Routine: wait_queue_select_one
864 * Select the best thread off a wait queue that meet the
871 * a locked thread - if one found
873 * This is where the sync policy of the wait queue comes
874 * into effect. For now, we just assume FIFO.
877 _wait_queue_select_one(
881 wait_queue_element_t wq_element
;
882 wait_queue_element_t wqe_next
;
883 thread_t t
= THREAD_NULL
;
890 wq_element
= (wait_queue_element_t
) queue_first(q
);
891 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
892 wqe_next
= (wait_queue_element_t
)
893 queue_next((queue_t
) wq_element
);
896 * We may have to recurse if this is a compound wait queue.
898 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
899 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
900 wait_queue_t sub_queue
;
903 * We have to check the subordinate wait queue.
905 sub_queue
= (wait_queue_t
)wql
->wql_subqueue
;
906 wait_queue_lock(sub_queue
);
907 if (! wait_queue_empty(sub_queue
)) {
908 t
= _wait_queue_select_one(sub_queue
, event
);
910 wait_queue_unlock(sub_queue
);
911 if (t
!= THREAD_NULL
)
916 * Otherwise, its a thread. If it is waiting on
917 * the event we are posting to this queue, pull
918 * it off the queue and stick it in out wake_queue.
920 thread_t t
= (thread_t
)wq_element
;
922 if (t
->wait_event
== event
) {
924 remqueue(q
, (queue_entry_t
) t
);
925 t
->wait_queue
= WAIT_QUEUE_NULL
;
926 t
->wait_event
= NO_EVENT
;
927 t
->at_safe_point
= FALSE
;
928 return t
; /* still locked */
931 wq_element
= wqe_next
;
/*
 * NOTE(review): incomplete in this copy - the parameter list (original
 * lines 956-960), the initialization of *tp/q (963-971) and the body of
 * the matching-thread branch (1006-1009, presumably storing *tp and
 * *wqp and returning) are lost.  Comments describe visible lines only.
 */
937 * Routine: wait_queue_peek_locked
939 * Select the best thread from a wait queue that meet the
940 * supplied criteria, but leave it on the queue you it was
941 * found on. The thread, and the actual wait_queue the
942 * thread was found on are identified.
948 * a locked thread - if one found
949 * a locked waitq - the one the thread was found on
951 * Only the waitq the thread was actually found on is locked
955 wait_queue_peek_locked(
961 wait_queue_element_t wq_element
;
962 wait_queue_element_t wqe_next
;
/* walk the queue without removing anything */
972 wq_element
= (wait_queue_element_t
) queue_first(q
);
973 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
974 wqe_next
= (wait_queue_element_t
)
975 queue_next((queue_t
) wq_element
);
978 * We may have to recurse if this is a compound wait queue.
980 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
981 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
982 wait_queue_t sub_queue
;
985 * We have to check the subordinate wait queue.
987 sub_queue
= (wait_queue_t
)wql
->wql_subqueue
;
988 wait_queue_lock(sub_queue
);
989 if (! wait_queue_empty(sub_queue
)) {
990 wait_queue_peek_locked(sub_queue
, event
, tp
, wqp
);
/* recursion found one: it stays locked along with its waitq */
992 if (*tp
!= THREAD_NULL
)
993 return; /* thread and its waitq locked */
995 wait_queue_unlock(sub_queue
);
999 * Otherwise, its a thread. If it is waiting on
1000 * the event we are posting to this queue, return
1001 * it locked, but leave it on the queue.
1003 thread_t t
= (thread_t
)wq_element
;
1005 if (t
->wait_event
== event
) {
1012 wq_element
= wqe_next
;
1017 * Routine: wait_queue_pull_thread_locked
1019 * Pull a thread that was previously "peeked" off the wait
1020 * queue and (possibly) unlock the waitq.
1026 * with the thread still locked.
1029 wait_queue_pull_thread_locked(
1035 assert(thread
->wait_queue
== waitq
);
1037 remqueue(&waitq
->wq_queue
, (queue_entry_t
)thread
);
1038 thread
->wait_queue
= WAIT_QUEUE_NULL
;
1039 thread
->wait_event
= NO_EVENT
;
1040 thread
->at_safe_point
= FALSE
;
1042 wait_queue_unlock(waitq
);
1047 * Routine: wait_queue_select_thread
1049 * Look for a thread and remove it from the queues, if
1050 * (and only if) the thread is waiting on the supplied
1051 * <wait_queue, event> pair.
1055 * possibly recursive
1057 * KERN_NOT_WAITING: Thread is not waiting here.
1058 * KERN_SUCCESS: It was, and is now removed (returned locked)
1061 _wait_queue_select_thread(
1066 wait_queue_element_t wq_element
;
1067 wait_queue_element_t wqe_next
;
1068 kern_return_t res
= KERN_NOT_WAITING
;
1069 queue_t q
= &wq
->wq_queue
;
1071 assert(wq
->wq_fifo
);
1073 thread_lock(thread
);
1074 if ((thread
->wait_queue
== wq
) && (thread
->wait_event
== event
)) {
1075 remqueue(q
, (queue_entry_t
) thread
);
1076 thread
->at_safe_point
= FALSE
;
1077 thread
->wait_event
= NO_EVENT
;
1078 thread
->wait_queue
= WAIT_QUEUE_NULL
;
1079 /* thread still locked */
1080 return KERN_SUCCESS
;
1082 thread_unlock(thread
);
1085 * The wait_queue associated with the thread may be one of this
1086 * wait queue's subordinates. Go see. If so, removing it from
1087 * there is like removing it from here.
1089 wq_element
= (wait_queue_element_t
) queue_first(q
);
1090 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
1091 wqe_next
= (wait_queue_element_t
)
1092 queue_next((queue_t
) wq_element
);
1094 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
1095 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
1096 wait_queue_t sub_queue
;
1098 sub_queue
= (wait_queue_t
)wql
->wql_subqueue
;
1099 wait_queue_lock(sub_queue
);
1100 if (! wait_queue_empty(sub_queue
)) {
1101 res
= _wait_queue_select_thread(sub_queue
,
1105 wait_queue_unlock(sub_queue
);
1106 if (res
== KERN_SUCCESS
)
1107 return KERN_SUCCESS
;
1109 wq_element
= wqe_next
;
1116 * Routine: wait_queue_wakeup_identity_locked
1118 * Select a single thread that is most-eligible to run and set
1119 * set it running. But return the thread locked.
1124 * possibly recursive
1126 * a pointer to the locked thread that was awakened
1129 wait_queue_wakeup_identity_locked(
1137 assert(wait_queue_held(wq
));
1139 thread
= _wait_queue_select_one(wq
, event
);
1141 wait_queue_unlock(wq
);
1144 thread_go_locked(thread
, result
);
1145 return thread
; /* still locked if not NULL */
1150 * Routine: wait_queue_wakeup_one_locked
1152 * Select a single thread that is most-eligible to run and set
1158 * possibly recursive
1160 * KERN_SUCCESS: It was, and is, now removed.
1161 * KERN_NOT_WAITING - No thread was waiting <wq,event> pair
1164 wait_queue_wakeup_one_locked(
1172 assert(wait_queue_held(wq
));
1174 thread
= _wait_queue_select_one(wq
, event
);
1176 wait_queue_unlock(wq
);
1179 thread_go_locked(thread
, result
);
1180 thread_unlock(thread
);
1181 return KERN_SUCCESS
;
1184 return KERN_NOT_WAITING
;
1188 * Routine: wait_queue_wakeup_one
1190 * Wakeup the most appropriate thread that is in the specified
1191 * wait queue for the specified event.
1197 * KERN_SUCCESS - Thread was woken up
1198 * KERN_NOT_WAITING - No thread was waiting <wq,event> pair
1201 wait_queue_wakeup_one(
1210 wait_queue_lock(wq
);
1211 thread
= _wait_queue_select_one(wq
, event
);
1212 wait_queue_unlock(wq
);
1215 thread_go_locked(thread
, result
);
1216 thread_unlock(thread
);
1218 return KERN_SUCCESS
;
1222 return KERN_NOT_WAITING
;
1228 * Routine: wait_queue_wakeup_thread_locked
1230 * Wakeup the particular thread that was specified if and only
1231 * it was in this wait queue (or one of it's subordinate queues)
1232 * and waiting on the specified event.
1234 * This is much safer than just removing the thread from
1235 * whatever wait queue it happens to be on. For instance, it
1236 * may have already been awoken from the wait you intended to
1237 * interrupt and waited on something else (like another
1241 * wait queue already locked (may be released).
1243 * KERN_SUCCESS - the thread was found waiting and awakened
1244 * KERN_NOT_WAITING - the thread was not waiting here
1247 wait_queue_wakeup_thread_locked(
1256 assert(wait_queue_held(wq
));
1259 * See if the thread was still waiting there. If so, it got
1260 * dequeued and returned locked.
1262 res
= _wait_queue_select_thread(wq
, event
, thread
);
1264 wait_queue_unlock(wq
);
1266 if (res
!= KERN_SUCCESS
)
1267 return KERN_NOT_WAITING
;
1269 thread_go_locked(thread
, result
);
1270 thread_unlock(thread
);
1271 return KERN_SUCCESS
;
1275 * Routine: wait_queue_wakeup_thread
1277 * Wakeup the particular thread that was specified if and only
1278 * it was in this wait queue (or one of it's subordinate queues)
1279 * and waiting on the specified event.
1281 * This is much safer than just removing the thread from
1282 * whatever wait queue it happens to be on. For instance, it
1283 * may have already been awoken from the wait you intended to
1284 * interrupt and waited on something else (like another
1287 * nothing of interest locked
1288 * we need to assume spl needs to be raised
1290 * KERN_SUCCESS - the thread was found waiting and awakened
1291 * KERN_NOT_WAITING - the thread was not waiting here
1294 wait_queue_wakeup_thread(
1304 wait_queue_lock(wq
);
1305 res
= _wait_queue_select_thread(wq
, event
, thread
);
1306 wait_queue_unlock(wq
);
1308 if (res
== KERN_SUCCESS
) {
1309 thread_go_locked(thread
, result
);
1310 thread_unlock(thread
);
1312 return KERN_SUCCESS
;
1315 return KERN_NOT_WAITING
;
1320 * Routine: wait_queue_remove
1322 * Normal removal operations from wait queues drive from the
1323 * wait queue to select a thread. However, if a thread is
1324 * interrupted out of a wait, this routine is called to
1325 * remove it from whatever wait queue it may be in.
1329 * thread locked on entry and exit, but may be dropped.
1332 * KERN_SUCCESS - if thread was in a wait queue
1333 * KERN_NOT_WAITING - it was not
1339 wait_queue_t wq
= thread
->wait_queue
;
1341 if (wq
== WAIT_QUEUE_NULL
)
1342 return KERN_NOT_WAITING
;
1345 * have to get the locks again in the right order.
1347 thread_unlock(thread
);
1348 wait_queue_lock(wq
);
1349 thread_lock(thread
);
1351 if (thread
->wait_queue
== wq
) {
1352 remqueue(&wq
->wq_queue
, (queue_entry_t
)thread
);
1353 thread
->wait_queue
= WAIT_QUEUE_NULL
;
1354 thread
->wait_event
= NO_EVENT
;
1355 thread
->at_safe_point
= FALSE
;
1356 wait_queue_unlock(wq
);
1357 return KERN_SUCCESS
;
1359 wait_queue_unlock(wq
);
1360 return KERN_NOT_WAITING
; /* anymore */