2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 * @OSF_FREE_COPYRIGHT@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 * File: wait_queue.c (adapted from sched_prim.c)
54 * Author: Avadis Tevanian, Jr.
57 * Primitives for manipulating wait queues: either global
58 * ones from sched_prim.c, or private ones associated with
59 * particular structures(pots, semaphores, etc..).
62 #include <kern/kern_types.h>
63 #include <kern/simple_lock.h>
64 #include <kern/kalloc.h>
65 #include <kern/queue.h>
67 #include <mach/sync_policy.h>
69 #include <kern/sched_prim.h>
70 #include <kern/wait_queue.h>
77 wq
->wq_fifo
= (policy
== SYNC_POLICY_FIFO
);
79 queue_init(&wq
->wq_queue
);
80 hw_lock_init(&wq
->wq_interlock
);
85 wait_queue_sub_t wqsub
,
88 wait_queue_init(&wqsub
->wqs_wait_queue
, policy
);
89 wqsub
->wqs_wait_queue
.wq_issub
= TRUE
;
90 queue_init(&wqsub
->wqs_sublinks
);
95 wait_queue_link_t wql
)
97 queue_init(&wql
->wql_links
);
98 queue_init(&wql
->wql_sublinks
);
99 wql
->wql_queue
= WAIT_QUEUE_NULL
;
100 wql
->wql_subqueue
= WAIT_QUEUE_SUB_NULL
;
101 wql
->wql_event
= NO_EVENT
;
106 * Routine: wait_queue_lock
108 * Lock the wait queue.
110 * the appropriate spl level (if any) is already raised.
120 * Double the standard lock timeout, because wait queues tend
121 * to iterate over a number of threads - locking each. If there is
122 * a problem with a thread lock, it normally times out at the wait
123 * queue level first, hiding the real problem.
125 pc
= GET_RETURN_PC(&wq
);
126 if (!hw_lock_to(&wq
->wq_interlock
, LockTimeOut
* 2)) {
127 panic("wait queue deadlock detection - wq=0x%x, cpu=%d, ret=0x%x\n", wq
, cpu_number(), pc
);
130 hw_lock_lock(&wq
->wq_interlock
);
135 * Routine: wait_queue_lock_try
137 * Try to lock the wait queue without waiting
139 * the appropriate spl level (if any) is already raised.
141 * TRUE if the lock was acquired
142 * FALSE if we would have needed to wait
148 return hw_lock_try(&wq
->wq_interlock
);
152 * Routine: wait_queue_unlock
154 * unlock the wait queue
156 * The wait queue is assumed locked.
157 * appropriate spl level is still maintained
163 assert(hw_lock_held(&wq
->wq_interlock
));
165 hw_lock_unlock(&wq
->wq_interlock
);
/* phoney event for subordinate wait q elements; its address
 * (WAIT_QUEUE_SUBORDINATE) tags link structures on a queue so
 * walkers can tell them apart from real waiting threads. */
int _wait_queue_subordinate;
172 * Routine: wait_queue_member_locked
174 * Indicate if this sub queue is a member of the queue
176 * The wait queue is locked
177 * The sub queue is just that, a sub queue
180 wait_queue_member_locked(
182 wait_queue_sub_t wq_sub
)
184 wait_queue_element_t wq_element
;
187 assert(wait_queue_held(wq
));
188 assert(wait_queue_is_sub(wq_sub
));
192 wq_element
= (wait_queue_element_t
) queue_first(q
);
193 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
195 if ((wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
)) {
196 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
198 if (wql
->wql_subqueue
== wq_sub
)
201 wq_element
= (wait_queue_element_t
)
202 queue_next((queue_t
) wq_element
);
209 * Routine: wait_queue_member
211 * Indicate if this sub queue is a member of the queue
213 * The sub queue is just that, a sub queue
218 wait_queue_sub_t wq_sub
)
223 assert(wait_queue_is_sub(wq_sub
));
227 ret
= wait_queue_member_locked(wq
, wq_sub
);
228 wait_queue_unlock(wq
);
235 * Routine: wait_queue_link
237 * Insert a subordinate wait queue into a wait queue. This
238 * requires us to link the two together using a wait_queue_link
239 * structure that we allocate.
241 * The wait queue being inserted must be inited as a sub queue
242 * The sub waitq is not already linked
248 wait_queue_sub_t wq_sub
)
250 wait_queue_link_t wql
;
253 assert(wait_queue_is_sub(wq_sub
));
254 assert(!wait_queue_member(wq
, wq_sub
));
256 wql
= (wait_queue_link_t
) kalloc(sizeof(struct wait_queue_link
));
257 if (wql
== WAIT_QUEUE_LINK_NULL
)
258 return KERN_RESOURCE_SHORTAGE
;
260 wait_queue_link_init(wql
);
267 wql
->wql_subqueue
= wq_sub
;
268 wql
->wql_event
= WAIT_QUEUE_SUBORDINATE
;
269 queue_enter(&wq
->wq_queue
, wql
, wait_queue_link_t
, wql_links
);
270 queue_enter(&wq_sub
->wqs_sublinks
, wql
, wait_queue_link_t
, wql_sublinks
);
273 wait_queue_unlock(wq
);
280 * Routine: wait_queue_unlink
282 * Remove the linkage between a wait queue and its subordinate.
284 * The wait queue being must be a member sub queue
289 wait_queue_sub_t wq_sub
)
291 wait_queue_element_t wq_element
;
295 assert(wait_queue_is_sub(wq_sub
));
296 assert(wait_queue_member(wq
, wq_sub
));
304 wq_element
= (wait_queue_element_t
) queue_first(q
);
305 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
307 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
308 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
311 if (wql
->wql_subqueue
== wq_sub
) {
312 sq
= &wq_sub
->wqs_sublinks
;
313 queue_remove(q
, wql
, wait_queue_link_t
, wql_links
);
314 queue_remove(sq
, wql
, wait_queue_link_t
, wql_sublinks
);
316 wait_queue_unlock(wq
);
318 kfree((vm_offset_t
)wql
,sizeof(struct wait_queue_link
));
323 wq_element
= (wait_queue_element_t
)
324 queue_next((queue_t
) wq_element
);
326 panic("wait_queue_unlink");
330 * Routine: wait_queue_unlink_one
332 * Find and unlink one subordinate wait queue
334 * Nothing of interest locked.
337 wait_queue_unlink_one(
339 wait_queue_sub_t
*wq_subp
)
341 wait_queue_element_t wq_element
;
350 wq_element
= (wait_queue_element_t
) queue_first(q
);
351 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
353 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
354 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
355 wait_queue_sub_t wq_sub
= wql
->wql_subqueue
;
359 sq
= &wq_sub
->wqs_sublinks
;
360 queue_remove(q
, wql
, wait_queue_link_t
, wql_links
);
361 queue_remove(sq
, wql
, wait_queue_link_t
, wql_sublinks
);
363 wait_queue_unlock(wq
);
365 kfree((vm_offset_t
)wql
,sizeof(struct wait_queue_link
));
370 wq_element
= (wait_queue_element_t
)
371 queue_next((queue_t
) wq_element
);
373 wait_queue_unlock(wq
);
375 *wq_subp
= WAIT_QUEUE_SUB_NULL
;
379 * Routine: wait_queue_assert_wait_locked
381 * Insert the current thread into the supplied wait queue
382 * waiting for a particular event to be posted to that queue.
385 * The wait queue is assumed locked.
389 wait_queue_assert_wait_locked(
395 thread_t thread
= current_thread();
400 * This is the extent to which we currently take scheduling attributes
401 * into account. If the thread is vm priviledged, we stick it at
402 * the front of the queue. Later, these queues will honor the policy
403 * value set at wait_queue_init time.
405 if (thread
->vm_privilege
)
406 enqueue_head(&wq
->wq_queue
, (queue_entry_t
) thread
);
408 enqueue_tail(&wq
->wq_queue
, (queue_entry_t
) thread
);
409 thread
->wait_event
= event
;
410 thread
->wait_queue
= wq
;
411 thread_mark_wait_locked(thread
, interruptible
);
412 thread_unlock(thread
);
414 wait_queue_unlock(wq
);
418 * Routine: wait_queue_assert_wait
420 * Insert the current thread into the supplied wait queue
421 * waiting for a particular event to be posted to that queue.
424 * nothing of interest locked.
427 wait_queue_assert_wait(
436 wait_queue_assert_wait_locked(wq
, event
, interruptible
, TRUE
);
437 /* wait queue unlocked */
443 * Routine: wait_queue_select_all
445 * Select all threads off a wait queue that meet the
451 * wake_queue initialized and ready for insertion
455 * a queue of locked threads
458 _wait_queue_select_all(
463 wait_queue_element_t wq_element
;
464 wait_queue_element_t wqe_next
;
469 wq_element
= (wait_queue_element_t
) queue_first(q
);
470 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
471 wqe_next
= (wait_queue_element_t
)
472 queue_next((queue_t
) wq_element
);
475 * We may have to recurse if this is a compound wait queue.
477 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
478 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
479 wait_queue_t sub_queue
;
482 * We have to check the subordinate wait queue.
484 sub_queue
= (wait_queue_t
)wql
->wql_subqueue
;
485 wait_queue_lock(sub_queue
);
486 if (! wait_queue_empty(sub_queue
))
487 _wait_queue_select_all(sub_queue
, event
, wake_queue
);
488 wait_queue_unlock(sub_queue
);
492 * Otherwise, its a thread. If it is waiting on
493 * the event we are posting to this queue, pull
494 * it off the queue and stick it in out wake_queue.
496 thread_t t
= (thread_t
)wq_element
;
498 if (t
->wait_event
== event
) {
500 remqueue(q
, (queue_entry_t
) t
);
501 enqueue (wake_queue
, (queue_entry_t
) t
);
502 t
->wait_queue
= WAIT_QUEUE_NULL
;
503 t
->wait_event
= NO_EVENT
;
504 t
->at_safe_point
= FALSE
;
505 /* returned locked */
508 wq_element
= wqe_next
;
513 * Routine: wait_queue_wakeup_all_locked
515 * Wakeup some number of threads that are in the specified
516 * wait queue and waiting on the specified event.
518 * wait queue already locked (may be released).
520 * KERN_SUCCESS - Threads were woken up
521 * KERN_NOT_WAITING - No threads were waiting <wq,event> pair
524 wait_queue_wakeup_all_locked(
530 queue_head_t wake_queue_head
;
531 queue_t q
= &wake_queue_head
;
532 kern_return_t ret
= KERN_NOT_WAITING
;
534 assert(wait_queue_held(wq
));
539 * Select the threads that we will wake up. The threads
540 * are returned to us locked and cleanly removed from the
543 _wait_queue_select_all(wq
, event
, q
);
545 wait_queue_unlock(wq
);
548 * For each thread, set it running.
550 while (!queue_empty (q
)) {
551 thread_t thread
= (thread_t
) dequeue(q
);
552 thread_go_locked(thread
, result
);
553 thread_unlock(thread
);
561 * Routine: wait_queue_wakeup_all
563 * Wakeup some number of threads that are in the specified
564 * wait queue and waiting on the specified event.
570 * KERN_SUCCESS - Threads were woken up
571 * KERN_NOT_WAITING - No threads were waiting <wq,event> pair
574 wait_queue_wakeup_all(
584 ret
= wait_queue_wakeup_all_locked(wq
, event
, result
, TRUE
);
592 * Routine: wait_queue_select_one
594 * Select the best thread off a wait queue that meet the
601 * a locked thread - if one found
603 * This is where the sync policy of the wait queue comes
604 * into effect. For now, we just assume FIFO.
607 _wait_queue_select_one(
611 wait_queue_element_t wq_element
;
612 wait_queue_element_t wqe_next
;
613 thread_t t
= THREAD_NULL
;
620 wq_element
= (wait_queue_element_t
) queue_first(q
);
621 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
622 wqe_next
= (wait_queue_element_t
)
623 queue_next((queue_t
) wq_element
);
626 * We may have to recurse if this is a compound wait queue.
628 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
629 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
630 wait_queue_t sub_queue
;
633 * We have to check the subordinate wait queue.
635 sub_queue
= (wait_queue_t
)wql
->wql_subqueue
;
636 wait_queue_lock(sub_queue
);
637 if (! wait_queue_empty(sub_queue
)) {
638 t
= _wait_queue_select_one(sub_queue
, event
);
640 wait_queue_unlock(sub_queue
);
641 if (t
!= THREAD_NULL
)
646 * Otherwise, its a thread. If it is waiting on
647 * the event we are posting to this queue, pull
648 * it off the queue and stick it in out wake_queue.
650 thread_t t
= (thread_t
)wq_element
;
652 if (t
->wait_event
== event
) {
654 remqueue(q
, (queue_entry_t
) t
);
655 t
->wait_queue
= WAIT_QUEUE_NULL
;
656 t
->wait_event
= NO_EVENT
;
657 t
->at_safe_point
= FALSE
;
658 return t
; /* still locked */
661 wq_element
= wqe_next
;
667 * Routine: wait_queue_peek_locked
669 * Select the best thread from a wait queue that meet the
670 * supplied criteria, but leave it on the queue you it was
671 * found on. The thread, and the actual wait_queue the
672 * thread was found on are identified.
678 * a locked thread - if one found
679 * a locked waitq - the one the thread was found on
681 * Only the waitq the thread was actually found on is locked
685 wait_queue_peek_locked(
691 wait_queue_element_t wq_element
;
692 wait_queue_element_t wqe_next
;
702 wq_element
= (wait_queue_element_t
) queue_first(q
);
703 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
704 wqe_next
= (wait_queue_element_t
)
705 queue_next((queue_t
) wq_element
);
708 * We may have to recurse if this is a compound wait queue.
710 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
711 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
712 wait_queue_t sub_queue
;
715 * We have to check the subordinate wait queue.
717 sub_queue
= (wait_queue_t
)wql
->wql_subqueue
;
718 wait_queue_lock(sub_queue
);
719 if (! wait_queue_empty(sub_queue
)) {
720 wait_queue_peek_locked(sub_queue
, event
, tp
, wqp
);
722 if (*tp
!= THREAD_NULL
)
723 return; /* thread and its waitq locked */
725 wait_queue_unlock(sub_queue
);
729 * Otherwise, its a thread. If it is waiting on
730 * the event we are posting to this queue, return
731 * it locked, but leave it on the queue.
733 thread_t t
= (thread_t
)wq_element
;
735 if (t
->wait_event
== event
) {
742 wq_element
= wqe_next
;
747 * Routine: wait_queue_pull_thread_locked
749 * Pull a thread that was previously "peeked" off the wait
750 * queue and (possibly) unlock the waitq.
756 * with the thread still locked.
759 wait_queue_pull_thread_locked(
765 assert(thread
->wait_queue
== waitq
);
767 remqueue(&waitq
->wq_queue
, (queue_entry_t
)thread
);
768 thread
->wait_queue
= WAIT_QUEUE_NULL
;
769 thread
->wait_event
= NO_EVENT
;
770 thread
->at_safe_point
= FALSE
;
772 wait_queue_unlock(waitq
);
777 * Routine: wait_queue_select_thread
779 * Look for a thread and remove it from the queues, if
780 * (and only if) the thread is waiting on the supplied
781 * <wait_queue, event> pair.
787 * KERN_NOT_WAITING: Thread is not waiting here.
788 * KERN_SUCCESS: It was, and is now removed (returned locked)
791 _wait_queue_select_thread(
796 wait_queue_element_t wq_element
;
797 wait_queue_element_t wqe_next
;
798 kern_return_t res
= KERN_NOT_WAITING
;
799 queue_t q
= &wq
->wq_queue
;
804 if ((thread
->wait_queue
== wq
) && (thread
->wait_event
== event
)) {
805 remqueue(q
, (queue_entry_t
) thread
);
806 thread
->at_safe_point
= FALSE
;
807 thread
->wait_event
= NO_EVENT
;
808 thread
->wait_queue
= WAIT_QUEUE_NULL
;
809 /* thread still locked */
812 thread_unlock(thread
);
815 * The wait_queue associated with the thread may be one of this
816 * wait queue's subordinates. Go see. If so, removing it from
817 * there is like removing it from here.
819 wq_element
= (wait_queue_element_t
) queue_first(q
);
820 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
821 wqe_next
= (wait_queue_element_t
)
822 queue_next((queue_t
) wq_element
);
824 if (wq_element
->wqe_event
== WAIT_QUEUE_SUBORDINATE
) {
825 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
826 wait_queue_t sub_queue
;
828 sub_queue
= (wait_queue_t
)wql
->wql_subqueue
;
829 wait_queue_lock(sub_queue
);
830 if (! wait_queue_empty(sub_queue
)) {
831 res
= _wait_queue_select_thread(sub_queue
,
835 wait_queue_unlock(sub_queue
);
836 if (res
== KERN_SUCCESS
)
839 wq_element
= wqe_next
;
846 * Routine: wait_queue_wakeup_identity_locked
848 * Select a single thread that is most-eligible to run and set
849 * set it running. But return the thread locked.
856 * a pointer to the locked thread that was awakened
859 wait_queue_wakeup_identity_locked(
867 assert(wait_queue_held(wq
));
869 thread
= _wait_queue_select_one(wq
, event
);
871 wait_queue_unlock(wq
);
874 thread_go_locked(thread
, result
);
875 return thread
; /* still locked if not NULL */
880 * Routine: wait_queue_wakeup_one_locked
882 * Select a single thread that is most-eligible to run and set
890 * KERN_SUCCESS: It was, and is, now removed.
891 * KERN_NOT_WAITING - No thread was waiting <wq,event> pair
894 wait_queue_wakeup_one_locked(
902 assert(wait_queue_held(wq
));
904 thread
= _wait_queue_select_one(wq
, event
);
906 wait_queue_unlock(wq
);
909 thread_go_locked(thread
, result
);
910 thread_unlock(thread
);
914 return KERN_NOT_WAITING
;
918 * Routine: wait_queue_wakeup_one
920 * Wakeup the most appropriate thread that is in the specified
921 * wait queue for the specified event.
927 * KERN_SUCCESS - Thread was woken up
928 * KERN_NOT_WAITING - No thread was waiting <wq,event> pair
931 wait_queue_wakeup_one(
941 thread
= _wait_queue_select_one(wq
, event
);
942 wait_queue_unlock(wq
);
945 thread_go_locked(thread
, result
);
946 thread_unlock(thread
);
952 return KERN_NOT_WAITING
;
958 * Routine: wait_queue_wakeup_thread_locked
960 * Wakeup the particular thread that was specified if and only
961 * it was in this wait queue (or one of it's subordinate queues)
962 * and waiting on the specified event.
964 * This is much safer than just removing the thread from
965 * whatever wait queue it happens to be on. For instance, it
966 * may have already been awoken from the wait you intended to
967 * interrupt and waited on something else (like another
971 * wait queue already locked (may be released).
973 * KERN_SUCCESS - the thread was found waiting and awakened
974 * KERN_NOT_WAITING - the thread was not waiting here
977 wait_queue_wakeup_thread_locked(
986 assert(wait_queue_held(wq
));
989 * See if the thread was still waiting there. If so, it got
990 * dequeued and returned locked.
992 res
= _wait_queue_select_thread(wq
, event
, thread
);
994 wait_queue_unlock(wq
);
996 if (res
!= KERN_SUCCESS
)
997 return KERN_NOT_WAITING
;
999 thread_go_locked(thread
, result
);
1000 thread_unlock(thread
);
1001 return KERN_SUCCESS
;
1005 * Routine: wait_queue_wakeup_thread
1007 * Wakeup the particular thread that was specified if and only
1008 * it was in this wait queue (or one of it's subordinate queues)
1009 * and waiting on the specified event.
1011 * This is much safer than just removing the thread from
1012 * whatever wait queue it happens to be on. For instance, it
1013 * may have already been awoken from the wait you intended to
1014 * interrupt and waited on something else (like another
1017 * nothing of interest locked
1018 * we need to assume spl needs to be raised
1020 * KERN_SUCCESS - the thread was found waiting and awakened
1021 * KERN_NOT_WAITING - the thread was not waiting here
1024 wait_queue_wakeup_thread(
1034 wait_queue_lock(wq
);
1035 res
= _wait_queue_select_thread(wq
, event
, thread
);
1036 wait_queue_unlock(wq
);
1038 if (res
== KERN_SUCCESS
) {
1039 thread_go_locked(thread
, result
);
1040 thread_unlock(thread
);
1042 return KERN_SUCCESS
;
1045 return KERN_NOT_WAITING
;
1050 * Routine: wait_queue_remove
1052 * Normal removal operations from wait queues drive from the
1053 * wait queue to select a thread. However, if a thread is
1054 * interrupted out of a wait, this routine is called to
1055 * remove it from whatever wait queue it may be in.
1059 * thread locked on entry and exit, but may be dropped.
1062 * KERN_SUCCESS - if thread was in a wait queue
1063 * KERN_NOT_WAITING - it was not
1069 wait_queue_t wq
= thread
->wait_queue
;
1071 if (wq
== WAIT_QUEUE_NULL
)
1072 return KERN_NOT_WAITING
;
1075 * have to get the locks again in the right order.
1077 thread_unlock(thread
);
1078 wait_queue_lock(wq
);
1079 thread_lock(thread
);
1081 if (thread
->wait_queue
== wq
) {
1082 remqueue(&wq
->wq_queue
, (queue_entry_t
)thread
);
1083 thread
->wait_queue
= WAIT_QUEUE_NULL
;
1084 thread
->wait_event
= NO_EVENT
;
1085 thread
->at_safe_point
= FALSE
;
1086 wait_queue_unlock(wq
);
1087 return KERN_SUCCESS
;
1089 wait_queue_unlock(wq
);
1090 return KERN_NOT_WAITING
; /* anymore */