/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
/* forward declarations */
static boolean_t wait_queue_member_locked(
			wait_queue_t wq,
			wait_queue_set_t wq_set);

void wait_queue_unlink_one(
			wait_queue_t wq,
			wait_queue_set_t *wq_setp);

kern_return_t wait_queue_set_unlink_all_nofree(
			wait_queue_set_t wq_set);
/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside of
 *		the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree(wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	kfree(wq, sizeof(struct wait_queue));
	return KERN_SUCCESS;
}
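
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the expected allocate/free lifecycle for a standalone wait
 * queue.  Assumes only routines defined in this file; compiled out.
 */
#if 0
static void
wait_queue_lifecycle_sketch(void)
{
	wait_queue_t wq;

	wq = wait_queue_alloc(SYNC_POLICY_FIFO);
	if (wq == WAIT_QUEUE_NULL)
		return;				/* resource shortage */

	/* ... threads may wait on and be woken from wq here ... */

	/* freeing fails (KERN_FAILURE) unless the queue is empty */
	(void) wait_queue_free(wq);
}
#endif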
/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_isprepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_isprepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}
/* legacy interface naming */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}
kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Conditions:
 *		May block.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			kfree(wq_set, sizeof(struct wait_queue_set));
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}
/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	kfree(wq_set, sizeof(struct wait_queue_set));
	return KERN_SUCCESS;
}
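
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): creating a preposting wait queue set, as a port-set-like
 * consumer might.  Assumes only routines in this file; compiled out.
 */
#if 0
static void
wait_queue_set_lifecycle_sketch(void)
{
	wait_queue_set_t wq_set;

	/* SYNC_POLICY_PREPOST makes wakeups on an empty set "stick" */
	wq_set = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
	if (wq_set == WAIT_QUEUE_SET_NULL)
		return;				/* resource shortage */

	/* ... member queues are linked in with wait_queue_link() ... */

	/* all members must be unlinked before the set can be freed */
	(void) wait_queue_set_unlink_all(wq_set);
	(void) wait_queue_set_free(wq_set);
}
#endif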
/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
	  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
	  "wait queue element list corruption: wq=%#x, wqe=%#x", \
	  (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
	  ((wql)->wql_setqueue == (wqs)) && \
	  ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
	  (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
	  "wait queue set links corruption: wqs=%#x, wql=%#x", \
	  (wqs), (wql))
#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		links the two together using the caller-supplied
 *		wait_queue_link structure (no allocation is done here).
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	ret = wait_queue_link_noalloc(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		kfree(wql, sizeof(struct wait_queue_link));

	return ret;
}
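
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): linking a queue into a set and checking membership.
 * Assumes only routines in this file; compiled out.
 */
#if 0
static void
wait_queue_link_sketch(wait_queue_t wq, wait_queue_set_t wq_set)
{
	kern_return_t kr;

	kr = wait_queue_link(wq, wq_set);	/* allocates the link */
	if (kr != KERN_SUCCESS)
		return;		/* KERN_ALREADY_IN_SET, shortage, etc. */

	assert(wait_queue_member(wq, wq_set));

	/* undo the linkage; this also frees the link structure */
	(void) wait_queue_unlink(wq, wq_set);
}
#endif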
/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The set queue must be a member of the wait queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree(wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlinkall_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and all its
 *		sets.  The caller is responsible for freeing
 *		the wait queue link structures.
 */
kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are freed.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree(wql, sizeof(struct wait_queue_link));
	}

	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues.  The link structures are not freed, nor
 *		returned.  It is the caller's responsibility to track and free
 *		them.
 *	Conditions:
 *		The wait queue must be a set queue
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}
/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues.  The link structures are freed.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree(wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_one
 *	Purpose:
 *		Find and unlink one set wait queue
 *	Conditions:
 *		Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree(wql, sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->options & TH_OPT_VMPRIV)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			if (!timer_call_enter(&thread->wait_timer, deadline))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}
/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
					      interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
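
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the usual assert_wait/wakeup handshake on an event in the
 * style of this API.  Compiled out; `my_wq` and `my_event` are
 * hypothetical names.
 */
#if 0
static void
wait_queue_handshake_sketch(wait_queue_t my_wq, event_t my_event)
{
	wait_result_t wr;

	/* waiter side: queue ourselves, then block */
	wr = wait_queue_assert_wait(my_wq, my_event,
				    THREAD_UNINT, 0 /* no deadline */);
	if (wr == THREAD_WAITING)
		wr = thread_block(THREAD_CONTINUE_NULL);

	/* waker side (runs on another thread): post the event */
	(void) wait_queue_wakeup_all(my_wq, my_event, THREAD_AWAKENED);
}
#endif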
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets and wait queue
				 * is the first element of set
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue(wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));
	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty(q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
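
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): using the _locked variant when the caller already holds
 * the wait queue lock at splsched.  Compiled out; `my_wq` and
 * `my_event64` are hypothetical names.
 */
#if 0
static void
wait_queue_wakeup_locked_sketch(wait_queue_t my_wq, event64_t my_event64)
{
	spl_t s;

	s = splsched();
	wait_queue_lock(my_wq);
	/* ... inspect state while the queue is still locked ... */
	(void) wait_queue_wakeup64_all_locked(my_wq, my_event64,
					      THREAD_AWAKENED, TRUE);
	/* unlock == TRUE: the call released the wait queue lock */
	splx(s);
}
#endif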
/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(
				wq, (event64_t)((uint32_t)event),
				result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			t = (thread_t)wq_element;
			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}

			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Routine:	wait_queue_peek64_locked
 *	Purpose:
 *		Select the best thread from a wait queue that meets the
 *		supplied criteria, but leave it on the queue it was
 *		found on.  The thread, and the actual wait_queue the
 *		thread was found on, are identified.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *		a locked waitq - the one the thread was found on
 *	Note:
 *		Both the waitq the thread was actually found on, and
 *		the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL) {
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;		/* thread and its waitq locked */
			}

			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread that was previously "peeked" off the wait
 *		queue and (possibly) unlock the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
	if (unlock)
		wait_queue_unlock(waitq);
}
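
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the peek-then-pull pattern the two routines above support.
 * Compiled out; assumes the caller is at splsched with wq locked, and
 * that the found waitq should be unlocked if it differs from wq.
 */
#if 0
static void
wait_queue_peek_pull_sketch(wait_queue_t wq, event64_t event)
{
	thread_t thread;
	wait_queue_t found_wq;

	wait_queue_peek64_locked(wq, event, &thread, &found_wq);
	if (thread != THREAD_NULL) {
		/* thread and found_wq are both locked here */
		wait_queue_pull_thread_locked(found_wq, thread,
					      found_wq != wq);
		/* thread is now off the queue but still locked */
		thread_unlock(thread);
	}
}
#endif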
/*
 *	Routine:	_wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								event,
								thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS - a thread was found waiting and awakened
 *		KERN_NOT_WAITING - no thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
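
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): waking one specific thread, which only succeeds if that
 * thread is still waiting on the given <wait queue, event> pair.
 * Compiled out; names are hypothetical.
 */
#if 0
static void
wait_queue_targeted_wakeup_sketch(wait_queue_t my_wq,
				  event_t my_event,
				  thread_t target)
{
	kern_return_t kr;

	kr = wait_queue_wakeup_thread(my_wq, my_event,
				      target, THREAD_AWAKENED);
	if (kr == KERN_NOT_WAITING) {
		/* target already woke, or was never waiting here */
	}
}
#endif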
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}