/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>

#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
/* forward declarations */
static boolean_t wait_queue_member_locked(
			wait_queue_t wq,
			wait_queue_set_t wq_set);

void wait_queue_unlink_one(
			wait_queue_t wq,
			wait_queue_set_t *wq_setp);

kern_return_t wait_queue_set_unlink_all_nofree(
			wait_queue_set_t wq_set);

/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
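
/*
 * Example usage (an illustrative sketch; "my_object" and "mo_waitq" are
 * hypothetical names, not part of this interface):
 *
 *	struct my_object {
 *		struct wait_queue	mo_waitq;
 *	};
 *
 *	kern_return_t
 *	my_object_setup(struct my_object *mo)
 *	{
 *		return wait_queue_init(&mo->mo_waitq, SYNC_POLICY_FIFO);
 *	}
 *
 * Only SYNC_POLICY_FIFO ordering is accepted by wait_queue_init today.
 */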

/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside of
 *		the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree(wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}

/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	kfree(wq, sizeof(struct wait_queue));
	return KERN_SUCCESS;
}
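
/*
 * Example usage (an illustrative sketch): the alloc/free pair for
 * callers outside the Mach portion of the kernel.  Note that
 * wait_queue_free() refuses to free a queue that still has waiters
 * or set links queued on it.
 *
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	...
 *	(void) wait_queue_free(wq);
 */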

/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_isprepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_isprepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}

kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}

kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);
	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			kfree(wq_set, sizeof(struct wait_queue_set));
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}

/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	kfree(wq_set, sizeof(struct wait_queue_set));
	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
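
/*
 * Example usage (an illustrative sketch): a client outside this file can
 * allocate storage for the opaque link structure with the size routines
 * above and hand it to wait_queue_link_noalloc() below.
 *
 *	kern_return_t kr;
 *	wait_queue_link_t wql;
 *
 *	wql = (wait_queue_link_t) kalloc(wait_queue_link_size());
 *	if (wql != NULL)
 *		kr = wait_queue_link_noalloc(wq, wq_set, wql);
 */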

/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)

#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
	  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
	  "wait queue element list corruption: wq=%#x, wqe=%#x", \
	  (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
			((wql)->wql_setqueue == (wqs)) && \
			((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
			(WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
			"wait queue set links corruption: wqs=%#x, wql=%#x", \
			(wqs), (wql))

#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */

/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}
	return FALSE;
}

/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
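
/*
 * Example usage (an illustrative sketch): querying whether a wait queue
 * is already linked into a set before deciding to link it.
 *
 *	kern_return_t kr;
 *
 *	if (!wait_queue_member(wq, wq_set))
 *		kr = wait_queue_link(wq, wq_set);
 *
 * The check is advisory only; wait_queue_link() itself reports
 * KERN_ALREADY_IN_SET if another path linked the queue first.
 */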

/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that the caller allocated.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	ret = wait_queue_link_noalloc(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		kfree(wql, sizeof(struct wait_queue_link));

	return ret;
}
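
/*
 * Example usage (an illustrative sketch): linking a wait queue into a
 * set, port-set style, so that a single wait on the set covers events
 * posted to any member queue.
 *
 *	kern_return_t kr;
 *
 *	kr = wait_queue_link(wq, wq_set);
 *	if (kr == KERN_RESOURCE_SHORTAGE)
 *		...no memory for the link structure...
 *	else if (kr == KERN_ALREADY_IN_SET)
 *		...wq was already a member of wq_set...
 */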

/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}

/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue being unlinked must be a member of the set queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree(wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
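
/*
 * Example usage (an illustrative sketch): the inverse of the link
 * example above; the link structure is freed on success.
 *
 *	kern_return_t kr;
 *
 *	kr = wait_queue_unlink(wq, wq_set);
 *	if (kr == KERN_NOT_IN_SET)
 *		...wq was not linked to wq_set...
 */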

/*
 *	Routine:	wait_queue_unlinkall_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and all its
 *		sets.  The caller is responsible for freeing
 *		the wait queue link structures.
 */
kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	return(KERN_SUCCESS);
}

/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are freed.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree(wql, sizeof(struct wait_queue_link));
	}

	return(KERN_SUCCESS);
}

/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues.  The link structures are not freed, nor
 *		returned.  It is the caller's responsibility to track and free
 *		them.
 *	Conditions:
 *		The wait queue set being unlinked must be inited as a set queue
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}

/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}

/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues.  The link structures are freed.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree(wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}

/*
 *	Routine:	wait_queue_unlink_one
 *	Purpose:
 *		Find and unlink one set wait queue
 *	Conditions:
 *		Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree(wql, sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}

/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->options & TH_OPT_VMPRIV)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			if (!timer_call_enter(&thread->wait_timer, deadline))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}

/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
					      interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
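
/*
 * Example usage (an illustrative sketch): the canonical wait pattern.
 * The caller asserts the wait, then blocks; the event is whatever
 * address the waker will post to (the cast of the hypothetical "mo"
 * pointer is for illustration), and a deadline of 0 means no timeout.
 *
 *	wait_result_t wres;
 *
 *	wres = wait_queue_assert_wait(wq, (event_t)mo, THREAD_UNINT, 0);
 *	if (wres == THREAD_WAITING)
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 */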

/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}

/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
				queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets and wait queue
				 * is the first element of set
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue(wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}

/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

//	assert(wait_queue_held(wq));
	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP) */
		panic("wait_queue_wakeup64_all_locked: lock not held on %08X\n", wq);	/* (BRINGUP) */
	}

	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty(q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}

/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP) */
		panic("wait_queue_wakeup_all: we did not get the lock on %08X\n", wq);	/* (BRINGUP) */
	}
	ret = wait_queue_wakeup64_all_locked(
				wq, (event64_t)((uint32_t)event),
				result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
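
/*
 * Example usage (an illustrative sketch): the waker side of the pattern
 * shown after wait_queue_assert_wait() above, posting to the same
 * hypothetical event address.
 *
 *	kern_return_t kr;
 *
 *	kr = wait_queue_wakeup_all(wq, (event_t)mo, THREAD_AWAKENED);
 *	if (kr == KERN_NOT_WAITING)
 *		...no thread was waiting on the <wq, mo> pair...
 */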

/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}

/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
				queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue))
				t = _wait_queue_select64_one(set_queue, event);
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			t = (thread_t)wq_element;
			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}

			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}

/*
 *	Routine:	wait_queue_peek64_locked
 *	Purpose:
 *		Select the best thread from a wait queue that meets the
 *		supplied criteria, but leave it on the queue it was
 *		found on.  The thread, and the actual wait_queue the
 *		thread was found on are identified.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *		a locked waitq - the one the thread was found on
 *	Note:
 *		Both the waitq the thread was actually found on, and
 *		the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
				queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue))
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			if (*tp != THREAD_NULL) {
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;		/* thread and its waitq locked */
			}

			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}

/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread that was previously "peeked" off the wait
 *		queue and (possibly) unlock the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;

	if (unlock)
		wait_queue_unlock(waitq);
}

/*
 *	Routine:	wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
				queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								  event,
								  thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}

/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}

/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS: It was, and is, now removed.
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}

/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
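
/*
 * Example usage (an illustrative sketch): wake exactly one waiter, e.g.
 * for a semaphore-style signal where only one thread should proceed;
 * "mo" is the same hypothetical event address as in the earlier sketches.
 *
 *	kern_return_t kr;
 *
 *	kr = wait_queue_wakeup_one(wq, (event_t)mo, THREAD_AWAKENED);
 *	if (kr == KERN_NOT_WAITING)
 *		...nobody was waiting; record the signal instead...
 */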

/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}

/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}

/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
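
/*
 * Example usage (an illustrative sketch): a targeted wakeup, e.g. to
 * abort one specific thread's wait without disturbing other waiters on
 * the same hypothetical event address "mo".
 *
 *	kern_return_t kr;
 *
 *	kr = wait_queue_wakeup_thread(wq, (event_t)mo, thread,
 *				      THREAD_INTERRUPTED);
 *	if (kr == KERN_NOT_WAITING)
 *		...thread was not waiting on the <wq, mo> pair here...
 */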

/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set's queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}