/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc...).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
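
/*
 * Usage note (illustrative sketch, not part of the original file): a
 * kernel object that embeds a wait queue initializes it once at
 * creation time.  The container type and setup routine below are
 * hypothetical; only wait_queue_init() and SYNC_POLICY_FIFO are real.
 *
 *	struct my_sync_object {			// hypothetical container
 *		struct wait_queue	waitq;	// embedded wait queue
 *		int			count;
 *	};
 *
 *	kern_return_t
 *	my_sync_object_setup(struct my_sync_object *obj)
 *	{
 *		obj->count = 0;
 *		return wait_queue_init(&obj->waitq, SYNC_POLICY_FIFO);
 *	}
 */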
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside of
 *		the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	kfree((vm_offset_t)wq, sizeof(struct wait_queue));
	return KERN_SUCCESS;
}
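
/*
 * Usage note (illustrative sketch): wait_queue_alloc() and
 * wait_queue_free() are intended to be paired by code outside the
 * Mach core.  wait_queue_free() fails if waiters are still queued,
 * so a caller should wake everyone first.  "my_event" stands in for
 * whatever event the waiters asserted on.
 *
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (wq != WAIT_QUEUE_NULL) {
 *		// ... wait/wakeup traffic on wq ...
 *		(void) wait_queue_wakeup_all(wq, my_event, THREAD_RESTART);
 *		(void) wait_queue_free(wq);	// KERN_FAILURE if non-empty
 *	}
 */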
/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_isprepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_isprepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}

/* legacy API */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}
/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Conditions:
 *		May block.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}
/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
	return KERN_SUCCESS;
}
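
/*
 * Usage note (illustrative sketch): a wait queue set groups several
 * member wait queues; with SYNC_POLICY_PREPOST, a wakeup that finds
 * no waiter is remembered in wqs_refcount and satisfies the next
 * assert-wait on the set.
 *
 *	wait_queue_set_t wqs;
 *
 *	wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
 *	if (wqs != WAIT_QUEUE_SET_NULL) {
 *		// ... link member queues in with wait_queue_link() ...
 *	}
 */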
kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	s = splsched();
	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);
	splx(s);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }

/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
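
/*
 * Note (editorial, illustrative): the two static variables above exist
 * only for their unique addresses.  A link element's wqe_type field
 * holds one of these sentinel addresses, so a plain pointer comparison
 * classifies queue elements; anything else on the queue is treated as
 * a thread:
 *
 *	if (wq_element->wqe_type == WAIT_QUEUE_LINK)
 *		// element is a wait_queue_link_t
 *	else
 *		// element is a waiting thread
 */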
#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
		  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
		  "wait queue element list corruption: wq=%#x, wqe=%#x", \
		  (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
		  ((wql)->wql_setqueue == (wqs)) && \
		  ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
		  (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
		  "wait queue set links corruption: wqs=%#x, wql=%#x", \
		  (wqs), (wql))

#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
__private_extern__ boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
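
/*
 * Usage note (illustrative sketch): membership can be tested before
 * linking or unlinking, e.g.
 *
 *	if (!wait_queue_member(wq, wqs))
 *		kr = wait_queue_link(wq, wqs);
 */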
/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a caller-supplied
 *		wait_queue_link structure.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	ret = wait_queue_link_noalloc(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));

	return ret;
}
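
/*
 * Usage note (illustrative sketch): linking a queue into a set and
 * tearing the link down again.  wait_queue_link() allocates the link
 * structure itself; callers that cannot block can preallocate one and
 * use wait_queue_link_noalloc() instead.
 *
 *	kern_return_t kr;
 *
 *	kr = wait_queue_link(wq, wqs);		// may block in kalloc()
 *	if (kr == KERN_SUCCESS) {
 *		// ...
 *		kr = wait_queue_unlink(wq, wqs);	// frees the link
 *	}
 */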
/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue being unlinked must be a member set queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlinkall_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and all its
 *		sets. The caller is responsible for freeing
 *		the wait queue link structures.
 */
kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are freed.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
	}

	return(KERN_SUCCESS);
}
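
/*
 * Usage note (illustrative sketch): before freeing a wait queue that
 * may have been linked into sets, sever all linkages first:
 *
 *	(void) wait_queue_unlink_all(wq);	// frees the link structures
 *	(void) wait_queue_free(wq);
 */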
/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are not freed, nor
 *		returned. It is the caller's responsibility to track and free
 *		them.
 *	Conditions:
 *		The wait queue being unlinked must be a member set queue
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}
/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are freed.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty (links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_one
 *	Purpose:
 *		Find and unlink one set wait queue
 *	Conditions:
 *		Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->vm_privilege)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
		thread->wait_event = event;
		thread->wait_queue = wq;
	}
	return(wait_result);
}
/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq)) {
		thread_t thread = current_thread();
		return (thread->wait_result = THREAD_RESTART);
	}

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(
			wq, (event64_t)((uint32_t)event),
			interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
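
/*
 * Usage note (illustrative sketch): the normal wait protocol is to
 * assert the wait, then block.  The thread_block() step shown here is
 * the sched_prim.c primitive; THREAD_CONTINUE_NULL and "my_event" are
 * assumed names for this sketch.
 *
 *	wait_result_t wr;
 *
 *	wr = wait_queue_assert_wait(wq, my_event, THREAD_UNINT);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	// wr is THREAD_AWAKENED after a normal wakeup
 */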
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq)) {
		thread_t thread = current_thread();
		return (thread->wait_result = THREAD_RESTART);
	}

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets and wait queue
				 * is the first element of set
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue (wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));
	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty (q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(
			wq, (event64_t)((uint32_t)event),
			result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
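
/*
 * Usage note (illustrative sketch): a producer making a state change
 * visible to every waiter posts the event with a broadcast wakeup;
 * "my_event" is whatever the waiters asserted on:
 *
 *	(void) wait_queue_wakeup_all(wq, my_event, THREAD_AWAKENED);
 */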
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Routine:	wait_queue_peek64_locked
 *	Purpose:
 *		Select the best thread from a wait queue that meets the
 *		supplied criteria, but leave it on the queue it was
 *		found on.  The thread, and the actual wait_queue the
 *		thread was found on are identified.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *		a locked waitq - the one the thread was found on
 *	Note:
 *		Both the waitq the thread was actually found on, and
 *		the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL) {
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;		/* thread and its waitq locked */
			}

			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread that was previously "peeked" off the wait
 *		queue and (possibly) unlock the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;

	if (unlock)
		wait_queue_unlock(waitq);
}
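
/*
 * Usage note (illustrative sketch): peek/pull lets a caller inspect
 * the most-eligible waiter before committing to the wakeup.  Both
 * routines are internal to the kernel proper, so this pattern is a
 * sketch only; the unlock argument shown assumes the caller wants to
 * keep the originally supplied queue locked:
 *
 *	thread_t t;
 *	wait_queue_t found_wq;
 *
 *	wait_queue_peek64_locked(wq, event, &t, &found_wq);
 *	if (t != THREAD_NULL) {
 *		// t and found_wq are locked here
 *		wait_queue_pull_thread_locked(found_wq, t, found_wq != wq);
 *		// ... set t running, then thread_unlock(t) ...
 *	}
 */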
/*
 *	Routine:	_wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								event,
								thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;  /* still locked if not NULL */
}
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS - a thread was found and awakened
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
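
/*
 * Usage note (illustrative sketch): a binary-semaphore style release
 * wakes exactly one waiter; KERN_NOT_WAITING tells the caller nobody
 * was queued, so the post can be recorded instead.  "my_event" and
 * the obj->pending field are hypothetical.
 *
 *	if (wait_queue_wakeup_one(wq, my_event, THREAD_AWAKENED)
 *						== KERN_NOT_WAITING) {
 *		// no waiter: remember the post for the next waiter
 *		obj->pending = TRUE;
 *	}
 */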
/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go_locked(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
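
/*
 * Usage note (illustrative sketch): a targeted wakeup is the safe way
 * to interrupt one specific thread's wait, e.g. for a timeout or abort
 * path that may have raced with an ordinary wakeup; "my_event" and "t"
 * are assumed names:
 *
 *	if (wait_queue_wakeup_thread(wq, my_event, t, THREAD_TIMED_OUT)
 *						== KERN_NOT_WAITING) {
 *		// t was already awakened, or is waiting elsewhere
 *	}
 */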
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set's queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}