/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (pots, semaphores, etc..).
 */
62 #include <kern/kern_types.h>
63 #include <kern/simple_lock.h>
64 #include <kern/kalloc.h>
65 #include <kern/queue.h>
67 #include <mach/sync_policy.h>
68 #include <kern/sched_prim.h>
70 #include <kern/wait_queue.h>
72 /* forward declarations */
73 static boolean_t
wait_queue_member_locked(
75 wait_queue_set_t wq_set
);
77 void wait_queue_unlink_one(
79 wait_queue_set_t
*wq_setp
);
81 kern_return_t
wait_queue_set_unlink_all_nofree(
82 wait_queue_set_t wq_set
);
/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
97 if (!((policy
& SYNC_POLICY_ORDER_MASK
) == SYNC_POLICY_FIFO
))
98 return KERN_INVALID_ARGUMENT
;
101 wq
->wq_type
= _WAIT_QUEUE_inited
;
102 queue_init(&wq
->wq_queue
);
103 hw_lock_init(&wq
->wq_interlock
);
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside of
 *		the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
125 wq
= (wait_queue_t
) kalloc(sizeof(struct wait_queue
));
126 if (wq
!= WAIT_QUEUE_NULL
) {
127 ret
= wait_queue_init(wq
, policy
);
128 if (ret
!= KERN_SUCCESS
) {
129 kfree(wq
, sizeof(struct wait_queue
));
130 wq
= WAIT_QUEUE_NULL
;
137 * Routine: wait_queue_free
139 * Free an allocated wait queue.
147 if (!wait_queue_is_queue(wq
))
148 return KERN_INVALID_ARGUMENT
;
149 if (!queue_empty(&wq
->wq_queue
))
151 kfree(wq
, sizeof(struct wait_queue
));
156 * Routine: wait_queue_set_init
158 * Initialize a previously allocated wait queue set.
160 * KERN_SUCCESS - The wait_queue_set_t was initialized
161 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
165 wait_queue_set_t wqset
,
170 ret
= wait_queue_init(&wqset
->wqs_wait_queue
, policy
);
171 if (ret
!= KERN_SUCCESS
)
174 wqset
->wqs_wait_queue
.wq_type
= _WAIT_QUEUE_SET_inited
;
175 if (policy
& SYNC_POLICY_PREPOST
)
176 wqset
->wqs_wait_queue
.wq_isprepost
= TRUE
;
178 wqset
->wqs_wait_queue
.wq_isprepost
= FALSE
;
179 queue_init(&wqset
->wqs_setlinks
);
180 wqset
->wqs_refcount
= 0;
187 wait_queue_set_t wqset
,
190 return wait_queue_set_init(wqset
, policy
);
194 wait_queue_sub_clearrefs(
195 wait_queue_set_t wq_set
)
197 if (!wait_queue_is_set(wq_set
))
198 return KERN_INVALID_ARGUMENT
;
201 wq_set
->wqs_refcount
= 0;
207 * Routine: wait_queue_set_alloc
209 * Allocate and initialize a wait queue set for
210 * use outside of the mach part of the kernel.
214 * The allocated and initialized wait queue set
215 * WAIT_QUEUE_SET_NULL if there is a resource shortage
218 wait_queue_set_alloc(
221 wait_queue_set_t wq_set
;
223 wq_set
= (wait_queue_set_t
) kalloc(sizeof(struct wait_queue_set
));
224 if (wq_set
!= WAIT_QUEUE_SET_NULL
) {
227 ret
= wait_queue_set_init(wq_set
, policy
);
228 if (ret
!= KERN_SUCCESS
) {
229 kfree(wq_set
, sizeof(struct wait_queue_set
));
230 wq_set
= WAIT_QUEUE_SET_NULL
;
237 * Routine: wait_queue_set_free
239 * Free an allocated wait queue set
245 wait_queue_set_t wq_set
)
247 if (!wait_queue_is_set(wq_set
))
248 return KERN_INVALID_ARGUMENT
;
250 if (!queue_empty(&wq_set
->wqs_wait_queue
.wq_queue
))
253 kfree(wq_set
, sizeof(struct wait_queue_set
));
260 * Routine: wait_queue_set_size
261 * Routine: wait_queue_link_size
263 * Return the size of opaque wait queue structures
265 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet
); }
266 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink
); }
/*
 * Declare a unique type tag for wait queue link structures.  The tag is
 * simply the address of a private file-scope variable, which is guaranteed
 * distinct from every other pointer in the system, so wqe_type/wql_type
 * fields can be compared against these markers to classify queue elements.
 */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK		((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED	((void *)&_wait_queue_unlinked)
275 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
276 WQASSERT(((wqe)->wqe_queue == (wq) && \
277 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
278 "wait queue element list corruption: wq=%#x, wqe=%#x", \
/*
 * WQSPREV/WQSNEXT step backward/forward through a wait queue set's list of
 * member links.  The set's list head is its wqs_setlinks field, while each
 * link element is chained through its own wql_setlinks entry; the ternary
 * picks the correct queue_entry depending on whether the current position
 * is the head or a link element.
 */
#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			 (queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			 (queue_t)(wql) : &(wql)->wql_setlinks)))
289 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
290 WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
291 ((wql)->wql_setqueue == (wqs)) && \
292 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
293 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
294 "wait queue set links corruption: wqs=%#x, wql=%#x", \
297 #if defined(_WAIT_QUEUE_DEBUG_)
299 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
301 #define WAIT_QUEUE_CHECK(wq) \
303 queue_t q2 = &(wq)->wq_queue; \
304 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
305 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
306 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
307 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
311 #define WAIT_QUEUE_SET_CHECK(wqs) \
313 queue_t q2 = &(wqs)->wqs_setlinks; \
314 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
315 while (!queue_end(q2, (queue_entry_t)wql2)) { \
316 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
317 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
321 #else /* !_WAIT_QUEUE_DEBUG_ */
323 #define WQASSERT(e, s, p0, p1) assert(e)
325 #define WAIT_QUEUE_CHECK(wq)
326 #define WAIT_QUEUE_SET_CHECK(wqs)
328 #endif /* !_WAIT_QUEUE_DEBUG_ */
331 * Routine: wait_queue_member_locked
333 * Indicate if this set queue is a member of the queue
335 * The wait queue is locked
336 * The set queue is just that, a set queue
339 wait_queue_member_locked(
341 wait_queue_set_t wq_set
)
343 wait_queue_element_t wq_element
;
346 assert(wait_queue_held(wq
));
347 assert(wait_queue_is_set(wq_set
));
351 wq_element
= (wait_queue_element_t
) queue_first(q
);
352 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
353 WAIT_QUEUE_ELEMENT_CHECK(wq
, wq_element
);
354 if ((wq_element
->wqe_type
== WAIT_QUEUE_LINK
)) {
355 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
357 if (wql
->wql_setqueue
== wq_set
)
360 wq_element
= (wait_queue_element_t
)
361 queue_next((queue_t
) wq_element
);
368 * Routine: wait_queue_member
370 * Indicate if this set queue is a member of the queue
372 * The set queue is just that, a set queue
377 wait_queue_set_t wq_set
)
382 if (!wait_queue_is_set(wq_set
))
387 ret
= wait_queue_member_locked(wq
, wq_set
);
388 wait_queue_unlock(wq
);
396 * Routine: wait_queue_link_noalloc
398 * Insert a set wait queue into a wait queue. This
399 * requires us to link the two together using a wait_queue_link
400 * structure that we allocate.
402 * The wait queue being inserted must be inited as a set queue
405 wait_queue_link_noalloc(
407 wait_queue_set_t wq_set
,
408 wait_queue_link_t wql
)
410 wait_queue_element_t wq_element
;
414 if (!wait_queue_is_queue(wq
) || !wait_queue_is_set(wq_set
))
415 return KERN_INVALID_ARGUMENT
;
418 * There are probably less threads and sets associated with
419 * the wait queue, then there are wait queues associated with
420 * the set. So lets validate it that way.
425 wq_element
= (wait_queue_element_t
) queue_first(q
);
426 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
427 WAIT_QUEUE_ELEMENT_CHECK(wq
, wq_element
);
428 if (wq_element
->wqe_type
== WAIT_QUEUE_LINK
&&
429 ((wait_queue_link_t
)wq_element
)->wql_setqueue
== wq_set
) {
430 wait_queue_unlock(wq
);
432 return KERN_ALREADY_IN_SET
;
434 wq_element
= (wait_queue_element_t
)
435 queue_next((queue_t
) wq_element
);
439 * Not already a member, so we can add it.
443 WAIT_QUEUE_SET_CHECK(wq_set
);
446 queue_enter(&wq
->wq_queue
, wql
, wait_queue_link_t
, wql_links
);
447 wql
->wql_setqueue
= wq_set
;
448 queue_enter(&wq_set
->wqs_setlinks
, wql
, wait_queue_link_t
, wql_setlinks
);
449 wql
->wql_type
= WAIT_QUEUE_LINK
;
452 wait_queue_unlock(wq
);
459 * Routine: wait_queue_link
461 * Insert a set wait queue into a wait queue. This
462 * requires us to link the two together using a wait_queue_link
463 * structure that we allocate.
465 * The wait queue being inserted must be inited as a set queue
470 wait_queue_set_t wq_set
)
472 wait_queue_link_t wql
;
475 wql
= (wait_queue_link_t
) kalloc(sizeof(struct wait_queue_link
));
476 if (wql
== WAIT_QUEUE_LINK_NULL
)
477 return KERN_RESOURCE_SHORTAGE
;
479 ret
= wait_queue_link_noalloc(wq
, wq_set
, wql
);
480 if (ret
!= KERN_SUCCESS
)
481 kfree(wql
, sizeof(struct wait_queue_link
));
488 * Routine: wait_queue_unlink_nofree
490 * Undo the linkage between a wait queue and a set.
493 wait_queue_unlink_locked(
495 wait_queue_set_t wq_set
,
496 wait_queue_link_t wql
)
498 assert(wait_queue_held(wq
));
499 assert(wait_queue_held(&wq_set
->wqs_wait_queue
));
501 wql
->wql_queue
= WAIT_QUEUE_NULL
;
502 queue_remove(&wq
->wq_queue
, wql
, wait_queue_link_t
, wql_links
);
503 wql
->wql_setqueue
= WAIT_QUEUE_SET_NULL
;
504 queue_remove(&wq_set
->wqs_setlinks
, wql
, wait_queue_link_t
, wql_setlinks
);
505 wql
->wql_type
= WAIT_QUEUE_UNLINKED
;
507 WAIT_QUEUE_CHECK(wq
);
508 WAIT_QUEUE_SET_CHECK(wq_set
);
512 * Routine: wait_queue_unlink
514 * Remove the linkage between a wait queue and a set,
515 * freeing the linkage structure.
517 * The wait queue being must be a member set queue
522 wait_queue_set_t wq_set
)
524 wait_queue_element_t wq_element
;
525 wait_queue_link_t wql
;
529 if (!wait_queue_is_queue(wq
) || !wait_queue_is_set(wq_set
)) {
530 return KERN_INVALID_ARGUMENT
;
536 wq_element
= (wait_queue_element_t
) queue_first(q
);
537 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
538 WAIT_QUEUE_ELEMENT_CHECK(wq
, wq_element
);
539 if (wq_element
->wqe_type
== WAIT_QUEUE_LINK
) {
540 wql
= (wait_queue_link_t
)wq_element
;
542 if (wql
->wql_setqueue
== wq_set
) {
544 wait_queue_unlink_locked(wq
, wq_set
, wql
);
546 wait_queue_unlock(wq
);
548 kfree(wql
, sizeof(struct wait_queue_link
));
552 wq_element
= (wait_queue_element_t
)
553 queue_next((queue_t
) wq_element
);
555 wait_queue_unlock(wq
);
557 return KERN_NOT_IN_SET
;
562 * Routine: wait_queue_unlinkall_nofree
564 * Remove the linkage between a wait queue and all its
565 * sets. The caller is responsible for freeing
566 * the wait queue link structures.
570 wait_queue_unlinkall_nofree(
573 wait_queue_element_t wq_element
;
574 wait_queue_element_t wq_next_element
;
575 wait_queue_set_t wq_set
;
576 wait_queue_link_t wql
;
577 queue_head_t links_queue_head
;
578 queue_t links
= &links_queue_head
;
582 if (!wait_queue_is_queue(wq
)) {
583 return KERN_INVALID_ARGUMENT
;
593 wq_element
= (wait_queue_element_t
) queue_first(q
);
594 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
595 WAIT_QUEUE_ELEMENT_CHECK(wq
, wq_element
);
596 wq_next_element
= (wait_queue_element_t
)
597 queue_next((queue_t
) wq_element
);
599 if (wq_element
->wqe_type
== WAIT_QUEUE_LINK
) {
600 wql
= (wait_queue_link_t
)wq_element
;
601 wq_set
= wql
->wql_setqueue
;
603 wait_queue_unlink_locked(wq
, wq_set
, wql
);
606 wq_element
= wq_next_element
;
608 wait_queue_unlock(wq
);
610 return(KERN_SUCCESS
);
615 * Routine: wait_queue_unlink_all
617 * Remove the linkage between a wait queue and all its sets.
618 * All the linkage structures are freed.
620 * Nothing of interest locked.
624 wait_queue_unlink_all(
627 wait_queue_element_t wq_element
;
628 wait_queue_element_t wq_next_element
;
629 wait_queue_set_t wq_set
;
630 wait_queue_link_t wql
;
631 queue_head_t links_queue_head
;
632 queue_t links
= &links_queue_head
;
636 if (!wait_queue_is_queue(wq
)) {
637 return KERN_INVALID_ARGUMENT
;
647 wq_element
= (wait_queue_element_t
) queue_first(q
);
648 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
649 WAIT_QUEUE_ELEMENT_CHECK(wq
, wq_element
);
650 wq_next_element
= (wait_queue_element_t
)
651 queue_next((queue_t
) wq_element
);
653 if (wq_element
->wqe_type
== WAIT_QUEUE_LINK
) {
654 wql
= (wait_queue_link_t
)wq_element
;
655 wq_set
= wql
->wql_setqueue
;
657 wait_queue_unlink_locked(wq
, wq_set
, wql
);
659 enqueue(links
, &wql
->wql_links
);
661 wq_element
= wq_next_element
;
663 wait_queue_unlock(wq
);
666 while(!queue_empty(links
)) {
667 wql
= (wait_queue_link_t
) dequeue(links
);
668 kfree(wql
, sizeof(struct wait_queue_link
));
671 return(KERN_SUCCESS
);
675 * Routine: wait_queue_set_unlink_all_nofree
677 * Remove the linkage between a set wait queue and all its
678 * member wait queues. The link structures are not freed, nor
679 * returned. It is the caller's responsibility to track and free
682 * The wait queue being must be a member set queue
685 wait_queue_set_unlink_all_nofree(
686 wait_queue_set_t wq_set
)
688 wait_queue_link_t wql
;
693 if (!wait_queue_is_set(wq_set
)) {
694 return KERN_INVALID_ARGUMENT
;
701 q
= &wq_set
->wqs_setlinks
;
703 wql
= (wait_queue_link_t
)queue_first(q
);
704 while (!queue_end(q
, (queue_entry_t
)wql
)) {
705 WAIT_QUEUE_SET_LINK_CHECK(wq_set
, wql
);
707 if (wait_queue_lock_try(wq
)) {
708 wait_queue_unlink_locked(wq
, wq_set
, wql
);
709 wait_queue_unlock(wq
);
710 wql
= (wait_queue_link_t
)queue_first(q
);
721 return(KERN_SUCCESS
);
724 /* legacy interface naming */
726 wait_subqueue_unlink_all(
727 wait_queue_set_t wq_set
)
729 return wait_queue_set_unlink_all_nofree(wq_set
);
734 * Routine: wait_queue_set_unlink_all
736 * Remove the linkage between a set wait queue and all its
737 * member wait queues. The link structures are freed.
739 * The wait queue must be a set
742 wait_queue_set_unlink_all(
743 wait_queue_set_t wq_set
)
745 wait_queue_link_t wql
;
748 queue_head_t links_queue_head
;
749 queue_t links
= &links_queue_head
;
752 if (!wait_queue_is_set(wq_set
)) {
753 return KERN_INVALID_ARGUMENT
;
762 q
= &wq_set
->wqs_setlinks
;
764 wql
= (wait_queue_link_t
)queue_first(q
);
765 while (!queue_end(q
, (queue_entry_t
)wql
)) {
766 WAIT_QUEUE_SET_LINK_CHECK(wq_set
, wql
);
768 if (wait_queue_lock_try(wq
)) {
769 wait_queue_unlink_locked(wq
, wq_set
, wql
);
770 wait_queue_unlock(wq
);
771 enqueue(links
, &wql
->wql_links
);
772 wql
= (wait_queue_link_t
)queue_first(q
);
783 while (!queue_empty (links
)) {
784 wql
= (wait_queue_link_t
) dequeue(links
);
785 kfree(wql
, sizeof(struct wait_queue_link
));
787 return(KERN_SUCCESS
);
792 * Routine: wait_queue_unlink_one
794 * Find and unlink one set wait queue
796 * Nothing of interest locked.
799 wait_queue_unlink_one(
801 wait_queue_set_t
*wq_setp
)
803 wait_queue_element_t wq_element
;
812 wq_element
= (wait_queue_element_t
) queue_first(q
);
813 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
815 if (wq_element
->wqe_type
== WAIT_QUEUE_LINK
) {
816 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
817 wait_queue_set_t wq_set
= wql
->wql_setqueue
;
820 wait_queue_unlink_locked(wq
, wq_set
, wql
);
822 wait_queue_unlock(wq
);
824 kfree(wql
,sizeof(struct wait_queue_link
));
829 wq_element
= (wait_queue_element_t
)
830 queue_next((queue_t
) wq_element
);
832 wait_queue_unlock(wq
);
834 *wq_setp
= WAIT_QUEUE_SET_NULL
;
839 * Routine: wait_queue_assert_wait64_locked
841 * Insert the current thread into the supplied wait queue
842 * waiting for a particular event to be posted to that queue.
845 * The wait queue is assumed locked.
846 * The waiting thread is assumed locked.
849 __private_extern__ wait_result_t
850 wait_queue_assert_wait64_locked(
853 wait_interrupt_t interruptible
,
857 wait_result_t wait_result
;
859 if (!wait_queue_assert_possible(thread
))
860 panic("wait_queue_assert_wait64_locked");
862 if (wq
->wq_type
== _WAIT_QUEUE_SET_inited
) {
863 wait_queue_set_t wqs
= (wait_queue_set_t
)wq
;
865 if (wqs
->wqs_isprepost
&& wqs
->wqs_refcount
> 0)
866 return(THREAD_AWAKENED
);
870 * This is the extent to which we currently take scheduling attributes
871 * into account. If the thread is vm priviledged, we stick it at
872 * the front of the queue. Later, these queues will honor the policy
873 * value set at wait_queue_init time.
875 wait_result
= thread_mark_wait_locked(thread
, interruptible
);
876 if (wait_result
== THREAD_WAITING
) {
877 if (thread
->options
& TH_OPT_VMPRIV
)
878 enqueue_head(&wq
->wq_queue
, (queue_entry_t
) thread
);
880 enqueue_tail(&wq
->wq_queue
, (queue_entry_t
) thread
);
882 thread
->wait_event
= event
;
883 thread
->wait_queue
= wq
;
886 if (!timer_call_enter(&thread
->wait_timer
, deadline
))
887 thread
->wait_timer_active
++;
888 thread
->wait_timer_is_set
= TRUE
;
895 * Routine: wait_queue_assert_wait
897 * Insert the current thread into the supplied wait queue
898 * waiting for a particular event to be posted to that queue.
901 * nothing of interest locked.
904 wait_queue_assert_wait(
907 wait_interrupt_t interruptible
,
912 thread_t thread
= current_thread();
914 /* If it is an invalid wait queue, you can't wait on it */
915 if (!wait_queue_is_valid(wq
))
916 return (thread
->wait_result
= THREAD_RESTART
);
921 ret
= wait_queue_assert_wait64_locked(wq
, (event64_t
)((uint32_t)event
),
922 interruptible
, deadline
, thread
);
923 thread_unlock(thread
);
924 wait_queue_unlock(wq
);
930 * Routine: wait_queue_assert_wait64
932 * Insert the current thread into the supplied wait queue
933 * waiting for a particular event to be posted to that queue.
935 * nothing of interest locked.
938 wait_queue_assert_wait64(
941 wait_interrupt_t interruptible
,
946 thread_t thread
= current_thread();
948 /* If it is an invalid wait queue, you cant wait on it */
949 if (!wait_queue_is_valid(wq
))
950 return (thread
->wait_result
= THREAD_RESTART
);
955 ret
= wait_queue_assert_wait64_locked(wq
, event
, interruptible
, deadline
, thread
);
956 thread_unlock(thread
);
957 wait_queue_unlock(wq
);
963 * Routine: _wait_queue_select64_all
965 * Select all threads off a wait queue that meet the
970 * wake_queue initialized and ready for insertion
973 * a queue of locked threads
976 _wait_queue_select64_all(
981 wait_queue_element_t wq_element
;
982 wait_queue_element_t wqe_next
;
987 wq_element
= (wait_queue_element_t
) queue_first(q
);
988 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
989 WAIT_QUEUE_ELEMENT_CHECK(wq
, wq_element
);
990 wqe_next
= (wait_queue_element_t
)
991 queue_next((queue_t
) wq_element
);
994 * We may have to recurse if this is a compound wait queue.
996 if (wq_element
->wqe_type
== WAIT_QUEUE_LINK
) {
997 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
998 wait_queue_t set_queue
;
1001 * We have to check the set wait queue.
1003 set_queue
= (wait_queue_t
)wql
->wql_setqueue
;
1004 wait_queue_lock(set_queue
);
1005 if (set_queue
->wq_isprepost
) {
1006 wait_queue_set_t wqs
= (wait_queue_set_t
)set_queue
;
1009 * Preposting is only for sets and wait queue
1010 * is the first element of set
1012 wqs
->wqs_refcount
++;
1014 if (! wait_queue_empty(set_queue
))
1015 _wait_queue_select64_all(set_queue
, event
, wake_queue
);
1016 wait_queue_unlock(set_queue
);
1020 * Otherwise, its a thread. If it is waiting on
1021 * the event we are posting to this queue, pull
1022 * it off the queue and stick it in out wake_queue.
1024 thread_t t
= (thread_t
)wq_element
;
1026 if (t
->wait_event
== event
) {
1028 remqueue(q
, (queue_entry_t
) t
);
1029 enqueue (wake_queue
, (queue_entry_t
) t
);
1030 t
->wait_queue
= WAIT_QUEUE_NULL
;
1031 t
->wait_event
= NO_EVENT64
;
1032 t
->at_safe_point
= FALSE
;
1033 /* returned locked */
1036 wq_element
= wqe_next
;
1041 * Routine: wait_queue_wakeup64_all_locked
1043 * Wakeup some number of threads that are in the specified
1044 * wait queue and waiting on the specified event.
1046 * wait queue already locked (may be released).
1048 * KERN_SUCCESS - Threads were woken up
1049 * KERN_NOT_WAITING - No threads were waiting <wq,event> pair
1051 __private_extern__ kern_return_t
1052 wait_queue_wakeup64_all_locked(
1055 wait_result_t result
,
1058 queue_head_t wake_queue_head
;
1059 queue_t q
= &wake_queue_head
;
1062 assert(wait_queue_held(wq
));
1066 * Select the threads that we will wake up. The threads
1067 * are returned to us locked and cleanly removed from the
1070 _wait_queue_select64_all(wq
, event
, q
);
1072 wait_queue_unlock(wq
);
1075 * For each thread, set it running.
1077 res
= KERN_NOT_WAITING
;
1078 while (!queue_empty (q
)) {
1079 thread_t thread
= (thread_t
) dequeue(q
);
1080 res
= thread_go(thread
, result
);
1081 assert(res
== KERN_SUCCESS
);
1082 thread_unlock(thread
);
1089 * Routine: wait_queue_wakeup_all
1091 * Wakeup some number of threads that are in the specified
1092 * wait queue and waiting on the specified event.
1096 * KERN_SUCCESS - Threads were woken up
1097 * KERN_NOT_WAITING - No threads were waiting <wq,event> pair
1100 wait_queue_wakeup_all(
1103 wait_result_t result
)
1108 if (!wait_queue_is_valid(wq
)) {
1109 return KERN_INVALID_ARGUMENT
;
1113 wait_queue_lock(wq
);
1114 ret
= wait_queue_wakeup64_all_locked(
1115 wq
, (event64_t
)((uint32_t)event
),
1123 * Routine: wait_queue_wakeup64_all
1125 * Wakeup some number of threads that are in the specified
1126 * wait queue and waiting on the specified event.
1130 * KERN_SUCCESS - Threads were woken up
1131 * KERN_NOT_WAITING - No threads were waiting <wq,event> pair
1134 wait_queue_wakeup64_all(
1137 wait_result_t result
)
1142 if (!wait_queue_is_valid(wq
)) {
1143 return KERN_INVALID_ARGUMENT
;
1147 wait_queue_lock(wq
);
1148 ret
= wait_queue_wakeup64_all_locked(wq
, event
, result
, TRUE
);
1155 * Routine: _wait_queue_select64_one
1157 * Select the best thread off a wait queue that meet the
1158 * supplied criteria.
1162 * possibly recursive
1164 * a locked thread - if one found
1166 * This is where the sync policy of the wait queue comes
1167 * into effect. For now, we just assume FIFO.
1170 _wait_queue_select64_one(
1174 wait_queue_element_t wq_element
;
1175 wait_queue_element_t wqe_next
;
1176 thread_t t
= THREAD_NULL
;
1179 assert(wq
->wq_fifo
);
1183 wq_element
= (wait_queue_element_t
) queue_first(q
);
1184 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
1185 WAIT_QUEUE_ELEMENT_CHECK(wq
, wq_element
);
1186 wqe_next
= (wait_queue_element_t
)
1187 queue_next((queue_t
) wq_element
);
1190 * We may have to recurse if this is a compound wait queue.
1192 if (wq_element
->wqe_type
== WAIT_QUEUE_LINK
) {
1193 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
1194 wait_queue_t set_queue
;
1197 * We have to check the set wait queue.
1199 set_queue
= (wait_queue_t
)wql
->wql_setqueue
;
1200 wait_queue_lock(set_queue
);
1201 if (! wait_queue_empty(set_queue
)) {
1202 t
= _wait_queue_select64_one(set_queue
, event
);
1204 wait_queue_unlock(set_queue
);
1205 if (t
!= THREAD_NULL
)
1210 * Otherwise, its a thread. If it is waiting on
1211 * the event we are posting to this queue, pull
1212 * it off the queue and stick it in out wake_queue.
1214 t
= (thread_t
)wq_element
;
1215 if (t
->wait_event
== event
) {
1217 remqueue(q
, (queue_entry_t
) t
);
1218 t
->wait_queue
= WAIT_QUEUE_NULL
;
1219 t
->wait_event
= NO_EVENT64
;
1220 t
->at_safe_point
= FALSE
;
1221 return t
; /* still locked */
1226 wq_element
= wqe_next
;
1232 * Routine: wait_queue_peek64_locked
1234 * Select the best thread from a wait queue that meet the
1235 * supplied criteria, but leave it on the queue it was
1236 * found on. The thread, and the actual wait_queue the
1237 * thread was found on are identified.
1241 * possibly recursive
1243 * a locked thread - if one found
1244 * a locked waitq - the one the thread was found on
1246 * Both the waitq the thread was actually found on, and
1247 * the supplied wait queue, are locked after this.
1249 __private_extern__
void
1250 wait_queue_peek64_locked(
1256 wait_queue_element_t wq_element
;
1257 wait_queue_element_t wqe_next
;
1260 assert(wq
->wq_fifo
);
1266 wq_element
= (wait_queue_element_t
) queue_first(q
);
1267 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
1268 WAIT_QUEUE_ELEMENT_CHECK(wq
, wq_element
);
1269 wqe_next
= (wait_queue_element_t
)
1270 queue_next((queue_t
) wq_element
);
1273 * We may have to recurse if this is a compound wait queue.
1275 if (wq_element
->wqe_type
== WAIT_QUEUE_LINK
) {
1276 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
1277 wait_queue_t set_queue
;
1280 * We have to check the set wait queue.
1282 set_queue
= (wait_queue_t
)wql
->wql_setqueue
;
1283 wait_queue_lock(set_queue
);
1284 if (! wait_queue_empty(set_queue
)) {
1285 wait_queue_peek64_locked(set_queue
, event
, tp
, wqp
);
1287 if (*tp
!= THREAD_NULL
) {
1288 if (*wqp
!= set_queue
)
1289 wait_queue_unlock(set_queue
);
1290 return; /* thread and its waitq locked */
1293 wait_queue_unlock(set_queue
);
1297 * Otherwise, its a thread. If it is waiting on
1298 * the event we are posting to this queue, return
1299 * it locked, but leave it on the queue.
1301 thread_t t
= (thread_t
)wq_element
;
1303 if (t
->wait_event
== event
) {
1310 wq_element
= wqe_next
;
1315 * Routine: wait_queue_pull_thread_locked
1317 * Pull a thread that was previously "peeked" off the wait
1318 * queue and (possibly) unlock the waitq.
1324 * with the thread still locked.
1327 wait_queue_pull_thread_locked(
1333 assert(thread
->wait_queue
== waitq
);
1335 remqueue(&waitq
->wq_queue
, (queue_entry_t
)thread
);
1336 thread
->wait_queue
= WAIT_QUEUE_NULL
;
1337 thread
->wait_event
= NO_EVENT64
;
1338 thread
->at_safe_point
= FALSE
;
1340 wait_queue_unlock(waitq
);
1345 * Routine: wait_queue_select64_thread
1347 * Look for a thread and remove it from the queues, if
1348 * (and only if) the thread is waiting on the supplied
1349 * <wait_queue, event> pair.
1353 * possibly recursive
1355 * KERN_NOT_WAITING: Thread is not waiting here.
1356 * KERN_SUCCESS: It was, and is now removed (returned locked)
1358 static kern_return_t
1359 _wait_queue_select64_thread(
1364 wait_queue_element_t wq_element
;
1365 wait_queue_element_t wqe_next
;
1366 kern_return_t res
= KERN_NOT_WAITING
;
1367 queue_t q
= &wq
->wq_queue
;
1369 thread_lock(thread
);
1370 if ((thread
->wait_queue
== wq
) && (thread
->wait_event
== event
)) {
1371 remqueue(q
, (queue_entry_t
) thread
);
1372 thread
->at_safe_point
= FALSE
;
1373 thread
->wait_event
= NO_EVENT64
;
1374 thread
->wait_queue
= WAIT_QUEUE_NULL
;
1375 /* thread still locked */
1376 return KERN_SUCCESS
;
1378 thread_unlock(thread
);
1381 * The wait_queue associated with the thread may be one of this
1382 * wait queue's sets. Go see. If so, removing it from
1383 * there is like removing it from here.
1385 wq_element
= (wait_queue_element_t
) queue_first(q
);
1386 while (!queue_end(q
, (queue_entry_t
)wq_element
)) {
1387 WAIT_QUEUE_ELEMENT_CHECK(wq
, wq_element
);
1388 wqe_next
= (wait_queue_element_t
)
1389 queue_next((queue_t
) wq_element
);
1391 if (wq_element
->wqe_type
== WAIT_QUEUE_LINK
) {
1392 wait_queue_link_t wql
= (wait_queue_link_t
)wq_element
;
1393 wait_queue_t set_queue
;
1395 set_queue
= (wait_queue_t
)wql
->wql_setqueue
;
1396 wait_queue_lock(set_queue
);
1397 if (! wait_queue_empty(set_queue
)) {
1398 res
= _wait_queue_select64_thread(set_queue
,
1402 wait_queue_unlock(set_queue
);
1403 if (res
== KERN_SUCCESS
)
1404 return KERN_SUCCESS
;
1406 wq_element
= wqe_next
;
1413 * Routine: wait_queue_wakeup64_identity_locked
1415 * Select a single thread that is most-eligible to run and set
1416 * set it running. But return the thread locked.
1421 * possibly recursive
1423 * a pointer to the locked thread that was awakened
1425 __private_extern__ thread_t
1426 wait_queue_wakeup64_identity_locked(
1429 wait_result_t result
,
1435 assert(wait_queue_held(wq
));
1438 thread
= _wait_queue_select64_one(wq
, event
);
1440 wait_queue_unlock(wq
);
1443 res
= thread_go(thread
, result
);
1444 assert(res
== KERN_SUCCESS
);
1446 return thread
; /* still locked if not NULL */
1451 * Routine: wait_queue_wakeup64_one_locked
1453 * Select a single thread that is most-eligible to run and set
1459 * possibly recursive
1461 * KERN_SUCCESS: It was, and is, now removed.
1462 * KERN_NOT_WAITING - No thread was waiting <wq,event> pair
1464 __private_extern__ kern_return_t
1465 wait_queue_wakeup64_one_locked(
1468 wait_result_t result
,
1473 assert(wait_queue_held(wq
));
1475 thread
= _wait_queue_select64_one(wq
, event
);
1477 wait_queue_unlock(wq
);
1482 res
= thread_go(thread
, result
);
1483 assert(res
== KERN_SUCCESS
);
1484 thread_unlock(thread
);
1488 return KERN_NOT_WAITING
;
1492 * Routine: wait_queue_wakeup_one
1494 * Wakeup the most appropriate thread that is in the specified
1495 * wait queue for the specified event.
1499 * KERN_SUCCESS - Thread was woken up
1500 * KERN_NOT_WAITING - No thread was waiting <wq,event> pair
1503 wait_queue_wakeup_one(
1506 wait_result_t result
)
1511 if (!wait_queue_is_valid(wq
)) {
1512 return KERN_INVALID_ARGUMENT
;
1516 wait_queue_lock(wq
);
1517 thread
= _wait_queue_select64_one(wq
, (event64_t
)((uint32_t)event
));
1518 wait_queue_unlock(wq
);
1523 res
= thread_go(thread
, result
);
1524 assert(res
== KERN_SUCCESS
);
1525 thread_unlock(thread
);
1531 return KERN_NOT_WAITING
;
1535 * Routine: wait_queue_wakeup64_one
1537 * Wakeup the most appropriate thread that is in the specified
1538 * wait queue for the specified event.
1542 * KERN_SUCCESS - Thread was woken up
1543 * KERN_NOT_WAITING - No thread was waiting <wq,event> pair
1546 wait_queue_wakeup64_one(
1549 wait_result_t result
)
1554 if (!wait_queue_is_valid(wq
)) {
1555 return KERN_INVALID_ARGUMENT
;
1558 wait_queue_lock(wq
);
1559 thread
= _wait_queue_select64_one(wq
, event
);
1560 wait_queue_unlock(wq
);
1565 res
= thread_go(thread
, result
);
1566 assert(res
== KERN_SUCCESS
);
1567 thread_unlock(thread
);
1573 return KERN_NOT_WAITING
;
1578 * Routine: wait_queue_wakeup64_thread_locked
1580 * Wakeup the particular thread that was specified if and only
1581 * it was in this wait queue (or one of it's set queues)
1582 * and waiting on the specified event.
1584 * This is much safer than just removing the thread from
1585 * whatever wait queue it happens to be on. For instance, it
1586 * may have already been awoken from the wait you intended to
1587 * interrupt and waited on something else (like another
1591 * wait queue already locked (may be released).
1593 * KERN_SUCCESS - the thread was found waiting and awakened
1594 * KERN_NOT_WAITING - the thread was not waiting here
1596 __private_extern__ kern_return_t
1597 wait_queue_wakeup64_thread_locked(
1601 wait_result_t result
,
1606 assert(wait_queue_held(wq
));
1609 * See if the thread was still waiting there. If so, it got
1610 * dequeued and returned locked.
1612 res
= _wait_queue_select64_thread(wq
, event
, thread
);
1614 wait_queue_unlock(wq
);
1616 if (res
!= KERN_SUCCESS
)
1617 return KERN_NOT_WAITING
;
1619 res
= thread_go(thread
, result
);
1620 assert(res
== KERN_SUCCESS
);
1621 thread_unlock(thread
);
1626 * Routine: wait_queue_wakeup_thread
1628 * Wakeup the particular thread that was specified if and only
1629 * it was in this wait queue (or one of it's set queues)
1630 * and waiting on the specified event.
1632 * This is much safer than just removing the thread from
1633 * whatever wait queue it happens to be on. For instance, it
1634 * may have already been awoken from the wait you intended to
1635 * interrupt and waited on something else (like another
1638 * nothing of interest locked
1639 * we need to assume spl needs to be raised
1641 * KERN_SUCCESS - the thread was found waiting and awakened
1642 * KERN_NOT_WAITING - the thread was not waiting here
1645 wait_queue_wakeup_thread(
1649 wait_result_t result
)
1654 if (!wait_queue_is_valid(wq
)) {
1655 return KERN_INVALID_ARGUMENT
;
1659 wait_queue_lock(wq
);
1660 res
= _wait_queue_select64_thread(wq
, (event64_t
)((uint32_t)event
), thread
);
1661 wait_queue_unlock(wq
);
1663 if (res
== KERN_SUCCESS
) {
1664 res
= thread_go(thread
, result
);
1665 assert(res
== KERN_SUCCESS
);
1666 thread_unlock(thread
);
1671 return KERN_NOT_WAITING
;
1675 * Routine: wait_queue_wakeup64_thread
1677 * Wakeup the particular thread that was specified if and only
1678 * it was in this wait queue (or one of it's set's queues)
1679 * and waiting on the specified event.
1681 * This is much safer than just removing the thread from
1682 * whatever wait queue it happens to be on. For instance, it
1683 * may have already been awoken from the wait you intended to
1684 * interrupt and waited on something else (like another
1687 * nothing of interest locked
1688 * we need to assume spl needs to be raised
1690 * KERN_SUCCESS - the thread was found waiting and awakened
1691 * KERN_NOT_WAITING - the thread was not waiting here
1694 wait_queue_wakeup64_thread(
1698 wait_result_t result
)
1703 if (!wait_queue_is_valid(wq
)) {
1704 return KERN_INVALID_ARGUMENT
;
1708 wait_queue_lock(wq
);
1709 res
= _wait_queue_select64_thread(wq
, event
, thread
);
1710 wait_queue_unlock(wq
);
1712 if (res
== KERN_SUCCESS
) {
1713 res
= thread_go(thread
, result
);
1714 assert(res
== KERN_SUCCESS
);
1715 thread_unlock(thread
);
1720 return KERN_NOT_WAITING
;