/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *    File:    wait_queue.c (adapted from sched_prim.c)
 *    Author:  Avadis Tevanian, Jr.
 *
 *    Primitives for manipulating wait queues: either global
 *    ones from sched_prim.c, or private ones associated with
 *    particular structures (ports, semaphores, etc.).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
/* forward declarations */
static boolean_t wait_queue_member_locked(
            wait_queue_t wq,
            wait_queue_set_t wq_set);

void wait_queue_unlink_one(
            wait_queue_t wq,
            wait_queue_set_t *wq_setp);

kern_return_t wait_queue_set_unlink_all_nofree(
            wait_queue_set_t wq_set);
/*
 *    Routine:    wait_queue_init
 *    Purpose:
 *        Initialize a previously allocated wait queue.
 *    Returns:
 *        KERN_SUCCESS - The wait_queue_t was initialized
 *        KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
    wait_queue_t wq,
    int policy)
{
    if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
        return KERN_INVALID_ARGUMENT;

    wq->wq_fifo = TRUE;
    wq->wq_type = _WAIT_QUEUE_inited;
    queue_init(&wq->wq_queue);
    hw_lock_init(&wq->wq_interlock);
    return KERN_SUCCESS;
}
/*
 *    Routine:    wait_queue_alloc
 *    Purpose:
 *        Allocate and initialize a wait queue for use outside of
 *        the mach part of the kernel.
 *    Conditions:
 *        Nothing locked - can block.
 *    Returns:
 *        The allocated and initialized wait queue
 *        WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
    int policy)
{
    wait_queue_t wq;
    kern_return_t ret;

    wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
    if (wq != WAIT_QUEUE_NULL) {
        ret = wait_queue_init(wq, policy);
        if (ret != KERN_SUCCESS) {
            kfree(wq, sizeof(struct wait_queue));
            wq = WAIT_QUEUE_NULL;
        }
    }
    return wq;
}
/*
 *    Routine:    wait_queue_free
 *    Purpose:
 *        Free an allocated wait queue.
 *    Conditions:
 *        May block.
 */
kern_return_t
wait_queue_free(
    wait_queue_t wq)
{
    if (!wait_queue_is_queue(wq))
        return KERN_INVALID_ARGUMENT;
    if (!queue_empty(&wq->wq_queue))
        return KERN_FAILURE;
    kfree(wq, sizeof(struct wait_queue));
    return KERN_SUCCESS;
}
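
/*
 * Illustrative sketch (not part of the original file): how a kernel
 * subsystem might manage the lifetime of a private wait queue with the
 * alloc/free routines above.  The `struct example_object` type and the
 * example_* function names are hypothetical; only the wait_queue calls
 * are the real interface.  Deliberately compiled out.
 */
#if 0
struct example_object {
    wait_queue_t eo_wait_queue;        /* waiters block here */
};

static kern_return_t
example_object_init(struct example_object *eo)
{
    /* allocate a FIFO wait queue; can block, so hold no locks here */
    eo->eo_wait_queue = wait_queue_alloc(SYNC_POLICY_FIFO);
    if (eo->eo_wait_queue == WAIT_QUEUE_NULL)
        return KERN_RESOURCE_SHORTAGE;
    return KERN_SUCCESS;
}

static kern_return_t
example_object_destroy(struct example_object *eo)
{
    /* fails if threads are still queued on the wait queue */
    return wait_queue_free(eo->eo_wait_queue);
}
#endif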
/*
 *    Routine:    wait_queue_set_init
 *    Purpose:
 *        Initialize a previously allocated wait queue set.
 *    Returns:
 *        KERN_SUCCESS - The wait_queue_set_t was initialized
 *        KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
    wait_queue_set_t wqset,
    int policy)
{
    kern_return_t ret;

    ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
    if (ret != KERN_SUCCESS)
        return ret;

    wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
    if (policy & SYNC_POLICY_PREPOST)
        wqset->wqs_wait_queue.wq_isprepost = TRUE;
    else
        wqset->wqs_wait_queue.wq_isprepost = FALSE;
    queue_init(&wqset->wqs_setlinks);
    wqset->wqs_refcount = 0;
    return KERN_SUCCESS;
}
/* legacy interface naming */
kern_return_t
wait_queue_sub_init(
    wait_queue_set_t wqset,
    int policy)
{
    return wait_queue_set_init(wqset, policy);
}
kern_return_t
wait_queue_sub_clearrefs(
    wait_queue_set_t wq_set)
{
    if (!wait_queue_is_set(wq_set))
        return KERN_INVALID_ARGUMENT;

    wqs_lock(wq_set);
    wq_set->wqs_refcount = 0;
    wqs_unlock(wq_set);
    return KERN_SUCCESS;
}
/*
 *    Routine:    wait_queue_set_alloc
 *    Purpose:
 *        Allocate and initialize a wait queue set for
 *        use outside of the mach part of the kernel.
 *    Conditions:
 *        May block.
 *    Returns:
 *        The allocated and initialized wait queue set
 *        WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
    int policy)
{
    wait_queue_set_t wq_set;

    wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
    if (wq_set != WAIT_QUEUE_SET_NULL) {
        kern_return_t ret;

        ret = wait_queue_set_init(wq_set, policy);
        if (ret != KERN_SUCCESS) {
            kfree(wq_set, sizeof(struct wait_queue_set));
            wq_set = WAIT_QUEUE_SET_NULL;
        }
    }
    return wq_set;
}
/*
 *    Routine:    wait_queue_set_free
 *    Purpose:
 *        Free an allocated wait queue set
 *    Conditions:
 *        May block.
 */
kern_return_t
wait_queue_set_free(
    wait_queue_set_t wq_set)
{
    if (!wait_queue_is_set(wq_set))
        return KERN_INVALID_ARGUMENT;

    if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
        return KERN_FAILURE;

    kfree(wq_set, sizeof(struct wait_queue_set));
    return KERN_SUCCESS;
}
/*
 *    Routine:    wait_queue_set_size
 *    Routine:    wait_queue_link_size
 *    Purpose:
 *        Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)

#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
    WQASSERT(((wqe)->wqe_queue == (wq) && \
        queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
        "wait queue element list corruption: wq=%#x, wqe=%#x", \
        (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
            ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
            (queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
            ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
            (queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
    WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
        ((wql)->wql_setqueue == (wqs)) && \
        ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
        (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
        "wait queue set links corruption: wqs=%#x, wql=%#x", \
        (wqs), (wql))

#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
    queue_t q2 = &(wq)->wq_queue; \
    wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
    while (!queue_end(q2, (queue_entry_t)wqe2)) { \
        WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
        wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
    } \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
    queue_t q2 = &(wqs)->wqs_setlinks; \
    wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
    while (!queue_end(q2, (queue_entry_t)wql2)) { \
        WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
        wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
    } \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */
/*
 *    Routine:    wait_queue_member_locked
 *    Purpose:
 *        Indicate if this set queue is a member of the queue
 *    Conditions:
 *        The wait queue is locked
 *        The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
    wait_queue_t wq,
    wait_queue_set_t wq_set)
{
    wait_queue_element_t wq_element;
    queue_t q;

    assert(wait_queue_held(wq));
    assert(wait_queue_is_set(wq_set));

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;

            if (wql->wql_setqueue == wq_set)
                return TRUE;
        }
        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }
    return FALSE;
}
/*
 *    Routine:    wait_queue_member
 *    Purpose:
 *        Indicate if this set queue is a member of the queue
 *    Conditions:
 *        The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
    wait_queue_t wq,
    wait_queue_set_t wq_set)
{
    boolean_t ret;
    spl_t s;

    if (!wait_queue_is_set(wq_set))
        return FALSE;

    s = splsched();
    wait_queue_lock(wq);
    ret = wait_queue_member_locked(wq, wq_set);
    wait_queue_unlock(wq);
    splx(s);

    return ret;
}
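
/*
 * Illustrative sketch (not part of the original file): querying
 * membership before linking, so a caller can avoid the
 * KERN_ALREADY_IN_SET path of wait_queue_link below.  The function
 * name is hypothetical; the wait_queue calls are real.
 */
#if 0
static kern_return_t
example_link_if_absent(wait_queue_t wq, wait_queue_set_t wqs)
{
    if (wait_queue_member(wq, wqs))
        return KERN_SUCCESS;        /* already a member of the set */
    return wait_queue_link(wq, wqs);
}
#endif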
/*
 *    Routine:    wait_queue_link_noalloc
 *    Purpose:
 *        Insert a set wait queue into a wait queue.  This
 *        requires us to link the two together using a
 *        caller-provided wait_queue_link structure.
 *    Conditions:
 *        The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
    wait_queue_t wq,
    wait_queue_set_t wq_set,
    wait_queue_link_t wql)
{
    wait_queue_element_t wq_element;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
        return KERN_INVALID_ARGUMENT;

    /*
     * There are probably fewer threads and sets associated with
     * the wait queue than there are wait queues associated with
     * the set.  So let's validate it that way.
     */
    s = splsched();
    wait_queue_lock(wq);
    q = &wq->wq_queue;
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
            ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
            wait_queue_unlock(wq);
            splx(s);
            return KERN_ALREADY_IN_SET;
        }
        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }

    /*
     * Not already a member, so we can add it.
     */
    wqs_lock(wq_set);

    WAIT_QUEUE_SET_CHECK(wq_set);

    wql->wql_queue = wq;
    queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
    wql->wql_setqueue = wq_set;
    queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
    wql->wql_type = WAIT_QUEUE_LINK;

    wqs_unlock(wq_set);
    wait_queue_unlock(wq);
    splx(s);

    return KERN_SUCCESS;
}
/*
 *    Routine:    wait_queue_link
 *    Purpose:
 *        Insert a set wait queue into a wait queue.  This
 *        requires us to link the two together using a wait_queue_link
 *        structure that we allocate.
 *    Conditions:
 *        The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
    wait_queue_t wq,
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    kern_return_t ret;

    wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
    if (wql == WAIT_QUEUE_LINK_NULL)
        return KERN_RESOURCE_SHORTAGE;

    ret = wait_queue_link_noalloc(wq, wq_set, wql);
    if (ret != KERN_SUCCESS)
        kfree(wql, sizeof(struct wait_queue_link));

    return ret;
}
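
/*
 * Illustrative sketch (not part of the original file): linking a wait
 * queue into a wait queue set and tearing the link down again, as a
 * port-set-like client might.  The example_* names are hypothetical;
 * the wait_queue calls are the real interface.
 */
#if 0
static kern_return_t
example_link_queue_to_set(wait_queue_t wq, wait_queue_set_t wqs)
{
    kern_return_t kr;

    /* allocates the wait_queue_link internally; may block */
    kr = wait_queue_link(wq, wqs);
    if (kr == KERN_ALREADY_IN_SET)
        return KERN_SUCCESS;        /* treat re-linking as benign */
    return kr;
}

static void
example_unlink_queue_from_set(wait_queue_t wq, wait_queue_set_t wqs)
{
    /* frees the link structure; KERN_NOT_IN_SET if never linked */
    (void) wait_queue_unlink(wq, wqs);
}
#endif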
/*
 *    Routine:    wait_queue_unlink_locked
 *    Purpose:
 *        Undo the linkage between a wait queue and a set.
 *    Conditions:
 *        The wait queue and the set are locked.
 */
static void
wait_queue_unlink_locked(
    wait_queue_t wq,
    wait_queue_set_t wq_set,
    wait_queue_link_t wql)
{
    assert(wait_queue_held(wq));
    assert(wait_queue_held(&wq_set->wqs_wait_queue));

    wql->wql_queue = WAIT_QUEUE_NULL;
    queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
    wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
    queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
    wql->wql_type = WAIT_QUEUE_UNLINKED;

    WAIT_QUEUE_CHECK(wq);
    WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *    Routine:    wait_queue_unlink
 *    Purpose:
 *        Remove the linkage between a wait queue and a set,
 *        freeing the linkage structure.
 *    Conditions:
 *        The set queue must currently be a member of the wait queue
 */
kern_return_t
wait_queue_unlink(
    wait_queue_t wq,
    wait_queue_set_t wq_set)
{
    wait_queue_element_t wq_element;
    wait_queue_link_t wql;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
        return KERN_INVALID_ARGUMENT;
    }
    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wql = (wait_queue_link_t)wq_element;

            if (wql->wql_setqueue == wq_set) {
                wqs_lock(wq_set);
                wait_queue_unlink_locked(wq, wq_set, wql);
                wqs_unlock(wq_set);
                wait_queue_unlock(wq);
                splx(s);
                kfree(wql, sizeof(struct wait_queue_link));
                return KERN_SUCCESS;
            }
        }
        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }
    wait_queue_unlock(wq);
    splx(s);
    return KERN_NOT_IN_SET;
}
/*
 *    Routine:    wait_queue_unlinkall_nofree
 *    Purpose:
 *        Remove the linkage between a wait queue and all its
 *        sets.  The caller is responsible for freeing
 *        the wait queue link structures.
 */
kern_return_t
wait_queue_unlinkall_nofree(
    wait_queue_t wq)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wq_next_element;
    wait_queue_set_t wq_set;
    wait_queue_link_t wql;
    queue_head_t links_queue_head;
    queue_t links = &links_queue_head;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_queue(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    queue_init(links);

    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wq_next_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wql = (wait_queue_link_t)wq_element;
            wq_set = wql->wql_setqueue;
            wqs_lock(wq_set);
            wait_queue_unlink_locked(wq, wq_set, wql);
            wqs_unlock(wq_set);
        }
        wq_element = wq_next_element;
    }
    wait_queue_unlock(wq);
    splx(s);
    return(KERN_SUCCESS);
}
/*
 *    Routine:    wait_queue_unlink_all
 *    Purpose:
 *        Remove the linkage between a wait queue and all its sets.
 *        All the linkage structures are freed.
 *    Conditions:
 *        Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
    wait_queue_t wq)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wq_next_element;
    wait_queue_set_t wq_set;
    wait_queue_link_t wql;
    queue_head_t links_queue_head;
    queue_t links = &links_queue_head;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_queue(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    queue_init(links);

    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wq_next_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wql = (wait_queue_link_t)wq_element;
            wq_set = wql->wql_setqueue;
            wqs_lock(wq_set);
            wait_queue_unlink_locked(wq, wq_set, wql);
            wqs_unlock(wq_set);
            enqueue(links, &wql->wql_links);
        }
        wq_element = wq_next_element;
    }
    wait_queue_unlock(wq);
    splx(s);

    while (!queue_empty(links)) {
        wql = (wait_queue_link_t) dequeue(links);
        kfree(wql, sizeof(struct wait_queue_link));
    }

    return(KERN_SUCCESS);
}
/*
 *    Routine:    wait_queue_set_unlink_all_nofree
 *    Purpose:
 *        Remove the linkage between a set wait queue and all its
 *        member wait queues.  The link structures are not freed, nor
 *        returned.  It is the caller's responsibility to track and free
 *        them.
 *    Conditions:
 *        The wait queue must be inited as a set queue
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    wait_queue_t wq;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_set(wq_set)) {
        return KERN_INVALID_ARGUMENT;
    }

retry:
    s = splsched();
    wqs_lock(wq_set);

    q = &wq_set->wqs_setlinks;

    wql = (wait_queue_link_t)queue_first(q);
    while (!queue_end(q, (queue_entry_t)wql)) {
        WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
        wq = wql->wql_queue;
        if (wait_queue_lock_try(wq)) {
            wait_queue_unlink_locked(wq, wq_set, wql);
            wait_queue_unlock(wq);
            wql = (wait_queue_link_t)queue_first(q);
        } else {
            wqs_unlock(wq_set);
            splx(s);
            delay(1);
            goto retry;
        }
    }
    wqs_unlock(wq_set);
    splx(s);

    return(KERN_SUCCESS);
}
/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
    wait_queue_set_t wq_set)
{
    return wait_queue_set_unlink_all_nofree(wq_set);
}
/*
 *    Routine:    wait_queue_set_unlink_all
 *    Purpose:
 *        Remove the linkage between a set wait queue and all its
 *        member wait queues.  The link structures are freed.
 *    Conditions:
 *        The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    wait_queue_t wq;
    queue_t q;
    queue_head_t links_queue_head;
    queue_t links = &links_queue_head;
    spl_t s;

    if (!wait_queue_is_set(wq_set)) {
        return KERN_INVALID_ARGUMENT;
    }

    queue_init(links);

retry:
    s = splsched();
    wqs_lock(wq_set);

    q = &wq_set->wqs_setlinks;

    wql = (wait_queue_link_t)queue_first(q);
    while (!queue_end(q, (queue_entry_t)wql)) {
        WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
        wq = wql->wql_queue;
        if (wait_queue_lock_try(wq)) {
            wait_queue_unlink_locked(wq, wq_set, wql);
            wait_queue_unlock(wq);
            enqueue(links, &wql->wql_links);
            wql = (wait_queue_link_t)queue_first(q);
        } else {
            wqs_unlock(wq_set);
            splx(s);
            delay(1);
            goto retry;
        }
    }
    wqs_unlock(wq_set);
    splx(s);

    while (!queue_empty(links)) {
        wql = (wait_queue_link_t) dequeue(links);
        kfree(wql, sizeof(struct wait_queue_link));
    }
    return(KERN_SUCCESS);
}
/*
 *    Routine:    wait_queue_unlink_one
 *    Purpose:
 *        Find and unlink one set wait queue
 *    Conditions:
 *        Nothing of interest locked.
 */
void
wait_queue_unlink_one(
    wait_queue_t wq,
    wait_queue_set_t *wq_setp)
{
    wait_queue_element_t wq_element;
    queue_t q;
    spl_t s;

    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {

        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_set_t wq_set = wql->wql_setqueue;

            wqs_lock(wq_set);
            wait_queue_unlink_locked(wq, wq_set, wql);
            wqs_unlock(wq_set);
            wait_queue_unlock(wq);
            splx(s);
            kfree(wql, sizeof(struct wait_queue_link));
            *wq_setp = wq_set;
            return;
        }

        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }
    wait_queue_unlock(wq);
    splx(s);
    *wq_setp = WAIT_QUEUE_SET_NULL;
}
/*
 *    Routine:    wait_queue_assert_wait64_locked
 *    Purpose:
 *        Insert the current thread into the supplied wait queue
 *        waiting for a particular event to be posted to that queue.
 *
 *    Conditions:
 *        The wait queue is assumed locked.
 *        The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
    wait_queue_t wq,
    event64_t event,
    wait_interrupt_t interruptible,
    uint64_t deadline,
    thread_t thread)
{
    wait_result_t wait_result;

    if (!wait_queue_assert_possible(thread))
        panic("wait_queue_assert_wait64_locked");

    if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
        wait_queue_set_t wqs = (wait_queue_set_t)wq;

        if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
            return(THREAD_AWAKENED);
    }

    /*
     * This is the extent to which we currently take scheduling attributes
     * into account.  If the thread is vm privileged, we stick it at
     * the front of the queue.  Later, these queues will honor the policy
     * value set at wait_queue_init time.
     */
    wait_result = thread_mark_wait_locked(thread, interruptible);
    if (wait_result == THREAD_WAITING) {
        if (thread->options & TH_OPT_VMPRIV)
            enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
        else
            enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

        thread->wait_event = event;
        thread->wait_queue = wq;

        if (deadline != 0) {
            if (!timer_call_enter(&thread->wait_timer, deadline))
                thread->wait_timer_active++;
            thread->wait_timer_is_set = TRUE;
        }
    }
    return(wait_result);
}
/*
 *    Routine:    wait_queue_assert_wait
 *    Purpose:
 *        Insert the current thread into the supplied wait queue
 *        waiting for a particular event to be posted to that queue.
 *
 *    Conditions:
 *        nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
    wait_queue_t wq,
    event_t event,
    wait_interrupt_t interruptible,
    uint64_t deadline)
{
    spl_t s;
    wait_result_t ret;
    thread_t thread = current_thread();

    /* If it is an invalid wait queue, you can't wait on it */
    if (!wait_queue_is_valid(wq))
        return (thread->wait_result = THREAD_RESTART);

    s = splsched();
    wait_queue_lock(wq);
    thread_lock(thread);
    ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
                                          interruptible, deadline, thread);
    thread_unlock(thread);
    wait_queue_unlock(wq);
    splx(s);
    return(ret);
}
/*
 *    Routine:    wait_queue_assert_wait64
 *    Purpose:
 *        Insert the current thread into the supplied wait queue
 *        waiting for a particular event to be posted to that queue.
 *    Conditions:
 *        nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
    wait_queue_t wq,
    event64_t event,
    wait_interrupt_t interruptible,
    uint64_t deadline)
{
    spl_t s;
    wait_result_t ret;
    thread_t thread = current_thread();

    /* If it is an invalid wait queue, you can't wait on it */
    if (!wait_queue_is_valid(wq))
        return (thread->wait_result = THREAD_RESTART);

    s = splsched();
    wait_queue_lock(wq);
    thread_lock(thread);
    ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
    thread_unlock(thread);
    wait_queue_unlock(wq);
    splx(s);
    return(ret);
}
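
/*
 * Illustrative sketch (not part of the original file): the canonical
 * wait-side pattern.  A thread asserts a wait on a <queue, event> pair
 * and then blocks; the wakeup side (see the wakeup routines below)
 * posts the same event.  `example_event` and `example_wait` are
 * hypothetical names; wait_queue_assert_wait and thread_block are the
 * real calls.
 */
#if 0
static int example_event;        /* any stable kernel address serves as an event */

static wait_result_t
example_wait(wait_queue_t wq)
{
    wait_result_t wres;

    wres = wait_queue_assert_wait(wq, (event_t)&example_event,
                                  THREAD_UNINT, 0 /* no deadline */);
    if (wres == THREAD_WAITING)
        wres = thread_block(THREAD_CONTINUE_NULL);
    return wres;        /* THREAD_AWAKENED on a normal wakeup */
}
#endif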
/*
 *    Routine:    _wait_queue_select64_all
 *    Purpose:
 *        Select all threads off a wait queue that meet the
 *        supplied criteria.
 *    Conditions:
 *        at splsched
 *        wait queue locked
 *        wake_queue initialized and ready for insertion
 *        possibly recursive
 *    Returns:
 *        a queue of locked threads
 */
static void
_wait_queue_select64_all(
    wait_queue_t wq,
    event64_t event,
    queue_t wake_queue)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    queue_t q;

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_t set_queue;

            /*
             * We have to check the set wait queue.
             */
            set_queue = (wait_queue_t)wql->wql_setqueue;
            wait_queue_lock(set_queue);
            if (set_queue->wq_isprepost) {
                wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

                /*
                 * Preposting is only for sets and wait queue
                 * is the first element of set
                 */
                wqs->wqs_refcount++;
            }
            if (! wait_queue_empty(set_queue))
                _wait_queue_select64_all(set_queue, event, wake_queue);
            wait_queue_unlock(set_queue);
        } else {

            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, pull
             * it off the queue and stick it in our wake_queue.
             */
            thread_t t = (thread_t)wq_element;

            if (t->wait_event == event) {
                thread_lock(t);
                remqueue(q, (queue_entry_t) t);
                enqueue(wake_queue, (queue_entry_t) t);
                t->wait_queue = WAIT_QUEUE_NULL;
                t->wait_event = NO_EVENT64;
                t->at_safe_point = FALSE;
                /* returned locked */
            }
        }
        wq_element = wqe_next;
    }
}
/*
 *    Routine:    wait_queue_wakeup64_all_locked
 *    Purpose:
 *        Wakeup some number of threads that are in the specified
 *        wait queue and waiting on the specified event.
 *    Conditions:
 *        wait queue already locked (may be released).
 *    Returns:
 *        KERN_SUCCESS - Threads were woken up
 *        KERN_NOT_WAITING - No threads were waiting <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
    wait_queue_t wq,
    event64_t event,
    wait_result_t result,
    boolean_t unlock)
{
    queue_head_t wake_queue_head;
    queue_t q = &wake_queue_head;
    kern_return_t res;

//    assert(wait_queue_held(wq));
    if (!wq->wq_interlock.lock_data) {        /* (BRINGUP) */
        panic("wait_queue_wakeup64_all_locked: lock not held on %08X\n", wq);    /* (BRINGUP) */
    }

    queue_init(q);

    /*
     * Select the threads that we will wake up.  The threads
     * are returned to us locked and cleanly removed from the
     * wait queue.
     */
    _wait_queue_select64_all(wq, event, q);
    if (unlock)
        wait_queue_unlock(wq);

    /*
     * For each thread, set it running.
     */
    res = KERN_NOT_WAITING;
    while (!queue_empty(q)) {
        thread_t thread = (thread_t) dequeue(q);
        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
    }
    return res;
}
/*
 *    Routine:    wait_queue_wakeup_all
 *    Purpose:
 *        Wakeup some number of threads that are in the specified
 *        wait queue and waiting on the specified event.
 *    Conditions:
 *        Nothing locked
 *    Returns:
 *        KERN_SUCCESS - Threads were woken up
 *        KERN_NOT_WAITING - No threads were waiting <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
    wait_queue_t wq,
    event_t event,
    wait_result_t result)
{
    kern_return_t ret;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    if (!wq->wq_interlock.lock_data) {        /* (BRINGUP) */
        panic("wait_queue_wakeup_all: we did not get the lock on %08X\n", wq);    /* (BRINGUP) */
    }
    ret = wait_queue_wakeup64_all_locked(
                wq, (event64_t)((uint32_t)event),
                result, TRUE);
    /* lock released */
    splx(s);
    return ret;
}
/*
 *    Routine:    wait_queue_wakeup64_all
 *    Purpose:
 *        Wakeup some number of threads that are in the specified
 *        wait queue and waiting on the specified event.
 *    Conditions:
 *        Nothing locked
 *    Returns:
 *        KERN_SUCCESS - Threads were woken up
 *        KERN_NOT_WAITING - No threads were waiting <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
    wait_queue_t wq,
    event64_t event,
    wait_result_t result)
{
    kern_return_t ret;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
    /* lock released */
    splx(s);
    return ret;
}
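
/*
 * Illustrative sketch (not part of the original file): the matching
 * wakeup side for the wait sketch above.  Waking "all" releases every
 * thread waiting on the <queue, event> pair; `example_event` is the
 * same hypothetical event address used in the earlier sketch.
 */
#if 0
static void
example_wakeup_everyone(wait_queue_t wq)
{
    kern_return_t kr;

    kr = wait_queue_wakeup_all(wq, (event_t)&example_event,
                               THREAD_AWAKENED);
    if (kr == KERN_NOT_WAITING) {
        /* nobody was waiting; often not an error for callers */
    }
}
#endif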
/*
 *    Routine:    _wait_queue_select64_one
 *    Purpose:
 *        Select the best thread off a wait queue that meets the
 *        supplied criteria.
 *    Conditions:
 *        at splsched
 *        wait queue locked
 *        possibly recursive
 *    Returns:
 *        a locked thread - if one found
 *    Note:
 *        This is where the sync policy of the wait queue comes
 *        into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
    wait_queue_t wq,
    event64_t event)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    thread_t t = THREAD_NULL;
    queue_t q;

    assert(wq->wq_fifo);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_t set_queue;

            /*
             * We have to check the set wait queue.
             */
            set_queue = (wait_queue_t)wql->wql_setqueue;
            wait_queue_lock(set_queue);
            if (! wait_queue_empty(set_queue)) {
                t = _wait_queue_select64_one(set_queue, event);
            }
            wait_queue_unlock(set_queue);
            if (t != THREAD_NULL)
                return t;
        } else {

            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, pull
             * it off the queue and return it.
             */
            t = (thread_t)wq_element;
            if (t->wait_event == event) {
                thread_lock(t);
                remqueue(q, (queue_entry_t) t);

                t->wait_queue = WAIT_QUEUE_NULL;
                t->wait_event = NO_EVENT64;
                t->at_safe_point = FALSE;
                return t;        /* still locked */
            }

            t = THREAD_NULL;
        }
        wq_element = wqe_next;
    }
    return THREAD_NULL;
}
/*
 *    Routine:    wait_queue_peek64_locked
 *    Purpose:
 *        Select the best thread from a wait queue that meets the
 *        supplied criteria, but leave it on the queue it was
 *        found on.  The thread, and the actual wait_queue the
 *        thread was found on are identified.
 *    Conditions:
 *        at splsched
 *        wait queue locked
 *        possibly recursive
 *    Returns:
 *        a locked thread - if one found
 *        a locked waitq - the one the thread was found on
 *    Note:
 *        Both the waitq the thread was actually found on, and
 *        the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
    wait_queue_t wq,
    event64_t event,
    thread_t *tp,
    wait_queue_t *wqp)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    queue_t q;

    assert(wq->wq_fifo);

    *tp = THREAD_NULL;

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_t set_queue;

            /*
             * We have to check the set wait queue.
             */
            set_queue = (wait_queue_t)wql->wql_setqueue;
            wait_queue_lock(set_queue);
            if (! wait_queue_empty(set_queue)) {
                wait_queue_peek64_locked(set_queue, event, tp, wqp);
            }
            if (*tp != THREAD_NULL) {
                if (*wqp != set_queue)
                    wait_queue_unlock(set_queue);
                return;        /* thread and its waitq locked */
            }

            wait_queue_unlock(set_queue);
        } else {

            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, return
             * it locked, but leave it on the queue.
             */
            thread_t t = (thread_t)wq_element;

            if (t->wait_event == event) {
                thread_lock(t);
                *tp = t;
                *wqp = wq;
                return;
            }
        }
        wq_element = wqe_next;
    }
}
/*
 *    Routine:    wait_queue_pull_thread_locked
 *    Purpose:
 *        Pull a thread that was previously "peeked" off the wait
 *        queue and (possibly) unlock the waitq.
 *    Conditions:
 *        at splsched
 *        wait queue locked
 *        thread locked
 *    Returns:
 *        with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
    wait_queue_t waitq,
    thread_t thread,
    boolean_t unlock)
{

    assert(thread->wait_queue == waitq);

    remqueue(&waitq->wq_queue, (queue_entry_t)thread);
    thread->wait_queue = WAIT_QUEUE_NULL;
    thread->wait_event = NO_EVENT64;
    thread->at_safe_point = FALSE;

    if (unlock)
        wait_queue_unlock(waitq);
}
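
/*
 * Illustrative sketch (not part of the original file): the peek/pull
 * pair lets a caller inspect the most-eligible waiter before deciding
 * to dequeue it.  Per the conditions above, the caller must be at
 * splsched with the supplied wait queue locked; the function name is
 * hypothetical.
 */
#if 0
static thread_t
example_claim_waiter(wait_queue_t wq, event64_t event)
{
    thread_t thread = THREAD_NULL;
    wait_queue_t found_wq = WAIT_QUEUE_NULL;

    wait_queue_peek64_locked(wq, event, &thread, &found_wq);
    if (thread != THREAD_NULL) {
        /* dequeue from the queue it was actually found on; TRUE
           drops that queue's lock while the thread stays locked
           (the caller still holds the lock on wq itself) */
        wait_queue_pull_thread_locked(found_wq, thread, TRUE);
    }
    return thread;        /* locked, or THREAD_NULL */
}
#endif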
/*
 *    Routine:    _wait_queue_select64_thread
 *    Purpose:
 *        Look for a thread and remove it from the queues, if
 *        (and only if) the thread is waiting on the supplied
 *        <wait_queue, event> pair.
 *    Conditions:
 *        at splsched
 *        wait queue locked
 *        possibly recursive
 *    Returns:
 *        KERN_NOT_WAITING: Thread is not waiting here.
 *        KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
    wait_queue_t wq,
    event64_t event,
    thread_t thread)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    kern_return_t res = KERN_NOT_WAITING;
    queue_t q = &wq->wq_queue;

    thread_lock(thread);
    if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
        remqueue(q, (queue_entry_t) thread);
        thread->at_safe_point = FALSE;
        thread->wait_event = NO_EVENT64;
        thread->wait_queue = WAIT_QUEUE_NULL;
        /* thread still locked */
        return KERN_SUCCESS;
    }
    thread_unlock(thread);

    /*
     * The wait_queue associated with the thread may be one of this
     * wait queue's sets.  Go see.  If so, removing it from
     * there is like removing it from here.
     */
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_t set_queue;

            set_queue = (wait_queue_t)wql->wql_setqueue;
            wait_queue_lock(set_queue);
            if (! wait_queue_empty(set_queue)) {
                res = _wait_queue_select64_thread(set_queue,
                                                  event,
                                                  thread);
            }
            wait_queue_unlock(set_queue);
            if (res == KERN_SUCCESS)
                return KERN_SUCCESS;
        }
        wq_element = wqe_next;
    }
    return res;
}
/*
 *    Routine:    wait_queue_wakeup64_identity_locked
 *    Purpose:
 *        Select a single thread that is most-eligible to run and
 *        set it running.  But return the thread locked.
 *
 *    Conditions:
 *        at splsched
 *        wait queue locked
 *        possibly recursive
 *    Returns:
 *        a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
    wait_queue_t wq,
    event64_t event,
    wait_result_t result,
    boolean_t unlock)
{
    kern_return_t res;
    thread_t thread;

    assert(wait_queue_held(wq));

    thread = _wait_queue_select64_one(wq, event);
    if (unlock)
        wait_queue_unlock(wq);

    if (thread) {
        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
    }
    return thread;  /* still locked if not NULL */
}
/*
 *    Routine:    wait_queue_wakeup64_one_locked
 *    Purpose:
 *        Select a single thread that is most-eligible to run and
 *        set it running.
 *    Conditions:
 *        at splsched
 *        wait queue locked
 *        possibly recursive
 *    Returns:
 *        KERN_SUCCESS: It was, and is, now removed.
 *        KERN_NOT_WAITING - No thread was waiting <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
    wait_queue_t wq,
    event64_t event,
    wait_result_t result,
    boolean_t unlock)
{
    thread_t thread;

    assert(wait_queue_held(wq));

    thread = _wait_queue_select64_one(wq, event);
    if (unlock)
        wait_queue_unlock(wq);

    if (thread) {
        kern_return_t res;

        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        return res;
    }

    return KERN_NOT_WAITING;
}
/*
 *    Routine:    wait_queue_wakeup_one
 *    Purpose:
 *        Wakeup the most appropriate thread that is in the specified
 *        wait queue for the specified event.
 *    Conditions:
 *        Nothing locked
 *    Returns:
 *        KERN_SUCCESS - Thread was woken up
 *        KERN_NOT_WAITING - No thread was waiting <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
    wait_queue_t wq,
    event_t event,
    wait_result_t result)
{
    thread_t thread;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
    wait_queue_unlock(wq);

    if (thread) {
        kern_return_t res;

        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        splx(s);
        return res;
    }

    splx(s);
    return KERN_NOT_WAITING;
}
/*
 *    Routine:    wait_queue_wakeup64_one
 *    Purpose:
 *        Wakeup the most appropriate thread that is in the specified
 *        wait queue for the specified event.
 *    Conditions:
 *        Nothing locked
 *    Returns:
 *        KERN_SUCCESS - Thread was woken up
 *        KERN_NOT_WAITING - No thread was waiting <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
    wait_queue_t wq,
    event64_t event,
    wait_result_t result)
{
    thread_t thread;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    thread = _wait_queue_select64_one(wq, event);
    wait_queue_unlock(wq);

    if (thread) {
        kern_return_t res;

        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        splx(s);
        return res;
    }

    splx(s);
    return KERN_NOT_WAITING;
}
/*
 *    Routine:    wait_queue_wakeup64_thread_locked
 *    Purpose:
 *        Wakeup the particular thread that was specified if and only
 *        if it was in this wait queue (or one of its set queues)
 *        and waiting on the specified event.
 *
 *        This is much safer than just removing the thread from
 *        whatever wait queue it happens to be on.  For instance, it
 *        may have already been awoken from the wait you intended to
 *        interrupt and waited on something else (like another
 *        semaphore).
 *    Conditions:
 *        at splsched
 *        wait queue already locked (may be released).
 *    Returns:
 *        KERN_SUCCESS - the thread was found waiting and awakened
 *        KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
    wait_queue_t wq,
    event64_t event,
    thread_t thread,
    wait_result_t result,
    boolean_t unlock)
{
    kern_return_t res;

    assert(wait_queue_held(wq));

    /*
     * See if the thread was still waiting there.  If so, it got
     * dequeued and returned locked.
     */
    res = _wait_queue_select64_thread(wq, event, thread);
    if (unlock)
        wait_queue_unlock(wq);

    if (res != KERN_SUCCESS)
        return KERN_NOT_WAITING;

    res = thread_go(thread, result);
    assert(res == KERN_SUCCESS);
    thread_unlock(thread);
    return res;
}
/*
 *    Routine:    wait_queue_wakeup_thread
 *    Purpose:
 *        Wakeup the particular thread that was specified if and only
 *        if it was in this wait queue (or one of its set queues)
 *        and waiting on the specified event.
 *
 *        This is much safer than just removing the thread from
 *        whatever wait queue it happens to be on.  For instance, it
 *        may have already been awoken from the wait you intended to
 *        interrupt and waited on something else (like another
 *        semaphore).
 *    Conditions:
 *        nothing of interest locked
 *        we need to assume spl needs to be raised
 *    Returns:
 *        KERN_SUCCESS - the thread was found waiting and awakened
 *        KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
    wait_queue_t wq,
    event_t event,
    thread_t thread,
    wait_result_t result)
{
    kern_return_t res;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
    wait_queue_unlock(wq);

    if (res == KERN_SUCCESS) {
        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        splx(s);
        return res;
    }
    splx(s);
    return KERN_NOT_WAITING;
}
/*
 *    Routine:    wait_queue_wakeup64_thread
 *    Purpose:
 *        Wakeup the particular thread that was specified if and only
 *        if it was in this wait queue (or one of its set's queues)
 *        and waiting on the specified event.
 *
 *        This is much safer than just removing the thread from
 *        whatever wait queue it happens to be on.  For instance, it
 *        may have already been awoken from the wait you intended to
 *        interrupt and waited on something else (like another
 *        semaphore).
 *    Conditions:
 *        nothing of interest locked
 *        we need to assume spl needs to be raised
 *    Returns:
 *        KERN_SUCCESS - the thread was found waiting and awakened
 *        KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
    wait_queue_t wq,
    event64_t event,
    thread_t thread,
    wait_result_t result)
{
    kern_return_t res;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    res = _wait_queue_select64_thread(wq, event, thread);
    wait_queue_unlock(wq);

    if (res == KERN_SUCCESS) {
        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        splx(s);
        return res;
    }
    splx(s);
    return KERN_NOT_WAITING;
}
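
/*
 * Illustrative sketch (not part of the original file): a targeted
 * wakeup.  Unlike blindly clearing a thread's wait state, this only
 * succeeds if the thread is still waiting on this exact <queue, event>
 * pair, which is exactly why the routines above recommend it.
 * `example_cancel_wait` and `example_event` are hypothetical names;
 * wait_queue_wakeup_thread is the real call.
 */
#if 0
static boolean_t
example_cancel_wait(wait_queue_t wq, thread_t thread)
{
    kern_return_t kr;

    kr = wait_queue_wakeup_thread(wq, (event_t)&example_event,
                                  thread, THREAD_INTERRUPTED);
    return (kr == KERN_SUCCESS);    /* FALSE: it had already moved on */
}
#endif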