/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside of
 *		the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	kfree((vm_offset_t)wq, sizeof(struct wait_queue));
	return KERN_SUCCESS;
}
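
/*
 * Example (illustrative sketch, not part of the original source):
 * typical lifetime of a wait queue allocated for use outside the
 * Mach core.  wait_queue_free() fails with KERN_FAILURE while
 * anything (threads or set links) is still queued on it.
 */
#if 0	/* illustrative only */
static void
example_queue_lifetime(void)
{
	wait_queue_t wq;

	wq = wait_queue_alloc(SYNC_POLICY_FIFO);
	if (wq == WAIT_QUEUE_NULL)
		return;			/* resource shortage */

	/* ... waits and wakeups happen on wq here ... */

	(void) wait_queue_free(wq);
}
#endif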
/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_isprepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_isprepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}
/* legacy API */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}
/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Conditions:
 *		May block.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}
/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
	return KERN_SUCCESS;
}
kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
		  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
		  "wait queue element list corruption: wq=%#x, wqe=%#x", \
		  (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
		  ((wql)->wql_setqueue == (wqs)) && \
		  ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
		  (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
		  "wait queue set links corruption: wqs=%#x, wql=%#x", \
		  (wqs), (wql))
#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
__private_extern__ boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
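
/*
 * Example (sketch, not in the original source): using
 * wait_queue_member() to make unlinking idempotent.  The wq and
 * wq_set arguments are assumed to have been set up by the caller.
 */
#if 0	/* illustrative only */
static void
example_unlink_if_member(wait_queue_t wq, wait_queue_set_t wq_set)
{
	/* only unlink if the queue is actually in the set */
	if (wait_queue_member(wq, wq_set))
		(void) wait_queue_unlink(wq, wq_set);
}
#endif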
/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a
 *		caller-provided wait_queue_link structure.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	ret = wait_queue_link_noalloc(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));

	return ret;
}
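
/*
 * Example (sketch, not part of the original source): building a
 * prepost set and linking a wait queue into it.  wait_queue_link()
 * allocates the link structure itself and can fail with
 * KERN_RESOURCE_SHORTAGE or KERN_ALREADY_IN_SET.
 */
#if 0	/* illustrative only */
static wait_queue_set_t
example_make_set(wait_queue_t wq)
{
	wait_queue_set_t wq_set;

	wq_set = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
	if (wq_set == WAIT_QUEUE_SET_NULL)
		return WAIT_QUEUE_SET_NULL;

	if (wait_queue_link(wq, wq_set) != KERN_SUCCESS) {
		(void) wait_queue_set_free(wq_set);
		return WAIT_QUEUE_SET_NULL;
	}
	return wq_set;
}
#endif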
/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set
 *		without freeing the link structure.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The set queue being removed must be a member of the wait queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlinkall_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and all its
 *		sets. The caller is responsible for freeing
 *		the wait queue link structures.
 */
kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are freed.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
	}

	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are not freed, nor
 *		returned. It is the caller's responsibility to track and free
 *		them.
 *	Conditions:
 *		The wait queue being operated on must be a set queue
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}
/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are freed.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_one
 *	Purpose:
 *		Find and unlink one set wait queue
 *	Conditions:
 *		Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	boolean_t unlock)
{
	thread_t thread;
	wait_result_t wait_result;

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;
		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0) {
			if (unlock)
				wait_queue_unlock(wq);
			return(THREAD_AWAKENED);
		}
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	thread = current_thread();
	thread_lock(thread);
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->vm_privilege)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
		thread->wait_event = event;
		thread->wait_queue = wq;
	}
	thread_unlock(thread);
	if (unlock)
		wait_queue_unlock(wq);
	return(wait_result);
}
/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq)) {
		thread_t thread = current_thread();
		return (thread->wait_result = THREAD_RESTART);
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_assert_wait64_locked(
			wq, (event64_t)((uint32_t)event),
			interruptible, TRUE);
	/* wait queue unlocked */
	splx(s);
	return(ret);
}
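
/*
 * Example (illustrative sketch, not part of the original source): the
 * wait side of the <queue, event> protocol built on
 * wait_queue_assert_wait().  The flag and queue here are hypothetical;
 * thread_block() is the blocking primitive from sched_prim.c.  A real
 * caller must serialize the flag test against the wakeup side (and
 * re-check the flag on return) to avoid missed wakeups.
 */
#if 0	/* illustrative only */
static volatile int example_flag;	/* hypothetical condition + event tag */

static void
example_wait_for_flag(wait_queue_t wq)
{
	while (!example_flag) {
		wait_result_t wres;

		wres = wait_queue_assert_wait(wq, (event_t)&example_flag,
					      THREAD_UNINT);
		if (wres == THREAD_WAITING)
			(void) thread_block(THREAD_CONTINUE_NULL);
	}
}
#endif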
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq)) {
		thread_t thread = current_thread();
		return (thread->wait_result = THREAD_RESTART);
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, TRUE);
	/* wait queue unlocked */
	splx(s);
	return(ret);
}
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets and wait queue
				 * is the first element of set
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue (wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));
	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty (q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(
			wq, (event64_t)((uint32_t)event),
			result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
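
/*
 * Example (sketch, not part of the original source): the wakeup side
 * matching the wait pattern shown earlier.  Waking with
 * THREAD_AWAKENED releases every thread waiting on this <queue, event>
 * pair; a KERN_NOT_WAITING return just means nobody was waiting yet.
 * example_flag is the same hypothetical condition used as the event
 * tag on the wait side.
 */
#if 0	/* illustrative only */
static void
example_post_flag(wait_queue_t wq)
{
	example_flag = 1;
	(void) wait_queue_wakeup_all(wq, (event_t)&example_flag,
				     THREAD_AWAKENED);
}
#endif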
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Routine:	wait_queue_peek64_locked
 *	Purpose:
 *		Select the best thread from a wait queue that meets the
 *		supplied criteria, but leave it on the queue it was
 *		found on.  The thread, and the actual wait_queue the
 *		thread was found on are identified.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *		a locked waitq - the one the thread was found on
 *	Note:
 *		Both the waitq the thread was actually found on, and
 *		the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL) {
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;		/* thread and its waitq locked */
			}

			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread that was previously "peeked" off the wait
 *		queue and (possibly) unlock the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
	if (unlock)
		wait_queue_unlock(waitq);
}
/*
 *	Routine:	_wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								  event,
								  thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS - a thread was found waiting and awakened
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
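
/*
 * Example (sketch, not part of the original source): a single-resource
 * handoff.  Waking just one waiter avoids a thundering herd when only
 * one thread can make progress; KERN_NOT_WAITING tells the caller the
 * wakeup found no consumer.  example_flag is the hypothetical event
 * tag from the earlier sketches.
 */
#if 0	/* illustrative only */
static void
example_release_one(wait_queue_t wq)
{
	if (wait_queue_wakeup_one(wq, (event_t)&example_flag,
				  THREAD_AWAKENED) == KERN_NOT_WAITING) {
		/* no thread was waiting; note the resource stays free */
	}
}
#endif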
/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go_locked(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
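
/*
 * Example (sketch, not part of the original source): aborting one
 * specific thread's wait.  As the comment above notes, this only
 * succeeds if that exact thread is still waiting on this
 * <queue, event> pair, so a thread that already woke and went to
 * sleep elsewhere is left alone.
 */
#if 0	/* illustrative only */
static void
example_interrupt_waiter(wait_queue_t wq, thread_t thread)
{
	if (wait_queue_wakeup_thread(wq, (event_t)&example_flag, thread,
				     THREAD_INTERRUPTED) == KERN_NOT_WAITING) {
		/* thread already woke up or is waiting somewhere else */
	}
}
#endif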
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set's queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}