/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc..).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
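
/*
 * Usage sketch (illustrative only, not part of the original file): a wait
 * queue is typically embedded inside a larger synchronizer and initialized
 * in place.  The struct and function names below are hypothetical; only
 * wait_queue_init and SYNC_POLICY_FIFO come from this interface.
 */
#if 0	/* example only */
struct my_sync_object {
	struct wait_queue	mso_wait_queue;	/* threads block here */
	int			mso_state;
};

static kern_return_t
my_sync_object_setup(struct my_sync_object *mso)
{
	mso->mso_state = 0;
	/* FIFO is the only ordering policy accepted above */
	return wait_queue_init(&mso->mso_wait_queue, SYNC_POLICY_FIFO);
}
#endif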
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside of
 *		the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	kfree((vm_offset_t)wq, sizeof(struct wait_queue));
	return KERN_SUCCESS;
}
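
/*
 * Usage sketch (illustrative only): the wait_queue_alloc/wait_queue_free
 * pairing for code outside the Mach core.  wait_queue_free refuses to free
 * a queue that still has waiters or links on it, so its return value must
 * be checked.  The function name is hypothetical.
 */
#if 0	/* example only */
static void
wait_queue_lifecycle_example(void)
{
	wait_queue_t wq;

	wq = wait_queue_alloc(SYNC_POLICY_FIFO);
	if (wq == WAIT_QUEUE_NULL)
		return;			/* resource shortage */

	/* ... wait on and wake up the queue ... */

	if (wait_queue_free(wq) != KERN_SUCCESS) {
		/* queue still in use; example policy is to leak it */
	}
}
#endif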
/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_isprepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_isprepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}
/* legacy interface naming */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}
/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Conditions:
 *		May block.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}
/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
	return KERN_SUCCESS;
}
kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);
	return KERN_SUCCESS;
}
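
/*
 * Usage sketch (illustrative only): allocating a wait queue set.  Adding
 * SYNC_POLICY_PREPOST to the ordering policy enables the prepost behavior
 * used later in this file: posts to the set are remembered in wqs_refcount
 * even when no thread is waiting yet, and wait_queue_sub_clearrefs discards
 * that memory.  The function name is hypothetical.
 */
#if 0	/* example only */
static wait_queue_set_t
make_preposting_set(void)
{
	return wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
}
#endif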
/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)

#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
	  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
	  "wait queue element list corruption: wq=%#x, wqe=%#x", \
	  (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
			((wql)->wql_setqueue == (wqs)) && \
			((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
			(WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
			"wait queue set links corruption: wqs=%#x, wql=%#x", \
			(wqs), (wql))
#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
__private_extern__ boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
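
/*
 * Usage sketch (illustrative only): wait_queue_member takes the necessary
 * locks itself, so callers may query membership with nothing held.  The
 * wrapper name is hypothetical.
 */
#if 0	/* example only */
static boolean_t
queue_feeds_set(wait_queue_t wq, wait_queue_set_t wq_set)
{
	return wait_queue_member(wq, wq_set);
}
#endif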
/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that the caller has previously allocated.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	ret = wait_queue_link_noalloc(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));

	return ret;
}
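
/*
 * Usage sketch (illustrative only): linking a queue into a set and handling
 * the interesting failure modes.  A caller that must not block would instead
 * preallocate the link structure and call wait_queue_link_noalloc.  The
 * function name and the recovery policy are hypothetical.
 */
#if 0	/* example only */
static kern_return_t
attach_queue_to_set(wait_queue_t wq, wait_queue_set_t wq_set)
{
	kern_return_t kr;

	kr = wait_queue_link(wq, wq_set);
	if (kr == KERN_ALREADY_IN_SET)
		kr = KERN_SUCCESS;	/* treat a duplicate link as success */
	return kr;			/* else KERN_RESOURCE_SHORTAGE, etc. */
}
#endif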
/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The set queue being unlinked must be a member of the wait queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
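
/*
 * Usage sketch (illustrative only): wait_queue_unlink frees the link
 * structure itself; the *_nofree variants leave that to a caller that
 * supplied its own link via wait_queue_link_noalloc.  The wrapper name is
 * hypothetical.
 */
#if 0	/* example only */
static void
detach_queue_from_set(wait_queue_t wq, wait_queue_set_t wq_set)
{
	if (wait_queue_unlink(wq, wq_set) == KERN_NOT_IN_SET) {
		/* already detached; nothing more to do in this example */
	}
}
#endif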
/*
 *	Routine:	wait_queue_unlinkall_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and all its
 *		sets.  The caller is responsible for freeing
 *		the wait queue link structures.
 */
kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are freed.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
	}

	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues.  The link structures are not freed, nor
 *		returned.  It is the caller's responsibility to track and free
 *		them.
 *	Conditions:
 *		The wait queue must be a set queue
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}
/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues.  The link structures are freed.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty (links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_unlink_one
 *	Purpose:
 *		Find and unlink one set wait queue
 *	Conditions:
 *		Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree((vm_offset_t)wql,sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->vm_privilege)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
		thread->wait_event = event;
		thread->wait_queue = wq;
	}
	return(wait_result);
}
/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq)) {
		thread_t thread = current_thread();
		return (thread->wait_result = THREAD_RESTART);
	}

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(
			wq, (event64_t)((uint32_t)event),
			interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
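
/*
 * Usage sketch (illustrative only): the canonical wait pattern built on
 * this routine.  The caller asserts the wait and, if THREAD_WAITING comes
 * back, actually blocks via thread_block from sched_prim.  The wrapper
 * name and the THREAD_CONTINUE_NULL spelling of "no continuation" are
 * assumptions, not part of this file.
 */
#if 0	/* example only */
static wait_result_t
wait_for_event(wait_queue_t wq, event_t event)
{
	wait_result_t wres;

	wres = wait_queue_assert_wait(wq, event, THREAD_UNINT);
	if (wres == THREAD_WAITING)
		wres = thread_block(THREAD_CONTINUE_NULL);
	return wres;
}
#endif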
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq)) {
		thread_t thread = current_thread();
		return (thread->wait_result = THREAD_RESTART);
	}

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets and wait queue
				 * is the first element of set
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue(wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));
	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty (q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing of interest locked.
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(
			wq, (event64_t)((uint32_t)event),
			result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
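
/*
 * Usage sketch (illustrative only): broadcasting a state change to every
 * thread blocked on the <wq,event> pair.  KERN_NOT_WAITING is not an error
 * here; it just means nobody was blocked.  The wrapper name is hypothetical.
 */
#if 0	/* example only */
static void
broadcast_event(wait_queue_t wq, event_t event)
{
	(void) wait_queue_wakeup_all(wq, event, THREAD_AWAKENED);
}
#endif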
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing of interest locked.
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Routine:	wait_queue_peek64_locked
 *	Purpose:
 *		Select the best thread from a wait queue that meets the
 *		supplied criteria, but leave it on the queue it was
 *		found on.  The thread, and the actual wait_queue the
 *		thread was found on are identified.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *		a locked waitq - the one the thread was found on
 *	Note:
 *		Both the waitq the thread was actually found on, and
 *		the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL) {
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;		/* thread and its waitq locked */
			}

			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread that was previously "peeked" off the wait
 *		queue and (possibly) unlock the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
__private_extern__ void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
	if (unlock)
		wait_queue_unlock(waitq);
}
/*
 *	Routine:	wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								event,
								thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS: It was, and is, now removed.
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing of interest locked.
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
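
/*
 * Usage sketch (illustrative only): handing off to exactly one waiter, as a
 * semaphore-style signal would.  Using wakeup_one rather than wakeup_all
 * avoids a thundering herd when only one waiter can make progress.  The
 * wrapper name is hypothetical.
 */
#if 0	/* example only */
static boolean_t
signal_one_waiter(wait_queue_t wq, event_t event)
{
	return (wait_queue_wakeup_one(wq, event, THREAD_AWAKENED) == KERN_SUCCESS);
}
#endif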
/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing of interest locked.
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go_locked(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set's queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
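
/*
 * Usage sketch (illustrative only): aborting one specific thread's wait,
 * e.g. to deliver a timeout.  Because the wakeup is keyed on the exact
 * <wait queue, event> pair, a thread that already moved on to a different
 * wait is left alone and KERN_NOT_WAITING comes back.  The wrapper name is
 * hypothetical.
 */
#if 0	/* example only */
static kern_return_t
cancel_thread_wait(wait_queue_t wq, event_t event, thread_t thread)
{
	return wait_queue_wakeup_thread(wq, event, thread, THREAD_INTERRUPTED);
}
#endif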