/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc..).
 */
#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/zalloc.h>
#include <kern/queue.h>

#include <mach/sync_policy.h>
#include <kern/mach_param.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
#include <vm/vm_kern.h>
/* forward declarations */
static boolean_t wait_queue_member_locked(
			wait_queue_t wq,
			wait_queue_set_t wq_set);

static void wait_queues_init(void) __attribute__((section("__TEXT, initcode")));
#define WAIT_QUEUE_MAX thread_max
#define WAIT_QUEUE_SET_MAX task_max * 3
#define WAIT_QUEUE_LINK_MAX PORT_MAX / 2 + (WAIT_QUEUE_MAX * WAIT_QUEUE_SET_MAX) / 64
static zone_t _wait_queue_link_zone;
static zone_t _wait_queue_set_zone;
static zone_t _wait_queue_zone;
/* see rdar://6737748&5561610; we need an unshadowed
 * definition of a WaitQueueLink for debugging,
 * but it needs to be used somewhere to wind up in
 * the symbol file. */
volatile WaitQueueLink *unused_except_for_debugging;
/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 *
 *	The wait event hash table declarations are as follows:
 */
struct wait_queue boot_wait_queue[1];
__private_extern__ struct wait_queue *wait_queues = &boot_wait_queue[0];

__private_extern__ uint32_t num_wait_queues = 1;
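/*
 * Illustrative sketch (not part of this file): the basic wait/wakeup
 * protocol described above, as a hypothetical kernel client might use it.
 * "my_event" is a made-up event; the pattern is assert first, then block,
 * with the waker posting the same event address.
 *
 *	assert_wait((event_t)&my_event, THREAD_UNINT);	// publish <thread,event>
 *	// ... re-check the condition being waited for ...
 *	thread_block(THREAD_CONTINUE_NULL);		// actually sleep
 *
 *	// on the waker's side:
 *	thread_wakeup((event_t)&my_event);		// wake all waiters on my_event
 */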
static uint32_t
compute_wait_hash_size(__unused unsigned cpu_count, __unused uint64_t memsize) {
	uint32_t hsize = (uint32_t)round_page_64((thread_max / 11) * sizeof(struct wait_queue));
	uint32_t bhsize;

	if (PE_parse_boot_argn("wqsize", &bhsize, sizeof(bhsize)))
		hsize = bhsize;

	return hsize;
}
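/*
 * Sizing note (illustrative): with the default formula above, the table is
 * round_page_64((thread_max / 11) * sizeof(struct wait_queue)) bytes. The
 * "wqsize" boot-arg, when present, replaces the computed size wholesale,
 * e.g. (hypothetical value, in bytes):
 *
 *	nvram boot-args="wqsize=1048576"
 */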
static void
wait_queues_init(void)
{
	uint32_t	i, whsize;
	kern_return_t	kret;

	whsize = compute_wait_hash_size(processor_avail_count, machine_info.max_mem);
	num_wait_queues = (whsize / ((uint32_t)sizeof(struct wait_queue))) - 1;

	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues,
				      whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);

	if (kret != KERN_SUCCESS || wait_queues == NULL)
		panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);

	for (i = 0; i < num_wait_queues; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}
void
wait_queue_bootstrap(void)
{
	wait_queues_init();
	_wait_queue_zone = zinit(sizeof(struct wait_queue),
				 WAIT_QUEUE_MAX * sizeof(struct wait_queue),
				 sizeof(struct wait_queue),
				 "wait queues");
	_wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
				     WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
				     sizeof(struct wait_queue_set),
				     "wait queue sets");
	_wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
				      WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
				      sizeof(struct _wait_queue_link),
				      "wait queue links");
}
/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	/* only FIFO and LIFO for now */
	if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
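/*
 * Minimal sketch (hypothetical caller, not from this file): initializing a
 * wait queue that is embedded in a larger structure, which is how most
 * kernel subsystems use this routine. "my_obj" is a made-up name.
 *
 *	struct my_obj {
 *		struct wait_queue	wq;
 *		int			state;
 *	};
 *
 *	kern_return_t kr = wait_queue_init(&obj->wq, SYNC_POLICY_FIFO);
 *	assert(kr == KERN_SUCCESS);
 */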
/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside
 *		of the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) zalloc(_wait_queue_zone);
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			zfree(_wait_queue_zone, wq);
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}
/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	zfree(_wait_queue_zone, wq);
	return KERN_SUCCESS;
}
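/*
 * Usage sketch (assumed, not taken from this file): the zone-backed
 * alloc/free pair. Note that wait_queue_free() refuses to free a queue
 * that still has elements enqueued.
 *
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	// ... use the queue ...
 *	kern_return_t kr = wait_queue_free(wq);	// KERN_FAILURE if non-empty
 */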
/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_prepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_prepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	queue_init(&wqset->wqs_preposts);
	return KERN_SUCCESS;
}
/* legacy API */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}
kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	s = splsched();
	wqs_lock(wq_set);
	q = &wq_set->wqs_preposts;
	while (!queue_empty(q)) {
		queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
		assert(!wql_is_preposted(wql));
	}
	wqs_unlock(wq_set);
	splx(s);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Conditions:
 *		May block.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) zalloc(_wait_queue_set_zone);
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			zfree(_wait_queue_set_zone, wq_set);
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}
/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	zfree(_wait_queue_set_zone, wq_set);
	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_link_noalloc;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_LINK_NOALLOC ((void *)&_wait_queue_link_noalloc)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
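/*
 * Design note (illustrative): the three statics above exist only to
 * provide unique kernel addresses; wqe_type/wql_type store one of those
 * addresses as an opaque type tag. A minimal sketch of the idiom:
 *
 *	if (wq_element->wqe_type == WAIT_QUEUE_LINK)
 *		// element is a dynamically allocated set link,
 *		// not a waiting thread
 */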
#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
	  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
	  "wait queue element list corruption: wq=%#x, wqe=%#x", \
	  (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT(((((wql)->wql_type == WAIT_QUEUE_LINK) || \
			((wql)->wql_type == WAIT_QUEUE_LINK_NOALLOC)) && \
		((wql)->wql_setqueue == (wqs)) && \
		(((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) || \
			((wql)->wql_queue->wq_type == _WAIT_QUEUE_SET_inited)) && \
		(WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
		"wait queue set links corruption: wqs=%#x, wql=%#x", \
		(wqs), (wql))
#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK) ||
		    (wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC)) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
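/*
 * Usage sketch (hypothetical): checking membership before unlinking, so a
 * caller can treat wait_queue_unlink() as unconditional. wq and wqs are
 * assumed to be a valid wait queue and set.
 *
 *	if (wait_queue_member(wq, wqs))
 *		(void) wait_queue_unlink(wq, wqs);
 */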
/*
 *	Routine:	wait_queue_link_internal
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that was provided.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 *		The wait_queue_link structure must already be properly typed
 */
static kern_return_t
wait_queue_link_internal(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	s = splsched();
	wait_queue_lock(wq);

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
		     wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	assert(wql->wql_type == WAIT_QUEUE_LINK ||
	       wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);

	wql->wql_queue = wq;
	wql_clear_prepost(wql);
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we were provided.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wql->wql_type = WAIT_QUEUE_LINK_NOALLOC;
	return wait_queue_link_internal(wq, wq_set, wql);
}
/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	wql->wql_type = WAIT_QUEUE_LINK;
	ret = wait_queue_link_internal(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		zfree(_wait_queue_link_zone, wql);

	return ret;
}
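/*
 * Linkage lifecycle sketch (assumed usage, not from this file): the
 * allocating and non-allocating variants differ only in who owns the
 * wait_queue_link structure. "my_storage" is a made-up caller buffer.
 *
 *	// kernel allocates and later frees the link:
 *	kern_return_t kr = wait_queue_link(wq, wqs);
 *
 *	// caller supplies storage (e.g. embedded in its own object):
 *	wait_queue_link_t wql = (wait_queue_link_t) my_storage;
 *	kr = wait_queue_link_noalloc(wq, wqs, wql);
 */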
/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	if (wql_is_preposted(wql)) {
		queue_t ppq = &wq_set->wqs_preposts;
		queue_remove(ppq, wql, wait_queue_link_t, wql_preposts);
	}
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue being unlinked must be a member set queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				boolean_t alloced;

				alloced = (wql->wql_type == WAIT_QUEUE_LINK);
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				if (alloced)
					zfree(_wait_queue_link_zone, wql);
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures that were allocated internally
 *		are freed.  The others are the caller's responsibility.
 *	Conditions:
 *		Nothing of interest locked.
 */

kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		boolean_t alloced;

		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
		if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			if (alloced)
				enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}

	return(KERN_SUCCESS);
}
/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all(wq_set);
}
/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues. The link structures are freed for those
 *		links which were dynamically allocated.
 *	Conditions:
 *		The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			boolean_t alloced;

			alloced = (wql->wql_type == WAIT_QUEUE_LINK);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			if (alloced)
				enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty (links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}
	return(KERN_SUCCESS);
}
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (event == NO_EVENT64 && wqs_is_preposted(wqs))
			return(THREAD_AWAKENED);
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (!wq->wq_fifo || thread->options & TH_OPT_VMPRIV)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			if (!timer_call_enter(&thread->wait_timer, deadline))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}
/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
					      interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
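/*
 * Wait-side sketch (illustrative): assert a wait with a deadline, then
 * block; the wait result distinguishes wakeup, timeout and interruption.
 * "my_event" and "deadline" are made-up names.
 *
 *	wait_result_t wr = wait_queue_assert_wait(wq, (event_t)&my_event,
 *						  THREAD_INTERRUPTIBLE, deadline);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	switch (wr) {
 *	case THREAD_AWAKENED:	 // event was posted
 *	case THREAD_TIMED_OUT:	 // deadline expired
 *	case THREAD_INTERRUPTED: // wait was aborted
 *	default: break;
 *	}
 */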
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue. If it is marked
			 * as pre-post, and it is the "generic event" then mark
			 * it pre-posted now (if not already).
			 */
			wqs_lock(set_queue);
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			if (! wait_queue_empty(&set_queue->wqs_wait_queue))
				_wait_queue_select64_all(&set_queue->wqs_wait_queue, event, wake_queue);
			wqs_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue (wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

//	assert(wait_queue_held(wq));
//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
//		panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq);	/* (BRINGUP) */
//	}

	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty (q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
//		panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq);	/* (BRINGUP) */
//	}
	ret = wait_queue_wakeup64_all_locked(
				wq, CAST_DOWN(event64_t,event),
				result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
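/*
 * Wakeup-side sketch (illustrative, pairs with the wait-side example
 * earlier in this file): post the event and release every waiter with a
 * given wait result. "my_event" is a made-up name.
 *
 *	kern_return_t kr = wait_queue_wakeup_all(wq, (event_t)&my_event,
 *						 THREAD_AWAKENED);
 *	if (kr == KERN_NOT_WAITING)
 *		// no thread was waiting on <wq, my_event>
 */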
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO/LIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue. If the set
			 * supports pre-posting, it isn't already preposted,
			 * and we didn't find a thread in the set, then mark it.
			 *
			 * If we later find a thread, there may be a spurious
			 * pre-post here on this set. The wait side has to check
			 * for that either pre- or post-wait.
			 */
			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
			}
			if (t != THREAD_NULL) {
				wqs_unlock(set_queue);
				return t;
			}
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			wqs_unlock(set_queue);

		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			t = (thread_t)wq_element;
			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}

			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread off its wait queue and (possibly) unlock
 *		the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;

	if (unlock)
		wait_queue_unlock(waitq);
}
/*
 *	Routine:	wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
								  event,
								  thread);
			}
			wqs_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS: It was, and is, now removed.
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}
/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, CAST_DOWN(event64_t,event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
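/*
 * Targeted-wakeup sketch (illustrative): abort one specific thread's wait,
 * but only if it is still waiting on this exact <wq, event> pair - the
 * safety property the block comment above describes. "my_event" is a
 * made-up name.
 *
 *	if (wait_queue_wakeup_thread(wq, (event_t)&my_event, thread,
 *				     THREAD_INTERRUPTED) == KERN_NOT_WAITING)
 *		// thread already woke up, or is waiting elsewhere
 */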
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}