/*
 * Copyright (c) 2015-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <kern/backtrace.h>
#include <kern/kern_types.h>
#include <kern/ltable.h>
#include <kern/mach_param.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/simple_lock.h>

#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/policy_internal.h>

#include <libkern/OSAtomic.h>
#include <mach/sync_policy.h>
#include <vm/vm_kern.h>

#include <sys/kdebug.h>
#if defined(CONFIG_WAITQ_LINK_STATS) || defined(CONFIG_WAITQ_PREPOST_STATS)
#  if !defined(CONFIG_LTABLE_STATS)
#    error "You must configure LTABLE_STATS to use WAITQ_[LINK|PREPOST]_STATS"
#  endif
#  if !defined(CONFIG_WAITQ_STATS)
#    error "You must configure WAITQ_STATS to use WAITQ_[LINK|PREPOST]_STATS"
#  endif
#endif

#if CONFIG_WAITQ_DEBUG
#define wqdbg(fmt,...) \
	printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__)
#else
#define wqdbg(fmt,...) do { } while (0)
#endif

#ifdef WAITQ_VERBOSE_DEBUG
#define wqdbg_v(fmt,...) \
	printf("WQ[v:%s]: " fmt "\n", __func__, ## __VA_ARGS__)
#else
#define wqdbg_v(fmt,...) do { } while (0)
#endif

#define wqinfo(fmt,...) \
	printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__)

#define wqerr(fmt,...) \
	printf("WQ[%s] ERROR: " fmt "\n", __func__, ## __VA_ARGS__)

/*
 * un-comment the following lines to debug the link/prepost tables
 * NOTE: this expands each element by ~40 bytes
 */
//#define CONFIG_WAITQ_LINK_STATS
//#define CONFIG_WAITQ_PREPOST_STATS
/*
 * file-static functions / data
 */
static thread_t waitq_select_one_locked(struct waitq *waitq, event64_t event,
					uint64_t *reserved_preposts,
					int priority, spl_t *spl);

static kern_return_t waitq_select_thread_locked(struct waitq *waitq,
						event64_t event,
						thread_t thread, spl_t *spl);

#define WAITQ_SET_MAX (task_max * 3)
static zone_t waitq_set_zone;


#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
#define ROUNDDOWN(x,y)      (((x)/(y))*(y))
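
/*
 * Illustrative worked example (not from the original source): P2ROUNDUP()
 * rounds up to the next multiple of a power-of-two alignment, ROUNDDOWN()
 * truncates to a multiple of 'y'. Assuming a 4096-byte PAGE_SIZE:
 *
 *	P2ROUNDUP(5000, PAGE_SIZE) == 8192    (-(-5000 & -4096))
 *	ROUNDDOWN(8191, 1024)      == 7168    ((8191 / 1024) * 1024)
 *
 * These macros are used below to size the link/prepost tables and the
 * global waitq hash table in whole pages.
 */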

#if defined(CONFIG_LTABLE_STATS) || defined(CONFIG_WAITQ_STATS)
static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip);
#endif

#define waitq_lock_to(wq,to) \
	(hw_lock_to(&(wq)->waitq_interlock, to))

#define waitq_lock_unlock(wq) \
	(hw_lock_unlock(&(wq)->waitq_interlock))

#define waitq_lock_init(wq) \
	(hw_lock_init(&(wq)->waitq_interlock))


/*
 * Prepost callback function for specially marked waitq sets
 * (prepost alternative)
 */
extern void waitq_set__CALLING_PREPOST_HOOK__(void *ctx, void *memberctx, int priority);

#define DEFAULT_MIN_FREE_TABLE_ELEM    100
static uint32_t g_min_free_table_elem;
static uint32_t g_min_free_cache;

/* ----------------------------------------------------------------------
 *
 * SetID Link Table Implementation
 *
 * ---------------------------------------------------------------------- */
static struct link_table g_wqlinktable;

enum wq_link_type {
	WQL_ALL     = -1,
	WQL_FREE    = LT_FREE,
	WQL_WQS     = LT_ELEM,
	WQL_LINK    = LT_LINK,
};

struct waitq_link {
	struct lt_elem wqte;

	union {
		/* wqt_type == WQL_WQS (LT_ELEM) */
		struct {
			struct waitq_set *wql_set;
			/* uint64_t        sl_prepost_id; */
		} wql_wqs;

		/* wqt_type == WQL_LINK (LT_LINK) */
		struct {
			uint64_t          left_setid;
			uint64_t          right_setid;
		} wql_link;
	};
#ifdef CONFIG_WAITQ_LINK_STATS
	thread_t  sl_alloc_th;
	task_t    sl_alloc_task;
	uintptr_t sl_alloc_bt[NWAITQ_BTFRAMES];
	uint64_t  sl_alloc_ts;
	uintptr_t sl_invalidate_bt[NWAITQ_BTFRAMES];
	uint64_t  sl_invalidate_ts;
	uintptr_t sl_mkvalid_bt[NWAITQ_BTFRAMES];
	uint64_t  sl_mkvalid_ts;
	uint64_t  sl_free_ts;
#endif
};
#if !defined(CONFIG_WAITQ_LINK_STATS)
static_assert((sizeof(struct waitq_link) & (sizeof(struct waitq_link) - 1)) == 0,
	      "waitq_link struct must be a power of two!");
#endif

#define wql_refcnt(link) \
	(lt_bits_refcnt((link)->wqte.lt_bits))

#define wql_type(link) \
	(lt_bits_type((link)->wqte.lt_bits))

#define wql_mkvalid(link) \
	do { \
		lt_elem_mkvalid(&(link)->wqte); \
		wql_do_mkvalid_stats(&(link)->wqte); \
	} while (0)

#define wql_is_valid(link) \
	lt_bits_valid((link)->wqte.lt_bits)

#define wql_setid wqte.lt_id

#define WQL_WQS_POISON         ((void *)(0xf00df00d))
#define WQL_LINK_POISON        (0x0bad0badffffffffull)

static void wql_poison(struct link_table *table, struct lt_elem *elem)
{
	struct waitq_link *link = (struct waitq_link *)elem;
	(void)table;

	switch (wql_type(link)) {
	case WQL_WQS:
		link->wql_wqs.wql_set = WQL_WQS_POISON;
		break;
	case WQL_LINK:
		link->wql_link.left_setid = WQL_LINK_POISON;
		link->wql_link.right_setid = WQL_LINK_POISON;
		break;
	default:
		break;
	}
#ifdef CONFIG_WAITQ_LINK_STATS
	memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
	link->sl_alloc_ts = 0;
	memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt));
	link->sl_mkvalid_ts = 0;

	link->sl_alloc_th = THREAD_NULL;
	/* leave the sl_alloc_task in place for debugging */

	link->sl_free_ts = mach_absolute_time();
#endif
}

#ifdef CONFIG_WAITQ_LINK_STATS
static __inline__ void wql_do_alloc_stats(struct lt_elem *elem)
{
	struct waitq_link *link = (struct waitq_link *)elem;
	memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
	waitq_grab_backtrace(link->sl_alloc_bt, 0);
	link->sl_alloc_th = current_thread();
	link->sl_alloc_task = current_task();

	assert(link->sl_alloc_ts == 0);
	link->sl_alloc_ts = mach_absolute_time();

	memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
	link->sl_invalidate_ts = 0;
}

static __inline__ void wql_do_invalidate_stats(struct lt_elem *elem)
{
	struct waitq_link *link = (struct waitq_link *)elem;

	if (!elem)
		return;

	assert(link->sl_mkvalid_ts > 0);

	memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
	link->sl_invalidate_ts = mach_absolute_time();
	waitq_grab_backtrace(link->sl_invalidate_bt, 0);
}

static __inline__ void wql_do_mkvalid_stats(struct lt_elem *elem)
{
	struct waitq_link *link = (struct waitq_link *)elem;

	if (!elem)
		return;

	memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt));
	link->sl_mkvalid_ts = mach_absolute_time();
	waitq_grab_backtrace(link->sl_mkvalid_bt, 0);
}
#else
#define wql_do_alloc_stats(e)
#define wql_do_invalidate_stats(e)
#define wql_do_mkvalid_stats(e)
#endif /* CONFIG_WAITQ_LINK_STATS */

static void wql_init(void)
{
	uint32_t tablesz = 0, max_links = 0;

	if (PE_parse_boot_argn("wql_tsize", &tablesz, sizeof(tablesz)) != TRUE)
		tablesz = (uint32_t)g_lt_max_tbl_size;

	tablesz = P2ROUNDUP(tablesz, PAGE_SIZE);
	max_links = tablesz / sizeof(struct waitq_link);
	assert(max_links > 0 && tablesz > 0);

	/* we have a restricted index range */
	if (max_links > (LT_IDX_MAX + 1))
		max_links = LT_IDX_MAX + 1;

	wqinfo("init linktable with max:%d elements (%d bytes)",
	       max_links, tablesz);
	ltable_init(&g_wqlinktable, "wqslab.wql", max_links,
		    sizeof(struct waitq_link), wql_poison);
}

static void wql_ensure_free_space(void)
{
	if (g_wqlinktable.nelem - g_wqlinktable.used_elem < g_min_free_table_elem) {
		/*
		 * we don't hold locks on these values, so check for underflow
		 */
		if (g_wqlinktable.used_elem <= g_wqlinktable.nelem) {
			wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d",
				g_wqlinktable.nelem, g_wqlinktable.used_elem,
				g_min_free_table_elem);
			ltable_grow(&g_wqlinktable, g_min_free_table_elem);
		}
	}
}

static struct waitq_link *wql_alloc_link(int type)
{
	struct lt_elem *elem;

	elem = ltable_alloc_elem(&g_wqlinktable, type, 1, 0);
	wql_do_alloc_stats(elem);
	return (struct waitq_link *)elem;
}

static void wql_realloc_link(struct waitq_link *link, int type)
{
	ltable_realloc_elem(&g_wqlinktable, &link->wqte, type);
#ifdef CONFIG_WAITQ_LINK_STATS
	memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
	link->sl_alloc_ts = 0;
	wql_do_alloc_stats(&link->wqte);

	memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
	link->sl_invalidate_ts = 0;
#endif
}

static void wql_invalidate(struct waitq_link *link)
{
	lt_elem_invalidate(&link->wqte);
	wql_do_invalidate_stats(&link->wqte);
}

static struct waitq_link *wql_get_link(uint64_t setid)
{
	struct lt_elem *elem;

	elem = ltable_get_elem(&g_wqlinktable, setid);
	return (struct waitq_link *)elem;
}

static void wql_put_link(struct waitq_link *link)
{
	if (!link)
		return;
	ltable_put_elem(&g_wqlinktable, (struct lt_elem *)link);
}

static struct waitq_link *wql_get_reserved(uint64_t setid, int type)
{
	struct lt_elem *elem;

	elem = lt_elem_list_first(&g_wqlinktable, setid);
	if (!elem)
		return NULL;
	ltable_realloc_elem(&g_wqlinktable, elem, type);
	return (struct waitq_link *)elem;
}


static inline int waitq_maybe_remove_link(struct waitq *waitq,
					  uint64_t setid,
					  struct waitq_link *parent,
					  struct waitq_link *left,
					  struct waitq_link *right);

enum {
	LINK_WALK_ONE_LEVEL = 0,
	LINK_WALK_FULL_DAG  = 1,
	LINK_WALK_FULL_DAG_UNLOCKED = 2,
};

typedef int (*wql_callback_func)(struct waitq *waitq, void *ctx,
				 struct waitq_link *link);

/*
 * walk_waitq_links: walk all table elements (of type 'link_type') pointed to by 'setid'
 *
 * Conditions:
 *	waitq is locked (or NULL)
 *	'setid' is managed by 'waitq'
 *		this could be direct (waitq->waitq_set_id == setid)
 *		OR indirect (setid is the left/right ID in a LINK chain,
 *			     whose root is waitq->waitq_set_id)
 *
 * Notes:
 *	This function uses recursion to walk the set of table elements
 *	pointed to by 'setid'. For each element encountered, 'cb' will be
 *	called. If non-zero, the return value of this callback function can
 *	early-out of the table walk.
 *
 *	For each link element encountered, the function takes a reference to
 *	it. The reference is dropped only after the callback and any recursion
 *	has completed.
 *
 *	The assumed table/link/tree structure: a binary tree of WQL_LINK
 *	elements rooted at 'setid', whose leaves are WQL_WQS elements that
 *	each point to a waitq set:
 *
 *	              'setid'
 *	              /     \
 *	          LINK       LINK
 *	          /  \       /  \
 *	       WQS    WQS  WQS   WQS
 *	       /\     /\   ...   ...
 *
 *	       WQS(wqset_q.waitq_setid == Sx)
 *	       [waitq set is a member of setid 'Sx']
 *
 *	The basic algorithm is as follows:
 *	*) take a reference to the table object pointed to by 'setid'
 *	*) if appropriate, call 'cb' (potentially early-out on non-zero return)
 *	*) if the link object points to a waitq set, and the walk type
 *	   is 'FULL_DAG' (full directed-acyclic-graph), then try to lock
 *	   the associated waitq set object and recursively walk all sets to
 *	   which that set belongs. This is a DFS of the tree structure.
 *	*) recurse down the left side of the tree (following the
 *	   'left_setid' pointer in the link object)
 *	*) recurse down the right side of the tree (following the
 *	   'right_setid' pointer in the link object)
 */
static __attribute__((noinline))
int walk_waitq_links(int walk_type, struct waitq *waitq,
		     uint64_t setid, int link_type,
		     void *ctx, wql_callback_func cb)
{
	struct waitq_link *link;
	uint64_t nextid;
	int wqltype;

	link = wql_get_link(setid);

	/* invalid link */
	if (!link)
		return WQ_ITERATE_CONTINUE;

	setid = nextid = 0;
	wqltype = wql_type(link);
	if (wqltype == WQL_LINK) {
		setid  = link->wql_link.left_setid;
		nextid = link->wql_link.right_setid;
	}

	/*
	 * Make the callback only on specified link_type (or all links)
	 * Note that after the callback, the link object may be
	 * invalid. The only valid thing we can do is put our
	 * reference to it (which may put it back on the free list)
	 */
	if (link_type == WQL_ALL || link_type == wqltype) {
		/* allow the callback to early-out */
		int ret = cb(waitq, ctx, link);
		if (ret != WQ_ITERATE_CONTINUE) {
			wql_put_link(link);
			return ret;
		}
	}

	if (wqltype == WQL_WQS &&
	    (walk_type == LINK_WALK_FULL_DAG ||
	     walk_type == LINK_WALK_FULL_DAG_UNLOCKED)) {
		/*
		 * Recurse down any sets to which this wait queue set was
		 * added. We do this just before we put our reference to
		 * the link object (which may free it).
		 */
		struct waitq_set *wqset = link->wql_wqs.wql_set;
		int ret = WQ_ITERATE_CONTINUE;
		int should_unlock = 0;
		uint64_t wqset_setid = 0;

		if (waitq_set_is_valid(wqset) && walk_type == LINK_WALK_FULL_DAG) {
			assert(!waitq_irq_safe(&wqset->wqset_q));
			waitq_set_lock(wqset);
			should_unlock = 1;
		}

		/*
		 * verify the linked waitq set as it could have been
		 * invalidated before we grabbed the lock!
		 */
		if (wqset->wqset_id != link->wql_setid.id) {
			/* This is the bottom of the tree: just get out */
			if (should_unlock) {
				waitq_set_unlock(wqset);
			}
			wql_put_link(link);
			return WQ_ITERATE_CONTINUE;
		}

		wqset_setid = wqset->wqset_q.waitq_set_id;

		if (wqset_setid > 0)
			ret = walk_waitq_links(walk_type, &wqset->wqset_q,
					       wqset_setid, link_type, ctx, cb);
		if (should_unlock) {
			waitq_set_unlock(wqset);
		}
		if (ret != WQ_ITERATE_CONTINUE) {
			wql_put_link(link);
			return ret;
		}
	}

	wql_put_link(link);

	/* recurse down left side of the tree */
	if (setid) {
		int ret = walk_waitq_links(walk_type, waitq, setid, link_type, ctx, cb);
		if (ret != WQ_ITERATE_CONTINUE)
			return ret;
	}

	/* recurse down right side of the tree */
	if (nextid)
		return walk_waitq_links(walk_type, waitq, nextid, link_type, ctx, cb);

	return WQ_ITERATE_CONTINUE;
}
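
/*
 * Illustrative sketch (not part of the original source): a minimal
 * wql_callback_func that counts the WQL_WQS links reachable from a waitq's
 * set ID, assuming the waitq is already locked.
 *
 *	static int count_wqs_cb(struct waitq *waitq, void *ctx,
 *				struct waitq_link *link)
 *	{
 *		(void)waitq;
 *		if (wql_type(link) == WQL_WQS)
 *			*(int *)ctx += 1;
 *		return WQ_ITERATE_CONTINUE;
 *	}
 *
 *	int nsets = 0;
 *	(void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
 *			       WQL_WQS, (void *)&nsets, count_wqs_cb);
 */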

/* ----------------------------------------------------------------------
 *
 * Prepost Link Table Implementation
 *
 * ---------------------------------------------------------------------- */
static struct link_table g_prepost_table;

enum wq_prepost_type {
	WQP_FREE  = LT_FREE,
	WQP_WQ    = LT_ELEM,
	WQP_POST  = LT_LINK,
};

struct wq_prepost {
	struct lt_elem wqte;

	union {
		/* wqt_type == WQP_WQ (LT_ELEM) */
		struct {
			struct waitq *wqp_wq_ptr;
		} wqp_wq;
		/* wqt_type == WQP_POST (LT_LINK) */
		struct {
			uint64_t wqp_next_id;
			uint64_t wqp_wq_id;
		} wqp_post;
	};
#ifdef CONFIG_WAITQ_PREPOST_STATS
	thread_t  wqp_alloc_th;
	task_t    wqp_alloc_task;
	uintptr_t wqp_alloc_bt[NWAITQ_BTFRAMES];
#endif
};
#if !defined(CONFIG_WAITQ_PREPOST_STATS)
static_assert((sizeof(struct wq_prepost) & (sizeof(struct wq_prepost) - 1)) == 0,
	      "wq_prepost struct must be a power of two!");
#endif

#define wqp_refcnt(wqp) \
	(lt_bits_refcnt((wqp)->wqte.lt_bits))

#define wqp_type(wqp) \
	(lt_bits_type((wqp)->wqte.lt_bits))

#define wqp_set_valid(wqp) \
	lt_elem_mkvalid(&(wqp)->wqte)

#define wqp_is_valid(wqp) \
	lt_bits_valid((wqp)->wqte.lt_bits)

#define wqp_prepostid wqte.lt_id

#define WQP_WQ_POISON              (0x0bad0badffffffffull)
#define WQP_POST_POISON            (0xf00df00df00df00d)

static void wqp_poison(struct link_table *table, struct lt_elem *elem)
{
	struct wq_prepost *wqp = (struct wq_prepost *)elem;
	(void)table;

	switch (wqp_type(wqp)) {
	case WQP_WQ:
		break;
	case WQP_POST:
		wqp->wqp_post.wqp_next_id = WQP_POST_POISON;
		wqp->wqp_post.wqp_wq_id = WQP_POST_POISON;
		break;
	default:
		break;
	}
}

#ifdef CONFIG_WAITQ_PREPOST_STATS
static __inline__ void wqp_do_alloc_stats(struct lt_elem *elem)
{
	if (!elem)
		return;

	struct wq_prepost *wqp = (struct wq_prepost *)elem;
	uintptr_t alloc_bt[sizeof(wqp->wqp_alloc_bt)];

	waitq_grab_backtrace(alloc_bt, NWAITQ_BTFRAMES);

	/* be sure to take stats for _all_ allocated objects */
	for (;;) {
		memcpy(wqp->wqp_alloc_bt, alloc_bt, sizeof(alloc_bt));
		wqp->wqp_alloc_th = current_thread();
		wqp->wqp_alloc_task = current_task();
		wqp = (struct wq_prepost *)lt_elem_list_next(&g_prepost_table, &wqp->wqte);
		if (!wqp)
			break;
	}
}
#else
#define wqp_do_alloc_stats(e)
#endif /* CONFIG_WAITQ_PREPOST_STATS */

static void wqp_init(void)
{
	uint32_t tablesz = 0, max_wqp = 0;

	if (PE_parse_boot_argn("wqp_tsize", &tablesz, sizeof(tablesz)) != TRUE)
		tablesz = (uint32_t)g_lt_max_tbl_size;

	tablesz = P2ROUNDUP(tablesz, PAGE_SIZE);
	max_wqp = tablesz / sizeof(struct wq_prepost);
	assert(max_wqp > 0 && tablesz > 0);

	/* we have a restricted index range */
	if (max_wqp > (LT_IDX_MAX + 1))
		max_wqp = LT_IDX_MAX + 1;

	wqinfo("init prepost table with max:%d elements (%d bytes)",
	       max_wqp, tablesz);
	ltable_init(&g_prepost_table, "wqslab.prepost", max_wqp,
		    sizeof(struct wq_prepost), wqp_poison);
}

/*
 * Refill the per-CPU cache.
 */
static void wq_prepost_refill_cpu_cache(uint32_t nalloc)
{
	struct lt_elem *new_head, *old_head;
	struct wqp_cache *cache;

	/* require preemption enabled to allocate elements */
	if (get_preemption_level() != 0)
		return;

	new_head = ltable_alloc_elem(&g_prepost_table,
				     LT_RESERVED, nalloc, 1);
	if (new_head == NULL)
		return;

	disable_preemption();
	cache = &PROCESSOR_DATA(current_processor(), wqp_cache);

	/* check once more before putting these elements on the list */
	if (cache->avail >= WQP_CACHE_MAX) {
		lt_elem_list_release(&g_prepost_table, new_head, LT_RESERVED);
		enable_preemption();
		return;
	}

	cache->avail += nalloc;
	if (cache->head == 0 || cache->head == LT_IDX_MAX) {
		cache->head = new_head->lt_id.id;
		goto out;
	}

	old_head = lt_elem_list_first(&g_prepost_table, cache->head);
	(void)lt_elem_list_link(&g_prepost_table, new_head, old_head);
	cache->head = new_head->lt_id.id;

out:
	enable_preemption();
	return;
}

static void wq_prepost_ensure_free_space(void)
{
	uint32_t free_elem;
	uint32_t min_free;
	struct wqp_cache *cache;

	if (g_min_free_cache == 0)
		g_min_free_cache = (WQP_CACHE_MAX * ml_get_max_cpus());

	/*
	 * Ensure that we always have a pool of per-CPU prepost elements
	 */
	disable_preemption();
	cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
	free_elem = cache->avail;
	enable_preemption();

	if (free_elem < (WQP_CACHE_MAX / 3))
		wq_prepost_refill_cpu_cache(WQP_CACHE_MAX - free_elem);

	/*
	 * Now ensure that we have a sufficient amount of free table space
	 */
	free_elem = g_prepost_table.nelem - g_prepost_table.used_elem;
	min_free = g_min_free_table_elem + g_min_free_cache;
	if (free_elem < min_free) {
		/*
		 * we don't hold locks on these values, so check for underflow
		 */
		if (g_prepost_table.used_elem <= g_prepost_table.nelem) {
			wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d+%d",
				g_prepost_table.nelem, g_prepost_table.used_elem,
				g_min_free_table_elem, g_min_free_cache);
			ltable_grow(&g_prepost_table, min_free);
		}
	}
}
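
/*
 * Illustrative sizing example (not from the original source): with the
 * defaults above, g_min_free_table_elem is 100 and g_min_free_cache is
 * WQP_CACHE_MAX * ml_get_max_cpus(). Assuming (hypothetically) that
 * WQP_CACHE_MAX were 64 on an 8-CPU machine, wq_prepost_ensure_free_space()
 * would try to keep at least
 *
 *	min_free = 100 + (64 * 8) = 612
 *
 * prepost elements free in g_prepost_table, growing the table otherwise.
 */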

static struct wq_prepost *wq_prepost_alloc(int type, int nelem)
{
	struct lt_elem *elem;
	struct wq_prepost *wqp;
	struct wqp_cache *cache;

	if (type != LT_RESERVED)
		goto do_alloc;
	if (nelem == 0)
		return NULL;

	/*
	 * First try to grab the elements from the per-CPU cache if we are
	 * allocating RESERVED elements
	 */
	disable_preemption();
	cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
	if (nelem <= (int)cache->avail) {
		struct lt_elem *first, *next = NULL;
		int nalloc = nelem;

		cache->avail -= nelem;

		/* grab the first element */
		first = lt_elem_list_first(&g_prepost_table, cache->head);

		/* find the last element and re-adjust the cache head */
		for (elem = first; elem != NULL && nalloc > 0; elem = next) {
			next = lt_elem_list_next(&g_prepost_table, elem);
			if (--nalloc == 0) {
				/* terminate the allocated list */
				elem->lt_next_idx = LT_IDX_MAX;
				break;
			}
		}
		assert(nalloc == 0);
		if (!next)
			cache->head = LT_IDX_MAX;
		else
			cache->head = next->lt_id.id;
		/* assert that we don't have mis-matched book keeping */
		assert(!(cache->head == LT_IDX_MAX && cache->avail > 0));
		enable_preemption();
		elem = first;
		goto out;
	}
	enable_preemption();

do_alloc:
	/* fall-back to standard table allocation */
	elem = ltable_alloc_elem(&g_prepost_table, type, nelem, 0);
	if (!elem)
		return NULL;

out:
	wqp = (struct wq_prepost *)elem;
	wqp_do_alloc_stats(elem);
	return wqp;
}

static void wq_prepost_invalidate(struct wq_prepost *wqp)
{
	lt_elem_invalidate(&wqp->wqte);
}

static struct wq_prepost *wq_prepost_get(uint64_t wqp_id)
{
	struct lt_elem *elem;

	elem = ltable_get_elem(&g_prepost_table, wqp_id);
	return (struct wq_prepost *)elem;
}

static void wq_prepost_put(struct wq_prepost *wqp)
{
	ltable_put_elem(&g_prepost_table, (struct lt_elem *)wqp);
}

static int wq_prepost_rlink(struct wq_prepost *parent, struct wq_prepost *child)
{
	return lt_elem_list_link(&g_prepost_table, &parent->wqte, &child->wqte);
}

static struct wq_prepost *wq_prepost_get_rnext(struct wq_prepost *head)
{
	struct lt_elem *elem;
	struct wq_prepost *wqp;
	uint64_t id;

	elem = lt_elem_list_next(&g_prepost_table, &head->wqte);
	if (!elem)
		return NULL;
	id = elem->lt_id.id;
	elem = ltable_get_elem(&g_prepost_table, id);
	if (!elem)
		return NULL;

	wqp = (struct wq_prepost *)elem;
	if (elem->lt_id.id != id ||
	    wqp_type(wqp) != WQP_POST ||
	    wqp->wqp_post.wqp_next_id != head->wqp_prepostid.id) {
		ltable_put_elem(&g_prepost_table, elem);
		return NULL;
	}

	return wqp;
}

static void wq_prepost_reset_rnext(struct wq_prepost *wqp)
{
	(void)lt_elem_list_break(&g_prepost_table, &wqp->wqte);
}

/*
 * remove 'wqp' from the prepost list on 'wqset'
 *
 * Conditions:
 *	wqset is locked
 *	caller holds a reference on wqp (and is responsible to release it)
 *
 * Notes:
 *	wqp is invalidated, wqset is potentially updated with a new
 *	prepost ID, and the next element of the prepost list may be
 *	consumed as well (if the list contained only 2 objects)
 */
static int wq_prepost_remove(struct waitq_set *wqset,
			     struct wq_prepost *wqp)
{
	int more_posts = 1;
	uint64_t next_id = wqp->wqp_post.wqp_next_id;
	uint64_t wqp_id = wqp->wqp_prepostid.id;
	struct wq_prepost *prev_wqp, *next_wqp;

	assert(wqp_type(wqp) == WQP_POST);
	assert(wqset->wqset_q.waitq_prepost == 1);

	if (next_id == wqp_id) {
		/* the list is singular and becoming empty */
		wqset->wqset_prepost_id = 0;
		more_posts = 0;
		goto out;
	}

	prev_wqp = wq_prepost_get_rnext(wqp);
	assert(prev_wqp != NULL);
	assert(prev_wqp->wqp_post.wqp_next_id == wqp_id);
	assert(prev_wqp->wqp_prepostid.id != wqp_id);
	assert(wqp_type(prev_wqp) == WQP_POST);

	if (prev_wqp->wqp_prepostid.id == next_id) {
		/*
		 * There are two items in the list, and we're removing one. We
		 * only need to keep the WQP_WQ pointer from 'prev_wqp'
		 */
		wqset->wqset_prepost_id = prev_wqp->wqp_post.wqp_wq_id;
		wq_prepost_invalidate(prev_wqp);
		wq_prepost_put(prev_wqp);
		more_posts = 0;
		goto out;
	}

	/* prev->next = next */
	prev_wqp->wqp_post.wqp_next_id = next_id;

	/* next->prev = prev */
	next_wqp = wq_prepost_get(next_id);
	assert(next_wqp != NULL);
	assert(next_wqp != wqp);
	assert(next_wqp != prev_wqp);
	assert(wqp_type(next_wqp) == WQP_POST);

	wq_prepost_reset_rnext(next_wqp);
	wq_prepost_rlink(next_wqp, prev_wqp);

	/* If we remove the head of the list, update the wqset */
	if (wqp_id == wqset->wqset_prepost_id)
		wqset->wqset_prepost_id = next_id;

	wq_prepost_put(prev_wqp);
	wq_prepost_put(next_wqp);

out:
	wq_prepost_reset_rnext(wqp);
	wq_prepost_invalidate(wqp);
	return more_posts;
}
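
/*
 * Illustrative sketch of the removal cases handled above (not from the
 * original source). A set's preposts form a circular list of WQP_POST
 * elements headed by wqset_prepost_id:
 *
 *	singular:   [P1] -> P1            remove P1: wqset_prepost_id = 0
 *	two items:  [P1] -> P2 -> P1      remove P1: keep only P2's WQP_WQ id
 *	general:    [P1] -> P2 -> P3 ...  remove P2: P1->next = P3, relink the
 *	                                  reverse pointers, and move the head
 *	                                  if P2 was the head
 */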

static struct wq_prepost *wq_prepost_rfirst(uint64_t id)
{
	struct lt_elem *elem;
	elem = lt_elem_list_first(&g_prepost_table, id);
	wqp_do_alloc_stats(elem);
	return (struct wq_prepost *)(void *)elem;
}

static struct wq_prepost *wq_prepost_rpop(uint64_t *id, int type)
{
	struct lt_elem *elem;
	elem = lt_elem_list_pop(&g_prepost_table, id, type);
	wqp_do_alloc_stats(elem);
	return (struct wq_prepost *)(void *)elem;
}

static void wq_prepost_release_rlist(struct wq_prepost *wqp)
{
	int nelem = 0;
	struct wqp_cache *cache;
	struct lt_elem *elem;

	if (!wqp)
		return;

	elem = &wqp->wqte;

	/*
	 * These are reserved elements: release them back to the per-cpu pool
	 * if our cache is running low.
	 */
	disable_preemption();
	cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
	if (cache->avail < WQP_CACHE_MAX) {
		struct lt_elem *tmp = NULL;
		if (cache->head != LT_IDX_MAX)
			tmp = lt_elem_list_first(&g_prepost_table, cache->head);
		nelem = lt_elem_list_link(&g_prepost_table, elem, tmp);
		cache->head = elem->lt_id.id;
		cache->avail += nelem;
		enable_preemption();
		return;
	}
	enable_preemption();

	/* release these elements back to the main table */
	nelem = lt_elem_list_release(&g_prepost_table, elem, LT_RESERVED);

#if CONFIG_WAITQ_STATS
	g_prepost_table.nreserved_releases += 1;
	OSDecrementAtomic64(&g_prepost_table.nreservations);
#endif
}

typedef int (*wqp_callback_func)(struct waitq_set *wqset,
				 void *ctx,
				 struct wq_prepost *wqp,
				 struct waitq *waitq);

/*
 * iterate over a chain of preposts associated with a waitq set.
 *
 * Conditions:
 *	wqset is locked
 *
 * Notes:
 *	This loop performs automatic prepost chain management / culling, and
 *	may reset or adjust the waitq set's prepost ID pointer. If you don't
 *	want this extra processing, you can use wq_prepost_iterate().
 */
static int wq_prepost_foreach_locked(struct waitq_set *wqset,
				     void *ctx, wqp_callback_func cb)
{
	int ret = WQ_ITERATE_SUCCESS;
	struct wq_prepost *wqp, *tmp_wqp;

	assert(cb != NULL);

	if (!wqset || !waitq_set_maybe_preposted(wqset))
		return WQ_ITERATE_SUCCESS;

restart:
	wqp = wq_prepost_get(wqset->wqset_prepost_id);
	if (!wqp) {
		/*
		 * The prepost object is no longer valid, reset the waitq
		 * set's prepost id.
		 */
		wqset->wqset_prepost_id = 0;
		return WQ_ITERATE_SUCCESS;
	}

	if (wqp_type(wqp) == WQP_WQ) {
		uint64_t __assert_only wqp_id = wqp->wqp_prepostid.id;

		ret = cb(wqset, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr);

		switch (ret) {
		case WQ_ITERATE_INVALIDATE_CONTINUE:
			/* the caller wants to remove the only prepost here */
			assert(wqp_id == wqset->wqset_prepost_id);
			wqset->wqset_prepost_id = 0;
			/* fall through */
		case WQ_ITERATE_CONTINUE:
			wq_prepost_put(wqp);
			ret = WQ_ITERATE_SUCCESS;
			break;
		case WQ_ITERATE_RESTART:
			wq_prepost_put(wqp);
			/* fall through */
		case WQ_ITERATE_DROPPED:
			goto restart;
		default:
			wq_prepost_put(wqp);
			break;
		}
		return ret;
	}

	assert(wqp->wqp_prepostid.id == wqset->wqset_prepost_id);
	assert(wqp_type(wqp) == WQP_POST);

	/*
	 * At this point we know we have a list of POST objects.
	 * Grab a handle to the last element in the list and start
	 * the iteration.
	 */
	tmp_wqp = wq_prepost_get_rnext(wqp);
	assert(tmp_wqp != NULL && wqp_type(tmp_wqp) == WQP_POST);

	uint64_t last_id = tmp_wqp->wqp_prepostid.id;
	wq_prepost_put(tmp_wqp);

	ret = WQ_ITERATE_SUCCESS;
	for (;;) {
		uint64_t wqp_id, first_id, next_id;

		wqp_id = wqp->wqp_prepostid.id;
		first_id = wqset->wqset_prepost_id;
		next_id = wqp->wqp_post.wqp_next_id;

		/* grab the WQP_WQ object this _POST points to */
		tmp_wqp = wq_prepost_get(wqp->wqp_post.wqp_wq_id);
		if (!tmp_wqp) {
			/*
			 * This WQP_POST object points to an invalid
			 * WQP_WQ object - remove the POST object from
			 * the list.
			 */
			if (wq_prepost_remove(wqset, wqp) == 0) {
				wq_prepost_put(wqp);
				goto restart;
			}
			goto next_prepost;
		}
		assert(wqp_type(tmp_wqp) == WQP_WQ);
		/*
		 * make the callback: note that this could remove 'wqp' or
		 * drop the lock on our waitq set. We need to re-validate
		 * our state when this function returns.
		 */
		ret = cb(wqset, ctx, wqp, tmp_wqp->wqp_wq.wqp_wq_ptr);
		wq_prepost_put(tmp_wqp);

		switch (ret) {
		case WQ_ITERATE_CONTINUE:
			/* continue iteration */
			break;
		case WQ_ITERATE_INVALIDATE_CONTINUE:
			assert(next_id == wqp->wqp_post.wqp_next_id);
			if (wq_prepost_remove(wqset, wqp) == 0) {
				wq_prepost_put(wqp);
				goto restart;
			}
			goto next_prepost;
		case WQ_ITERATE_RESTART:
			wq_prepost_put(wqp);
			/* fall through */
		case WQ_ITERATE_DROPPED:
			/* the callback dropped the ref to wqp: just restart */
			goto restart;
		default:
			/* break out of the iteration for some other reason */
			goto finish_prepost_foreach;
		}

		/*
		 * the set lock may have been dropped during callback,
		 * if something looks different, restart the prepost iteration
		 */
		if (!wqp_is_valid(wqp) ||
		    (wqp->wqp_post.wqp_next_id != next_id) ||
		    wqset->wqset_prepost_id != first_id) {
			wq_prepost_put(wqp);
			goto restart;
		}

next_prepost:
		/* this was the last object in the list */
		if (wqp_id == last_id)
			break;

		/* get the next object */
		tmp_wqp = wq_prepost_get(next_id);
		if (!tmp_wqp) {
			/*
			 * At this point we've already checked our state
			 * after the callback (which may have dropped the set
			 * lock). If we find an invalid member of the list
			 * then something is wrong.
			 */
			panic("Invalid WQP_POST member 0x%llx in waitq set "
			      "0x%llx prepost list (first:%llx, "
			      "wqp:%p)",
			      next_id, wqset->wqset_id, first_id, wqp);
		}
		wq_prepost_put(wqp);
		wqp = tmp_wqp;

		assert(wqp_type(wqp) == WQP_POST);
	}

finish_prepost_foreach:
	wq_prepost_put(wqp);
	if (ret == WQ_ITERATE_CONTINUE)
		ret = WQ_ITERATE_SUCCESS;

	return ret;
}

/*
 * Perform a simple loop over a chain of prepost objects
 *
 * Conditions:
 *	If 'prepost_id' is associated with a waitq (set) then that object must
 *	be locked before calling this function.
 *	Callback function, 'cb', must be able to handle a NULL wqset pointer
 *	and a NULL waitq pointer!
 *
 * Notes:
 *	This prepost chain iteration will _not_ automatically adjust any chain
 *	element or linkage. This is the responsibility of the caller! If you
 *	want automatic prepost chain management (at a cost of extra CPU time),
 *	you can use: wq_prepost_foreach_locked().
 */
static int wq_prepost_iterate(uint64_t prepost_id,
			      void *ctx, wqp_callback_func cb)
{
	int ret;
	struct wq_prepost *wqp;

	if (!prepost_id)
		return WQ_ITERATE_SUCCESS;

	wqp = wq_prepost_get(prepost_id);
	if (!wqp)
		return WQ_ITERATE_SUCCESS;

	if (wqp_type(wqp) == WQP_WQ) {
		ret = WQ_ITERATE_SUCCESS;
		if (cb)
			ret = cb(NULL, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr);

		if (ret != WQ_ITERATE_DROPPED)
			wq_prepost_put(wqp);
		return ret;
	}

	assert(wqp->wqp_prepostid.id == prepost_id);
	assert(wqp_type(wqp) == WQP_POST);

	/* at this point we know we have a list of POST objects */
	uint64_t next_id;

	ret = WQ_ITERATE_CONTINUE;
	do {
		struct wq_prepost *tmp_wqp;
		struct waitq *wq = NULL;

		next_id = wqp->wqp_post.wqp_next_id;

		/* grab the WQP_WQ object this _POST points to */
		tmp_wqp = wq_prepost_get(wqp->wqp_post.wqp_wq_id);
		if (tmp_wqp) {
			assert(wqp_type(tmp_wqp) == WQP_WQ);
			wq = tmp_wqp->wqp_wq.wqp_wq_ptr;
		}

		if (cb)
			ret = cb(NULL, ctx, wqp, wq);
		if (tmp_wqp)
			wq_prepost_put(tmp_wqp);

		if (ret != WQ_ITERATE_CONTINUE)
			break;

		tmp_wqp = wq_prepost_get(next_id);
		if (!tmp_wqp) {
			/*
			 * the chain is broken: nothing we can do here besides
			 * bail from the iteration.
			 */
			ret = WQ_ITERATE_ABORTED;
			break;
		}

		wq_prepost_put(wqp);
		wqp = tmp_wqp;

		assert(wqp_type(wqp) == WQP_POST);
	} while (next_id != prepost_id);

	if (ret != WQ_ITERATE_DROPPED)
		wq_prepost_put(wqp);

	if (ret == WQ_ITERATE_CONTINUE)
		ret = WQ_ITERATE_SUCCESS;
	return ret;
}
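
/*
 * Illustrative sketch (not from the original source): a minimal
 * wqp_callback_func usable with wq_prepost_iterate(). Both 'wqset' and
 * 'waitq' may be NULL here, so this callback only counts chain elements.
 *
 *	static int count_preposts_cb(struct waitq_set *wqset, void *ctx,
 *				     struct wq_prepost *wqp, struct waitq *waitq)
 *	{
 *		(void)wqset; (void)wqp; (void)waitq;
 *		*(int *)ctx += 1;
 *		return WQ_ITERATE_CONTINUE;
 *	}
 *
 *	int nposts = 0;
 *	(void)wq_prepost_iterate(wqset->wqset_prepost_id, &nposts, count_preposts_cb);
 */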

struct _is_posted_ctx {
	struct waitq *posting_wq;
	int did_prepost;
};

static int wq_is_preposted_on_set_cb(struct waitq_set *wqset, void *ctx,
				     struct wq_prepost *wqp, struct waitq *waitq)
{
	struct _is_posted_ctx *pctx = (struct _is_posted_ctx *)ctx;

	(void)wqset;
	(void)wqp;

	/*
	 * Don't early-out, run through the _entire_ list:
	 * This ensures that we retain a minimum number of invalid elements.
	 */
	if (pctx->posting_wq == waitq)
		pctx->did_prepost = 1;

	return WQ_ITERATE_CONTINUE;
}

/*
 * checks if 'waitq' has already preposted on 'wqset'
 *
 * Parameters:
 *	waitq    The waitq that's preposting
 *	wqset    The set onto which waitq may be preposted
 *
 * Conditions:
 *	both waitq and wqset are locked
 *
 * Returns non-zero if 'waitq' has already preposted to 'wqset'
 */
static int wq_is_preposted_on_set(struct waitq *waitq, struct waitq_set *wqset)
{
	int ret;
	struct _is_posted_ctx pctx;

	/*
	 * If the set's only prepost matches the waitq's prepost ID,
	 * then it obviously already preposted to the set.
	 */
	if (waitq->waitq_prepost_id != 0 &&
	    wqset->wqset_prepost_id == waitq->waitq_prepost_id)
		return 1;

	/* use full prepost iteration: always trim the list */
	pctx.posting_wq = waitq;
	pctx.did_prepost = 0;
	ret = wq_prepost_foreach_locked(wqset, (void *)&pctx,
					wq_is_preposted_on_set_cb);
	(void)ret;
	return pctx.did_prepost;
}

static struct wq_prepost *wq_get_prepost_obj(uint64_t *reserved, int type)
{
	struct wq_prepost *wqp = NULL;
	/*
	 * don't fail just because the caller doesn't have enough
	 * reservations, we've kept a low-water mark on the prepost table,
	 * so there should be some available for us.
	 */
	if (reserved && *reserved) {
		wqp = wq_prepost_rpop(reserved, type);
		assert(wqp->wqte.lt_id.idx < g_prepost_table.nelem);
	} else {
		/*
		 * TODO: if in interrupt context, grab from a special
		 * region / reserved list!
		 */
		wqp = wq_prepost_alloc(type, 1);
	}

	if (wqp == NULL)
		panic("Couldn't allocate prepost object!");
	return wqp;
}

/*
 * prepost a waitq onto a waitq set
 *
 * Parameters:
 *	wqset    The set onto which waitq will be preposted
 *	waitq    The waitq that's preposting
 *	reserved List (lt_elem_list_ style) of pre-allocated prepost elements
 *	         (may be NULL)
 *
 * Conditions:
 *	both wqset and waitq are locked
 *
 * Notes:
 *	If reserved is NULL, this may block on prepost table growth.
 */
static void wq_prepost_do_post_locked(struct waitq_set *wqset,
				      struct waitq *waitq,
				      uint64_t *reserved)
{
	struct wq_prepost *wqp_post, *wqp_head, *wqp_tail;

	assert(waitq_held(waitq) && waitq_held(&wqset->wqset_q));

	/*
	 * nothing to do if it's already preposted:
	 * note that this also culls any invalid prepost objects
	 */
	if (wq_is_preposted_on_set(waitq, wqset))
		return;

	/*
	 * This function is called because an event is being posted to 'waitq'.
	 * We need a prepost object associated with this queue. Allocate one
	 * now if the waitq isn't already associated with one.
	 */
	if (waitq->waitq_prepost_id == 0) {
		struct wq_prepost *wqp;
		wqp = wq_get_prepost_obj(reserved, WQP_WQ);
		wqp->wqp_wq.wqp_wq_ptr = waitq;
		wqp_set_valid(wqp);
		waitq->waitq_prepost_id = wqp->wqp_prepostid.id;
		wq_prepost_put(wqp);
	}

#if CONFIG_LTABLE_STATS
	g_prepost_table.npreposts += 1;
#endif

	wqdbg_v("preposting waitq %p (0x%llx) to set 0x%llx",
		(void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq),
		waitq->waitq_prepost_id, wqset->wqset_id);

	if (wqset->wqset_prepost_id == 0) {
		/* the set has no previous preposts */
		wqset->wqset_prepost_id = waitq->waitq_prepost_id;
		return;
	}

	wqp_head = wq_prepost_get(wqset->wqset_prepost_id);
	if (!wqp_head) {
		/* the previous prepost has become invalid */
		wqset->wqset_prepost_id = waitq->waitq_prepost_id;
		return;
	}

	assert(wqp_head->wqp_prepostid.id == wqset->wqset_prepost_id);

	/*
	 * If we get here, we're going to need at least one new wq_prepost
	 * object. If the previous wqset_prepost_id points to a WQP_WQ, we
	 * actually need to allocate 2 wq_prepost objects because the WQP_WQ
	 * is tied to the waitq and shared across all sets.
	 */
	wqp_post = wq_get_prepost_obj(reserved, WQP_POST);

	wqp_post->wqp_post.wqp_wq_id = waitq->waitq_prepost_id;
	wqdbg_v("POST 0x%llx :: WQ 0x%llx", wqp_post->wqp_prepostid.id,
		waitq->waitq_prepost_id);

	if (wqp_type(wqp_head) == WQP_WQ) {
		/*
		 * We must replace the wqset_prepost_id with a pointer
		 * to two new WQP_POST objects
		 */
		uint64_t wqp_id = wqp_head->wqp_prepostid.id;
		wqdbg_v("set 0x%llx previous had 1 WQ prepost (0x%llx): "
			"replacing with two POST preposts",
			wqset->wqset_id, wqp_id);

		/* drop the old reference */
		wq_prepost_put(wqp_head);

		/* grab another new object (the 2nd of two) */
		wqp_head = wq_get_prepost_obj(reserved, WQP_POST);

		/* point this one to the original WQP_WQ object */
		wqp_head->wqp_post.wqp_wq_id = wqp_id;
		wqdbg_v("POST 0x%llx :: WQ 0x%llx",
			wqp_head->wqp_prepostid.id, wqp_id);

		/* link it to the new wqp_post object allocated earlier */
		wqp_head->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id;
		/* make the list a double-linked and circular */
		wq_prepost_rlink(wqp_head, wqp_post);

		/*
		 * Finish setting up the new prepost: point it back to the
		 * POST object we allocated to replace the original wqset
		 * WQ prepost object
		 */
		wqp_post->wqp_post.wqp_next_id = wqp_head->wqp_prepostid.id;
		wq_prepost_rlink(wqp_post, wqp_head);

		/* mark objects valid, and reset the wqset prepost list head */
		wqp_set_valid(wqp_head);
		wqp_set_valid(wqp_post);
		wqset->wqset_prepost_id = wqp_head->wqp_prepostid.id;

		/* release both references */
		wq_prepost_put(wqp_head);
		wq_prepost_put(wqp_post);

		wqdbg_v("set 0x%llx: 0x%llx/0x%llx -> 0x%llx/0x%llx -> 0x%llx",
			wqset->wqset_id, wqset->wqset_prepost_id,
			wqp_head->wqp_prepostid.id, wqp_head->wqp_post.wqp_next_id,
			wqp_post->wqp_prepostid.id,
			wqp_post->wqp_post.wqp_next_id);
		return;
	}

	assert(wqp_type(wqp_head) == WQP_POST);

	/*
	 * Add the new prepost to the end of the prepost list
	 */
	wqp_tail = wq_prepost_get_rnext(wqp_head);
	assert(wqp_tail != NULL);
	assert(wqp_tail->wqp_post.wqp_next_id == wqset->wqset_prepost_id);

	/*
	 * link the head to the new tail
	 * NOTE: this needs to happen first in case wqp_tail == wqp_head
	 */
	wq_prepost_reset_rnext(wqp_head);
	wq_prepost_rlink(wqp_head, wqp_post);

	/* point the new object to the list head, and list tail */
	wqp_post->wqp_post.wqp_next_id = wqp_head->wqp_prepostid.id;
	wq_prepost_rlink(wqp_post, wqp_tail);

	/* point the last item in the waitq set's list to the new object */
	wqp_tail->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id;

	wqp_set_valid(wqp_post);

	wq_prepost_put(wqp_head);
	wq_prepost_put(wqp_tail);
	wq_prepost_put(wqp_post);

	wqdbg_v("set 0x%llx (wqp:0x%llx) last_prepost:0x%llx, "
		"new_prepost:0x%llx->0x%llx", wqset->wqset_id,
		wqset->wqset_prepost_id, wqp_head->wqp_prepostid.id,
		wqp_post->wqp_prepostid.id, wqp_post->wqp_post.wqp_next_id);

	return;
}
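
/*
 * Illustrative sketch of the resulting structure (not from the original
 * source). After wq_prepost_do_post_locked(), wqset_prepost_id refers
 * either to a single WQP_WQ element (one preposting waitq) or to the head
 * of a circular, reverse-linked list of WQP_POST elements, each of which
 * points at a WQP_WQ element shared with the preposting waitq:
 *
 *	wqset->wqset_prepost_id
 *	        |
 *	        v
 *	    [POST A] --next--> [POST B] --next--> ... --next--> [POST A]
 *	        |                 |
 *	    wqp_wq_id         wqp_wq_id
 *	        v                 v
 *	    [WQ  X]           [WQ  Y]
 */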


/* ----------------------------------------------------------------------
 *
 * Stats collection / reporting
 *
 * ---------------------------------------------------------------------- */
#if defined(CONFIG_LTABLE_STATS) && defined(CONFIG_WAITQ_STATS)
static void wq_table_stats(struct link_table *table, struct wq_table_stats *stats)
{
	stats->version = WAITQ_STATS_VERSION;
	stats->table_elements = table->nelem;
	stats->table_used_elems = table->used_elem;
	stats->table_elem_sz = table->elem_sz;
	stats->table_slabs = table->nslabs;
	stats->table_slab_sz = table->slab_sz;

	stats->table_num_allocs = table->nallocs;
	stats->table_num_preposts = table->npreposts;
	stats->table_num_reservations = table->nreservations;

	stats->table_max_used = table->max_used;
	stats->table_avg_used = table->avg_used;
	stats->table_max_reservations = table->max_reservations;
	stats->table_avg_reservations = table->avg_reservations;
}

void waitq_link_stats(struct wq_table_stats *stats)
{
	if (!stats)
		return;
	wq_table_stats(&g_wqlinktable, stats);
}

void waitq_prepost_stats(struct wq_table_stats *stats)
{
	wq_table_stats(&g_prepost_table, stats);
}
#endif


/* ----------------------------------------------------------------------
 *
 * Global Wait Queues
 *
 * ---------------------------------------------------------------------- */
static struct waitq g_boot_waitq;
static struct waitq *global_waitqs = &g_boot_waitq;
static uint32_t g_num_waitqs = 1;

/*
 * Zero out the used MSBs of the event.
 */
#define _CAST_TO_EVENT_MASK(event) ((uintptr_t)(event) & ((1ul << _EVENT_MASK_BITS) - 1ul))

static __inline__ uint32_t waitq_hash(char *key, size_t length)
{
	uint32_t hash = jenkins_hash(key, length);

	hash &= (g_num_waitqs - 1);
	return hash;
}

/* return a global waitq pointer corresponding to the given event */
struct waitq *_global_eventq(char *event, size_t event_length)
{
	return &global_waitqs[waitq_hash(event, event_length)];
}

/* return an indexed global waitq pointer */
struct waitq *global_waitq(int index)
{
	return &global_waitqs[index % g_num_waitqs];
}
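
/*
 * Illustrative example (not from the original source): callers hash an
 * arbitrary event address into one of the g_num_waitqs global queues.
 * Because g_num_waitqs is forced to a power of two at bootstrap, the
 * jenkins hash can simply be masked with (g_num_waitqs - 1):
 *
 *	void *ev = ...;                              (hypothetical event)
 *	struct waitq *wq = _global_eventq((char *)&ev, sizeof(ev));
 *	assert(waitq_is_global(wq));
 */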

#if defined(CONFIG_LTABLE_STATS) || defined(CONFIG_WAITQ_STATS)
/* this global is for lldb */
const uint32_t g_nwaitq_btframes = NWAITQ_BTFRAMES;

static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip)
{
	uintptr_t buf[NWAITQ_BTFRAMES + skip];
	if (skip < 0)
		skip = 0;
	memset(buf, 0, (NWAITQ_BTFRAMES + skip) * sizeof(uintptr_t));
	backtrace(buf, g_nwaitq_btframes + skip);
	memcpy(&bt[0], &buf[skip], NWAITQ_BTFRAMES * sizeof(uintptr_t));
}
#else /* no stats */
#define waitq_grab_backtrace(...)
#endif

#if CONFIG_WAITQ_STATS

struct wq_stats g_boot_stats;
struct wq_stats *g_waitq_stats = &g_boot_stats;

static __inline__ struct wq_stats *waitq_global_stats(struct waitq *waitq) {
	struct wq_stats *wqs;
	uint32_t idx;

	if (!waitq_is_global(waitq))
		return NULL;

	idx = (uint32_t)(((uintptr_t)waitq - (uintptr_t)global_waitqs) / sizeof(*waitq));
	assert(idx < g_num_waitqs);
	wqs = &g_waitq_stats[idx];
	return wqs;
}

static __inline__ void waitq_stats_count_wait(struct waitq *waitq)
{
	struct wq_stats *wqs = waitq_global_stats(waitq);
	if (wqs != NULL) {
		wqs->waits++;
		waitq_grab_backtrace(wqs->last_wait, 2);
	}
}

static __inline__ void waitq_stats_count_wakeup(struct waitq *waitq)
{
	struct wq_stats *wqs = waitq_global_stats(waitq);
	if (wqs != NULL) {
		wqs->wakeups++;
		waitq_grab_backtrace(wqs->last_wakeup, 2);
	}
}

static __inline__ void waitq_stats_count_clear_wakeup(struct waitq *waitq)
{
	struct wq_stats *wqs = waitq_global_stats(waitq);
	if (wqs != NULL) {
		wqs->wakeups++;
		wqs->clears++;
		waitq_grab_backtrace(wqs->last_wakeup, 2);
	}
}

static __inline__ void waitq_stats_count_fail(struct waitq *waitq)
{
	struct wq_stats *wqs = waitq_global_stats(waitq);
	if (wqs != NULL) {
		wqs->failed_wakeups++;
		waitq_grab_backtrace(wqs->last_failed_wakeup, 2);
	}
}
#else /* !CONFIG_WAITQ_STATS */
#define waitq_stats_count_wait(q)         do { } while (0)
#define waitq_stats_count_wakeup(q)       do { } while (0)
#define waitq_stats_count_clear_wakeup(q) do { } while (0)
#define waitq_stats_count_fail(q)         do { } while (0)
#endif

int waitq_is_valid(struct waitq *waitq)
{
	return (waitq != NULL) && waitq->waitq_isvalid && ((waitq->waitq_type & ~1) == WQT_QUEUE);
}

int waitq_set_is_valid(struct waitq_set *wqset)
{
	return (wqset != NULL) && wqset->wqset_q.waitq_isvalid && waitqs_is_set(wqset);
}

int waitq_is_global(struct waitq *waitq)
{
	if (waitq >= global_waitqs && waitq < global_waitqs + g_num_waitqs)
		return 1;
	return 0;
}

int waitq_irq_safe(struct waitq *waitq)
{
	/* global wait queues have this bit set on initialization */
	return waitq->waitq_irq;
}

static uint32_t waitq_hash_size(void)
{
	uint32_t hsize, queues;

	if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize)))
		return hsize;

	queues = thread_max / 5;
	hsize = P2ROUNDUP(queues * sizeof(struct waitq), PAGE_SIZE);

	return hsize;
}

void waitq_bootstrap(void)
{
	kern_return_t kret;
	uint32_t whsize, qsz, tmp32;

	g_min_free_table_elem = DEFAULT_MIN_FREE_TABLE_ELEM;
	if (PE_parse_boot_argn("wqt_min_free", &tmp32, sizeof(tmp32)) == TRUE)
		g_min_free_table_elem = tmp32;
	wqdbg("Minimum free table elements: %d", tmp32);

	/*
	 * Determine the amount of memory we're willing to reserve for
	 * the waitqueue hash table
	 */
	whsize = waitq_hash_size();

	/* Determine the number of waitqueues we can fit. */
	qsz = sizeof(struct waitq);
	whsize = ROUNDDOWN(whsize, qsz);
	g_num_waitqs = whsize / qsz;

	/*
	 * The hash algorithm requires that this be a power of 2, so we
	 * just mask off all the low-order bits.
	 */
	for (uint32_t i = 0; i < 31; i++) {
		uint32_t bit = (1 << i);
		if ((g_num_waitqs & bit) == g_num_waitqs)
			break;
		g_num_waitqs &= ~bit;
	}
	assert(g_num_waitqs > 0);

	/* Now determine how much memory we really need. */
	whsize = P2ROUNDUP(g_num_waitqs * qsz, PAGE_SIZE);

	wqdbg("allocating %d global queues (%d bytes)", g_num_waitqs, whsize);
	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&global_waitqs,
				      whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ);
	if (kret != KERN_SUCCESS || global_waitqs == NULL)
		panic("kernel_memory_allocate() failed to alloc global_waitqs"
		      ", error: %d, whsize: 0x%x", kret, whsize);

#if CONFIG_WAITQ_STATS
	whsize = P2ROUNDUP(g_num_waitqs * sizeof(struct wq_stats), PAGE_SIZE);
	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&g_waitq_stats,
				      whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ);
	if (kret != KERN_SUCCESS || global_waitqs == NULL)
		panic("kernel_memory_allocate() failed to alloc g_waitq_stats"
		      ", error: %d, whsize: 0x%x", kret, whsize);
	memset(g_waitq_stats, 0, whsize);
#endif

	for (uint32_t i = 0; i < g_num_waitqs; i++) {
		waitq_init(&global_waitqs[i], SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ);
	}

	waitq_set_zone = zinit(sizeof(struct waitq_set),
			       WAITQ_SET_MAX * sizeof(struct waitq_set),
			       sizeof(struct waitq_set),
			       "waitq sets");
	zone_change(waitq_set_zone, Z_NOENCRYPT, TRUE);

	/* initialize the global waitq link table */
	wql_init();

	/* initialize the global waitq prepost table */
	wqp_init();
}
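
/*
 * Illustrative example of the power-of-two reduction above (not from the
 * original source): the loop clears low-order bits of g_num_waitqs until a
 * single bit (the highest) remains. If waitq_hash_size() yielded room for
 * 1500 queues:
 *
 *	1500 = 0b10111011100  ->  g_num_waitqs = 1024
 *
 * which lets waitq_hash() mask with (g_num_waitqs - 1) instead of taking a
 * modulo.
 */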


/* ----------------------------------------------------------------------
 *
 * Wait Queue Implementation
 *
 * ---------------------------------------------------------------------- */

/*
 * Double the standard lock timeout, because wait queues tend
 * to iterate over a number of threads - locking each. If there is
 * a problem with a thread lock, it normally times out at the wait
 * queue level first, hiding the real problem.
 */
/* For x86, the hardware timeout is in TSC units. */
#if defined(__i386__) || defined(__x86_64__)
#define hwLockTimeOut LockTimeOutTSC
#else
#define hwLockTimeOut LockTimeOut
#endif

void waitq_lock(struct waitq *wq)
{
	if (__improbable(waitq_lock_to(wq,
	                               hwLockTimeOut * 2) == 0)) {
		boolean_t wql_acquired = FALSE;

		while (machine_timeout_suspended()) {
			mp_enable_preemption();
			wql_acquired = waitq_lock_to(wq,
			                             hwLockTimeOut * 2);
			if (wql_acquired)
				break;
		}
		if (wql_acquired == FALSE)
			panic("waitq deadlock - waitq=%p, cpu=%d\n",
			      wq, cpu_number());
	}
#if defined(__x86_64__)
	pltrace(FALSE);
#endif
	assert(waitq_held(wq));
}

void waitq_unlock(struct waitq *wq)
{
	assert(waitq_held(wq));
#if defined(__x86_64__)
	pltrace(TRUE);
#endif
	waitq_lock_unlock(wq);
}


/**
 * clear the thread-related waitq state
 *
 * Conditions:
 *	'thread' is locked
 */
static inline void thread_clear_waitq_state(thread_t thread)
{
	thread->waitq = NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
}


typedef thread_t (*waitq_select_cb)(void *ctx, struct waitq *waitq,
				    int is_global, thread_t thread);

struct waitq_select_args {
	/* input parameters */
	struct waitq    *posted_waitq;
	struct waitq    *waitq;
	event64_t        event;
	waitq_select_cb  select_cb;
	void            *select_ctx;

	uint64_t        *reserved_preposts;

	/* output parameters */
	queue_t   threadq;
	int       max_threads;
	int      *nthreads;
	spl_t    *spl;
};

static void do_waitq_select_n_locked(struct waitq_select_args *args);

/*
 * callback invoked once for every waitq set to which a waitq belongs
 *
 * Conditions:
 *	ctx->posted_waitq is locked
 *	'link' points to a valid waitq set
 *
 * Notes:
 *	Takes the waitq set lock on the set pointed to by 'link'
 *	Calls do_waitq_select_n_locked() which could recurse back into
 *	this function if the waitq set is a member of other sets.
 *	If no threads were selected, it preposts the input waitq
 *	onto the waitq set pointed to by 'link'.
 */
static int waitq_select_walk_cb(struct waitq *waitq, void *ctx,
				struct waitq_link *link)
{
	int ret = WQ_ITERATE_CONTINUE;
	struct waitq_select_args args = *((struct waitq_select_args *)ctx);
	struct waitq_set *wqset;

	(void)waitq;
	assert(wql_type(link) == WQL_WQS);

	wqset = link->wql_wqs.wql_set;
	args.waitq = &wqset->wqset_q;

	assert(!waitq_irq_safe(waitq));
	assert(!waitq_irq_safe(&wqset->wqset_q));

	waitq_set_lock(wqset);
	/*
	 * verify that the link wasn't invalidated just before
	 * we were able to take the lock.
	 */
	if (wqset->wqset_id != link->wql_setid.id)
		goto out_unlock;

	/*
	 * Find any threads waiting on this wait queue set,
	 * and recurse into any waitq set to which this set belongs.
	 */
	do_waitq_select_n_locked(&args);

	if (*(args.nthreads) > 0 ||
	    (args.threadq && !queue_empty(args.threadq))) {
		/* at least 1 thread was selected and returned: don't prepost */
		if (args.max_threads > 0 &&
		    *(args.nthreads) >= args.max_threads) {
			/* break out of the setid walk */
			ret = WQ_ITERATE_FOUND;
		}
		goto out_unlock;
	} else {
		/*
		 * No thread selected: prepost 'waitq' to 'wqset'
		 * if wqset can handle preposts and the event is set to 0.
		 * We also make sure to not post waitq sets to other sets.
		 *
		 * If the set doesn't support preposts, but does support
		 * prepost callout/hook interaction, invoke the predefined
		 * callout function and pass the set's 'prepost_hook.' This
		 * could potentially release another thread to handle events.
		 */
		if (args.event == NO_EVENT64) {
			if (waitq_set_can_prepost(wqset)) {
				wq_prepost_do_post_locked(
					wqset, waitq, args.reserved_preposts);
			} else if (waitq_set_has_prepost_hook(wqset)) {
				waitq_set__CALLING_PREPOST_HOOK__(
					wqset->wqset_prepost_hook, waitq, 0);
			}
		}
	}

out_unlock:
	waitq_set_unlock(wqset);
	return ret;
}

/*
 * generic thread selection from a waitq (and sets to which the waitq belongs)
 *
 * Conditions:
 *	args->waitq (and args->posted_waitq) is locked
 *
 * Notes:
 *	Uses the optional select callback function to refine the selection
 *	of one or more threads from a waitq and any set to which the waitq
 *	belongs. The select callback is invoked once for every thread that
 *	is found to be waiting on the input args->waitq.
 *
 *	If one or more threads are selected, this may disable interrupts.
 *	The previous interrupt state is returned in args->spl and should
 *	be used in a call to splx() if threads are returned to the caller.
 */
static void do_waitq_select_n_locked(struct waitq_select_args *args)
{
	struct waitq *waitq = args->waitq;
	int max_threads = args->max_threads;
	thread_t thread = THREAD_NULL, first_thread = THREAD_NULL;
	struct waitq *safeq;
	uint32_t remaining_eventmask = 0;
	uint32_t eventmask;
	int *nthreads = args->nthreads;
	spl_t spl = 0;

	assert(max_threads != 0);

	if (!waitq_irq_safe(waitq)) {
		/* JMM - add flag to waitq to avoid global lookup if no waiters */
		eventmask = _CAST_TO_EVENT_MASK(waitq);
		safeq = global_eventq(waitq);
		if (*nthreads == 0)
			spl = splsched();
		waitq_lock(safeq);
	} else {
		eventmask = _CAST_TO_EVENT_MASK(args->event);
		safeq = waitq;
	}

	/*
	 * If the safeq doesn't have an eventmask (not global) or the event
	 * we're looking for IS set in its eventmask, then scan the threads
	 * in that queue for ones that match the original <waitq,event> pair.
	 */
	if (!waitq_is_global(safeq) ||
	    (safeq->waitq_eventmask & eventmask) == eventmask) {

		/* look through each thread waiting directly on the safeq */
		qe_foreach_element_safe(thread, &safeq->waitq_queue, wait_links) {
			thread_t t = THREAD_NULL;
			assert_thread_magic(thread);

			if (thread->waitq == waitq && thread->wait_event == args->event) {
				t = thread;
				if (first_thread == THREAD_NULL)
					first_thread = thread;

				/* allow the caller to further refine the selection */
				if (args->select_cb)
					t = args->select_cb(args->select_ctx, waitq,
							    waitq_is_global(waitq), thread);
				if (t != THREAD_NULL) {
					*nthreads += 1;
					if (args->threadq) {
						if (*nthreads == 1)
							*(args->spl) = (safeq != waitq) ? spl : splsched();
						thread_lock(t);
						thread_clear_waitq_state(t);
						/* put locked thread on output queue */
						re_queue_tail(args->threadq, &t->wait_links);
					}
					/* only enqueue up to 'max' threads */
					if (*nthreads >= max_threads && max_threads > 0)
						break;
				}
			}
			/* thread wasn't selected so track its event */
			if (t == THREAD_NULL) {
				remaining_eventmask |= (thread->waitq != safeq) ?
						       _CAST_TO_EVENT_MASK(thread->waitq):
						       _CAST_TO_EVENT_MASK(thread->wait_event);
			}
		}

		/*
		 * Update the eventmask of global queues we just scanned:
		 * - If we selected all the threads in the queue, we can clear its
		 *   eventmask.
		 *
		 * - If we didn't find enough threads to fill our needs, then we can
		 *   assume we looked at every thread in the queue and the mask we
		 *   computed is complete - so reset it.
		 */
		if (waitq_is_global(safeq)) {
			if (queue_empty(&safeq->waitq_queue))
				safeq->waitq_eventmask = 0;
			else if (max_threads < 0 || *nthreads < max_threads)
				safeq->waitq_eventmask = remaining_eventmask;
		}
	}

	/*
	 * Grab the first thread in the queue if no other thread was selected.
	 * We can guarantee that no one has manipulated this thread because
	 * it's waiting on the given waitq, and we have that waitq locked.
	 */
	if (*nthreads == 0 && first_thread != THREAD_NULL && args->threadq) {
		/* we know this is the first (and only) thread */
		*nthreads = 1;
		*(args->spl) = (safeq != waitq) ? spl : splsched();
		thread_lock(first_thread);
		thread_clear_waitq_state(first_thread);
		re_queue_tail(args->threadq, &first_thread->wait_links);

		/* update the eventmask on [now] empty global queues */
		if (waitq_is_global(safeq) && queue_empty(&safeq->waitq_queue))
			safeq->waitq_eventmask = 0;
	}

	/* unlock the safe queue if we locked one above */
	if (safeq != waitq) {
		waitq_unlock(safeq);
		if (*nthreads == 0)
			splx(spl);
	}

	if (max_threads > 0 && *nthreads >= max_threads)
		return;

	/*
	 * wait queues that are not in any sets
	 * are the bottom of the recursion
	 */
	if (!waitq->waitq_set_id)
		return;

	/* check to see if the set ID for this wait queue is valid */
	struct waitq_link *link = wql_get_link(waitq->waitq_set_id);
	if (!link) {
		/* the waitq set to which this waitq belonged, has been invalidated */
		waitq->waitq_set_id = 0;
		return;
	}

	wql_put_link(link);

	/*
	 * If this waitq is a member of any wait queue sets, we need to look
	 * for waiting thread(s) in any of those sets, and prepost all sets that
	 * don't have active waiters.
	 *
	 * Note that we do a local walk of this waitq's links - we manually
	 * recurse down wait queue set's with non-zero wqset_q.waitq_set_id
	 */
	(void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
			       WQL_WQS, (void *)args, waitq_select_walk_cb);
}
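
/*
 * Illustrative example of the eventmask filter used above (not from the
 * original source). Each waiter hashed onto a global (IRQ-safe) queue
 * contributes _CAST_TO_EVENT_MASK(waitq-or-event) bits to the queue's
 * waitq_eventmask, so a wakeup can skip scanning a hashed queue whose mask
 * cannot contain the posted <waitq,event> pair:
 *
 *	uint32_t eventmask = _CAST_TO_EVENT_MASK(waitq);
 *	if ((safeq->waitq_eventmask & eventmask) == eventmask) {
 *		... scan safeq->waitq_queue for threads whose
 *		    thread->waitq/wait_event actually match ...
 *	}
 *
 * Because the mask keeps only the low _EVENT_MASK_BITS bits, this is a
 * may-contain filter, not an exact membership test.
 */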

/*
 * main entry point for thread selection from a waitq
 *
 * Conditions:
 *	waitq is locked
 *
 * Returns:
 *	The number of threads waiting on 'waitq' for 'event' which have
 *	been placed onto the input 'threadq'
 *
 * Notes:
 *	The 'select_cb' function is invoked for every thread found waiting
 *	on 'waitq' for 'event'. The thread is _not_ locked upon callback
 *	invocation. This parameter may be NULL.
 *
 *	If one or more threads are returned in 'threadq' then the caller is
 *	responsible to call splx() using the returned 'spl' value. Each
 *	returned thread is locked.
 */
static __inline__ int waitq_select_n_locked(struct waitq *waitq,
					    event64_t event,
					    waitq_select_cb select_cb,
					    void *select_ctx,
					    uint64_t *reserved_preposts,
					    queue_t threadq,
					    int max_threads, spl_t *spl)
{
	int nthreads = 0;

	struct waitq_select_args args = {
		.posted_waitq = waitq,
		.waitq = waitq,
		.event = event,
		.select_cb = select_cb,
		.select_ctx = select_ctx,
		.reserved_preposts = reserved_preposts,
		.threadq = threadq,
		.max_threads = max_threads,
		.nthreads = &nthreads,
		.spl = spl,
	};

	do_waitq_select_n_locked(&args);
	return nthreads;
}
2158 * 'thread' is not locked
2160 static thread_t
waitq_select_one_cb(void *ctx
, struct waitq
*waitq
,
2161 int is_global
, thread_t thread
)
2163 int fifo_q
, realtime
;
2164 boolean_t thread_imp_donor
= FALSE
;
2171 fifo_q
= 1; /* default to FIFO for all queues for now */
2172 #if IMPORTANCE_INHERITANCE
2174 fifo_q
= 0; /* 'thread_imp_donor' takes the place of FIFO checking */
2177 if (thread
->sched_pri
>= BASEPRI_REALTIME
)
2180 #if IMPORTANCE_INHERITANCE
2182 * Checking imp donor bit does not need thread lock or
2183 * or task lock since we have the wait queue lock and
2184 * thread can not be removed from it without acquiring
2185 * wait queue lock. The imp donor bit may change
2186 * once we read its value, but it is ok to wake
2187 * a thread while someone drops importance assertion
2188 * on the that thread.
2190 thread_imp_donor
= task_is_importance_donor(thread
->task
);
2191 #endif /* IMPORTANCE_INHERITANCE */
2193 if (fifo_q
|| thread_imp_donor
== TRUE
2194 || realtime
|| (thread
->options
& TH_OPT_VMPRIV
)) {
2196 * If this thread's task is an importance donor,
2197 * or it's a realtime thread, or it's a VM privileged
2198 * thread, OR the queue is marked as FIFO:
2204 /* by default, _don't_ select the thread */
/**
 * select from a waitq a single thread waiting for a given event
 *
 * Conditions:
 *	'waitq' is locked
 *
 * Returns:
 *	A locked thread that's been removed from the waitq, but has not
 *	yet been put on a run queue. Caller is responsible to call splx
 *	with the '*spl' value.
 */
static thread_t waitq_select_one_locked(struct waitq *waitq, event64_t event,
					uint64_t *reserved_preposts,
					int priority, spl_t *spl)
{
	(void)priority;
	int nthreads;
	queue_head_t threadq;

	queue_init(&threadq);

	nthreads = waitq_select_n_locked(waitq, event, waitq_select_one_cb, NULL,
					 reserved_preposts, &threadq, 1, spl);

	/* if we selected a thread, return it (still locked) */
	if (!queue_empty(&threadq)) {
		thread_t t;
		queue_entry_t qe = dequeue_head(&threadq);
		t = qe_element(qe, struct thread, wait_links);
		assert(queue_empty(&threadq)); /* there should be 1 entry */
		/* t has been locked and removed from all queues */
		return t;
	}

	return THREAD_NULL;
}
struct find_max_pri_ctx {
	integer_t max_sched_pri;
	integer_t max_base_pri;
	thread_t highest_thread;
};
/**
 * callback function that finds the max priority thread
 *
 * Conditions:
 *	'waitq' is locked
 *	'thread' is not locked
 */
static thread_t
waitq_find_max_pri_cb(void *ctx_in,
		      __unused struct waitq *waitq,
		      __unused int is_global,
		      thread_t thread)
{
	struct find_max_pri_ctx *ctx = (struct find_max_pri_ctx *)ctx_in;

	/*
	 * thread is not locked, use pri as a hint only
	 * wake up the highest base pri, and find the highest sched pri at that base pri
	 */
	integer_t sched_pri = *(volatile int16_t *)&thread->sched_pri;
	integer_t base_pri  = *(volatile int16_t *)&thread->base_pri;

	if (ctx->highest_thread == THREAD_NULL ||
	    (base_pri > ctx->max_base_pri) ||
	    (base_pri == ctx->max_base_pri && sched_pri > ctx->max_sched_pri)) {
		/* don't select the thread, just update ctx */
		ctx->max_sched_pri = sched_pri;
		ctx->max_base_pri = base_pri;
		ctx->highest_thread = thread;
	}

	return THREAD_NULL;
}
/**
 * select from a waitq the highest priority thread waiting for a given event
 *
 * Conditions:
 *	'waitq' is locked
 *
 * Returns:
 *	A locked thread that's been removed from the waitq, but has not
 *	yet been put on a run queue. Caller is responsible to call splx
 *	with the '*spl' value.
 */
static thread_t
waitq_select_max_locked(struct waitq *waitq, event64_t event,
			uint64_t *reserved_preposts,
			spl_t *spl)
{
	__assert_only int nthreads;
	assert(!waitq->waitq_set_id); /* doesn't support recursive sets */

	struct find_max_pri_ctx ctx = {
		.highest_thread = THREAD_NULL,
	};

	/*
	 * Scan the waitq to find the highest priority thread.
	 * This doesn't remove any thread from the queue
	 */
	nthreads = waitq_select_n_locked(waitq, event, waitq_find_max_pri_cb, &ctx,
					 reserved_preposts, NULL, 1, spl);

	assert(nthreads == 0);

	if (ctx.highest_thread != THREAD_NULL) {
		__assert_only kern_return_t ret;

		/* Remove only the thread we just found */
		ret = waitq_select_thread_locked(waitq, event, ctx.highest_thread, spl);

		assert(ret == KERN_SUCCESS);
		return ctx.highest_thread;
	}

	return THREAD_NULL;
}
struct select_thread_ctx {
	thread_t thread;
	event64_t event;
	spl_t *spl;
};
/**
 * link walk callback invoked once for each set to which a waitq belongs
 *
 * Conditions:
 *	initial waitq is locked
 *	ctx->thread is unlocked
 *
 * Notes:
 *	This may disable interrupts and early-out of the full DAG link walk by
 *	returning KERN_ALREADY_IN_SET. In this case, the returned thread has
 *	been removed from the waitq, its waitq state has been reset, and the
 *	caller is responsible to call splx() with the returned interrupt state
 *	(in ctx->spl).
 */
static int waitq_select_thread_cb(struct waitq *waitq, void *ctx,
				  struct waitq_link *link)
{
	struct select_thread_ctx *stctx = (struct select_thread_ctx *)ctx;
	struct waitq_set *wqset;
	struct waitq *wqsetq;
	struct waitq *safeq;
	spl_t s;

	thread_t thread = stctx->thread;
	event64_t event = stctx->event;

	if (wql_type(link) != WQL_WQS)
		return WQ_ITERATE_CONTINUE;

	wqset = link->wql_wqs.wql_set;
	wqsetq = &wqset->wqset_q;

	assert(!waitq_irq_safe(waitq));
	assert(!waitq_irq_safe(wqsetq));

	waitq_set_lock(wqset);

	s = splsched();

	/* find and lock the interrupt-safe waitq the thread is thought to be on */
	safeq = global_eventq(wqsetq);
	waitq_lock(safeq);

	thread_lock(thread);

	if ((thread->waitq == wqsetq) && (thread->wait_event == event)) {
		remqueue(&thread->wait_links);
		if (queue_empty(&safeq->waitq_queue)) {
			safeq->waitq_eventmask = 0;
		}
		thread_clear_waitq_state(thread);
		waitq_unlock(safeq);
		waitq_set_unlock(wqset);
		/*
		 * thread still locked,
		 * return non-zero to break out of WQS walk
		 */
		*(stctx->spl) = s;
		return WQ_ITERATE_FOUND;
	}

	thread_unlock(thread);
	waitq_set_unlock(wqset);
	waitq_unlock(safeq);
	splx(s);

	return WQ_ITERATE_CONTINUE;
}
/**
 * returns KERN_SUCCESS and locks 'thread' if-and-only-if 'thread' is waiting
 * on 'waitq' (or any set to which waitq belongs) for 'event'
 *
 * Conditions:
 *	'waitq' is locked
 *	'thread' is unlocked
 */
static kern_return_t waitq_select_thread_locked(struct waitq *waitq,
						event64_t event,
						thread_t thread, spl_t *spl)
{
	struct waitq *safeq;
	struct waitq_link *link;
	struct select_thread_ctx ctx;
	kern_return_t kr;
	spl_t s;

	s = splsched();

	/* Find and lock the interrupts disabled queue the thread is actually on */
	if (!waitq_irq_safe(waitq)) {
		safeq = global_eventq(waitq);
		waitq_lock(safeq);
	} else {
		safeq = waitq;
	}

	thread_lock(thread);

	if ((thread->waitq == waitq) && (thread->wait_event == event)) {
		remqueue(&thread->wait_links);
		if (queue_empty(&safeq->waitq_queue)) {
			safeq->waitq_eventmask = 0;
		}
		thread_clear_waitq_state(thread);
		*spl = s;
		/* thread still locked */
		return KERN_SUCCESS;
	}

	thread_unlock(thread);

	if (safeq != waitq)
		waitq_unlock(safeq);

	splx(s);

	if (!waitq->waitq_set_id)
		return KERN_NOT_WAITING;

	/* check to see if the set ID for this wait queue is valid */
	link = wql_get_link(waitq->waitq_set_id);
	if (!link) {
		/* the set to which this waitq belonged has been invalidated */
		waitq->waitq_set_id = 0;
		return KERN_NOT_WAITING;
	}

	/*
	 * The thread may be waiting on a wait queue set to which
	 * the input 'waitq' belongs. Go look for the thread in
	 * all wait queue sets. If it's there, we'll remove it
	 * because it's equivalent to waiting directly on the input waitq.
	 */
	ctx.thread = thread;
	ctx.event = event;
	ctx.spl = spl;
	kr = walk_waitq_links(LINK_WALK_FULL_DAG, waitq, waitq->waitq_set_id,
			      WQL_WQS, (void *)&ctx, waitq_select_thread_cb);

	wql_put_link(link);

	/* we found a thread, return success */
	if (kr == WQ_ITERATE_FOUND)
		return KERN_SUCCESS;

	return KERN_NOT_WAITING;
}
static int prepost_exists_cb(struct waitq_set __unused *wqset,
			     void __unused *ctx,
			     struct wq_prepost __unused *wqp,
			     struct waitq __unused *waitq)
{
	/* if we get here, then we know that there is a valid prepost object! */
	return WQ_ITERATE_FOUND;
}
/**
 * declare a thread's intent to wait on 'waitq' for 'wait_event'
 *
 * Conditions:
 *	'waitq' is locked
 */
wait_result_t waitq_assert_wait64_locked(struct waitq *waitq,
					 event64_t wait_event,
					 wait_interrupt_t interruptible,
					 wait_timeout_urgency_t urgency,
					 uint64_t deadline,
					 uint64_t leeway,
					 thread_t thread)
{
	wait_result_t wait_result;
	int realtime = 0;
	struct waitq *safeq;
	uintptr_t eventmask;
	spl_t s;

	/*
	 * Warning: Do _not_ place debugging print statements here.
	 * The waitq is locked!
	 */
	assert(!thread->started || thread == current_thread());

	if (thread->waitq != NULL)
		panic("thread already waiting on %p", thread->waitq);

	if (waitq_is_set(waitq)) {
		struct waitq_set *wqset = (struct waitq_set *)waitq;
		/*
		 * early-out if the thread is waiting on a wait queue set
		 * that has already been pre-posted.
		 */
		if (wait_event == NO_EVENT64 && waitq_set_maybe_preposted(wqset)) {
			int ret;
			/*
			 * Run through the list of potential preposts. Because
			 * this is a hot path, we short-circuit the iteration
			 * if we find just one prepost object.
			 */
			ret = wq_prepost_foreach_locked(wqset, NULL,
							prepost_exists_cb);
			if (ret == WQ_ITERATE_FOUND) {
				s = splsched();
				thread_lock(thread);
				thread->wait_result = THREAD_AWAKENED;
				thread_unlock(thread);
				splx(s);
				return THREAD_AWAKENED;
			}
		}
	}

	s = splsched();

	/*
	 * If already dealing with an irq safe wait queue, we are all set.
	 * Otherwise, determine a global queue to use and lock it.
	 */
	if (!waitq_irq_safe(waitq)) {
		safeq = global_eventq(waitq);
		eventmask = _CAST_TO_EVENT_MASK(waitq);
		waitq_lock(safeq);
	} else {
		safeq = waitq;
		eventmask = _CAST_TO_EVENT_MASK(wait_event);
	}

	/* lock the thread now that we have the irq-safe waitq locked */
	thread_lock(thread);

	/*
	 * Realtime threads get priority for wait queue placements.
	 * This allows wait_queue_wakeup_one to prefer a waiting
	 * realtime thread, similar in principle to performing
	 * a wait_queue_wakeup_all and allowing scheduler prioritization
	 * to run the realtime thread, but without causing the
	 * lock contention of that scenario.
	 */
	if (thread->sched_pri >= BASEPRI_REALTIME)
		realtime = 1;

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account. If the thread is vm privileged, we stick it at
	 * the front of the queue. Later, these queues will honor the policy
	 * value set at waitq_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	/* thread->wait_result has been set */
	if (wait_result == THREAD_WAITING) {
		if (!safeq->waitq_fifo
		    || (thread->options & TH_OPT_VMPRIV) || realtime)
			enqueue_head(&safeq->waitq_queue, &thread->wait_links);
		else
			enqueue_tail(&safeq->waitq_queue, &thread->wait_links);

		/* mark the event and real waitq, even if enqueued on a global safeq */
		thread->wait_event = wait_event;
		thread->waitq = waitq;

		if (deadline != 0) {
			boolean_t act;

			act = timer_call_enter_with_leeway(&thread->wait_timer,
							   NULL,
							   deadline, leeway,
							   urgency, FALSE);
			if (!act)
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}

		if (waitq_is_global(safeq))
			safeq->waitq_eventmask |= eventmask;

		waitq_stats_count_wait(waitq);
	}

	/* unlock the thread */
	thread_unlock(thread);

	/* unlock the safeq if we locked it here */
	if (safeq != waitq) {
		waitq_unlock(safeq);
	}

	splx(s);

	return wait_result;
}
/**
 * remove 'thread' from its current blocking state on 'waitq'
 *
 * Conditions:
 *	'thread' is locked
 *
 * Notes:
 *	This function is primarily used by clear_wait_internal in
 *	sched_prim.c from the thread timer wakeup path
 *	(i.e. the thread was waiting on 'waitq' with a timeout that expired)
 */
int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread)
{
	struct waitq *safeq;

	assert_thread_magic(thread);
	assert(thread->waitq == waitq);

	/* Find the interrupts disabled queue thread is waiting on */
	if (!waitq_irq_safe(waitq)) {
		safeq = global_eventq(waitq);
	} else {
		safeq = waitq;
	}

	/* thread is already locked so have to try for the waitq lock */
	if (!waitq_lock_try(safeq))
		return 0;

	remqueue(&thread->wait_links);
	thread_clear_waitq_state(thread);
	waitq_stats_count_clear_wakeup(waitq);

	/* clear the global event mask if this was the last thread there! */
	if (waitq_is_global(safeq) && queue_empty(&safeq->waitq_queue)) {
		safeq->waitq_eventmask = 0;
		/* JMM - also mark no-waiters on waitq (if not the same as the safeq) */
	}

	waitq_unlock(safeq);

	return 1;
}
void maybe_adjust_thread_pri(thread_t thread, int priority) {
	if (thread->sched_pri < priority) {
		if (priority <= MAXPRI) {
			set_sched_pri(thread, priority);

			thread->was_promoted_on_wakeup = 1;
			thread->sched_flags |= TH_SFLAG_PROMOTED;
		}
		return;
	}

	/*
	 * If the caller is requesting the waitq subsystem to promote the
	 * priority of the awoken thread, then boost the thread's priority to
	 * the default WAITQ_BOOST_PRIORITY (if it's not already equal or
	 * higher priority). This boost must be removed via a call to
	 * waitq_clear_promotion_locked.
	 */
	if (priority == WAITQ_PROMOTE_PRIORITY &&
	    (thread->sched_pri < WAITQ_BOOST_PRIORITY ||
	     !(thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED))) {

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE) | DBG_FUNC_NONE,
				      (uintptr_t)thread_tid(thread),
				      thread->sched_pri, thread->base_pri,
				      WAITQ_BOOST_PRIORITY, 0);
		thread->sched_flags |= TH_SFLAG_WAITQ_PROMOTED;
		if (thread->sched_pri < WAITQ_BOOST_PRIORITY)
			set_sched_pri(thread, WAITQ_BOOST_PRIORITY);
	}
}
/**
 * Clear a thread's waitq priority promotion state and the waitq's boost flag
 *
 * This function will always clear the waitq's 'waitq_boost' flag. If the
 * 'thread' parameter is non-null, then this function will also check the
 * priority promotion (boost) state of that thread. If this thread was boosted
 * (by having been awoken from a boosting waitq), then this boost state is
 * cleared. This function is to be paired with waitq_enable_promote_locked.
 */
void waitq_clear_promotion_locked(struct waitq *waitq, thread_t thread)
{
	spl_t s;

	assert(waitq_held(waitq));
	if (thread == THREAD_NULL)
		return;

	if (!waitq_irq_safe(waitq))
		s = splsched();
	thread_lock(thread);

	if (thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) {
		thread->sched_flags &= ~TH_SFLAG_WAITQ_PROMOTED;

		if (thread->sched_flags & TH_SFLAG_PROMOTED_MASK) {
			/* it still has other promotions (mutex/rw_lock) */
		} else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE) | DBG_FUNC_NONE,
					      (uintptr_t)thread_tid(thread),
					      thread->sched_pri, thread->base_pri,
					      DEPRESSPRI, 0);
			set_sched_pri(thread, DEPRESSPRI);
		} else {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE) | DBG_FUNC_NONE,
					      (uintptr_t)thread_tid(thread),
					      thread->sched_pri,
					      thread->base_pri, 0);
			thread_recompute_sched_pri(thread, FALSE);
		}
	}

	thread_unlock(thread);
	if (!waitq_irq_safe(waitq))
		splx(s);
}
/**
 * wakeup all threads waiting on 'waitq' for 'wake_event'
 *
 * Conditions:
 *	'waitq' is locked
 *
 * Notes:
 *	May temporarily disable and re-enable interrupts
 *	and re-adjust thread priority of each awoken thread.
 *
 *	If the input 'lock_state' == WAITQ_UNLOCK then the waitq will have
 *	been unlocked before calling thread_go() on any returned threads, and
 *	is guaranteed to be unlocked upon function return.
 */
kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq,
					event64_t wake_event,
					wait_result_t result,
					uint64_t *reserved_preposts,
					int priority,
					waitq_lock_state_t lock_state)
{
	kern_return_t ret;
	thread_t thread;
	spl_t th_spl;
	int nthreads;
	queue_head_t wakeup_queue;

	assert(waitq_held(waitq));
	queue_init(&wakeup_queue);

	nthreads = waitq_select_n_locked(waitq, wake_event, NULL, NULL,
					 reserved_preposts,
					 &wakeup_queue, -1, &th_spl);

	/* set each thread running */
	ret = KERN_NOT_WAITING;

#if CONFIG_WAITQ_STATS
	qe_foreach_element(thread, &wakeup_queue, wait_links)
		waitq_stats_count_wakeup(waitq);
#endif
	if (lock_state == WAITQ_UNLOCK)
		waitq_unlock(waitq);

	qe_foreach_element_safe(thread, &wakeup_queue, wait_links) {
		assert_thread_magic(thread);
		remqueue(&thread->wait_links);
		maybe_adjust_thread_pri(thread, priority);
		ret = thread_go(thread, result);
		assert(ret == KERN_SUCCESS);
		thread_unlock(thread);
	}
	if (nthreads > 0)
		splx(th_spl);
	else
		waitq_stats_count_fail(waitq);

	return ret;
}
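
/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * hypothetical caller waking every waiter on a queue. The caller takes
 * the waitq lock and passes WAITQ_UNLOCK so the wakeup drops it before
 * setting the woken threads running. NO_EVENT64, THREAD_AWAKENED and the
 * WAITQ_ALL_PRIORITIES priority value are assumed here for illustration.
 */
#if 0
static kern_return_t wakeup_all_example(struct waitq *wq)
{
	uint64_t reserved_preposts = 0;

	waitq_lock(wq);
	/* wakes all waiters for NO_EVENT64; returns KERN_NOT_WAITING if none */
	return waitq_wakeup64_all_locked(wq, NO_EVENT64, THREAD_AWAKENED,
					 &reserved_preposts,
					 WAITQ_ALL_PRIORITIES, WAITQ_UNLOCK);
}
#endif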
/**
 * wakeup one thread waiting on 'waitq' for 'wake_event'
 *
 * Conditions:
 *	'waitq' is locked
 *
 * Notes:
 *	May temporarily disable and re-enable interrupts.
 */
kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq,
					event64_t wake_event,
					wait_result_t result,
					uint64_t *reserved_preposts,
					int priority,
					waitq_lock_state_t lock_state)
{
	thread_t thread;
	spl_t th_spl;

	assert(waitq_held(waitq));

	if (priority == WAITQ_SELECT_MAX_PRI) {
		thread = waitq_select_max_locked(waitq, wake_event,
						 reserved_preposts,
						 &th_spl);
	} else {
		thread = waitq_select_one_locked(waitq, wake_event,
						 reserved_preposts,
						 priority, &th_spl);
	}

	if (thread != THREAD_NULL)
		waitq_stats_count_wakeup(waitq);
	else
		waitq_stats_count_fail(waitq);

	if (lock_state == WAITQ_UNLOCK)
		waitq_unlock(waitq);

	if (thread != THREAD_NULL) {
		maybe_adjust_thread_pri(thread, priority);
		kern_return_t ret = thread_go(thread, result);
		assert(ret == KERN_SUCCESS);
		thread_unlock(thread);
		splx(th_spl);
		return ret;
	}

	return KERN_NOT_WAITING;
}
/**
 * wakeup one thread waiting on 'waitq' for 'wake_event'
 *
 * Conditions:
 *	'waitq' is locked
 *
 * Returns:
 *	A locked, runnable thread.
 *	If return value is non-NULL, interrupts have also
 *	been disabled, and the caller is responsible to call
 *	splx() with the returned '*spl' value.
 */
thread_t
waitq_wakeup64_identify_locked(struct waitq *waitq,
			       event64_t wake_event,
			       wait_result_t result,
			       spl_t *spl,
			       uint64_t *reserved_preposts,
			       int priority,
			       waitq_lock_state_t lock_state)
{
	thread_t thread;

	assert(waitq_held(waitq));

	if (priority == WAITQ_SELECT_MAX_PRI) {
		thread = waitq_select_max_locked(waitq, wake_event,
						 reserved_preposts,
						 spl);
	} else {
		thread = waitq_select_one_locked(waitq, wake_event,
						 reserved_preposts,
						 priority, spl);
	}

	if (thread != THREAD_NULL)
		waitq_stats_count_wakeup(waitq);
	else
		waitq_stats_count_fail(waitq);

	if (lock_state == WAITQ_UNLOCK)
		waitq_unlock(waitq);

	if (thread != THREAD_NULL) {
		kern_return_t __assert_only ret;
		ret = thread_go(thread, result);
		assert(ret == KERN_SUCCESS);
	}

	return thread; /* locked if not NULL (caller responsible for spl) */
}
/**
 * wakeup a specific thread iff it's waiting on 'waitq' for 'wake_event'
 *
 * Conditions:
 *	'waitq' is locked
 *	'thread' is unlocked
 *
 * Notes:
 *	May temporarily disable and re-enable interrupts
 *
 *	If the input lock_state == WAITQ_UNLOCK then the waitq will have been
 *	unlocked before calling thread_go() if 'thread' is to be awoken, and
 *	is guaranteed to be unlocked upon function return.
 */
kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq,
					   event64_t wake_event,
					   thread_t thread,
					   wait_result_t result,
					   waitq_lock_state_t lock_state)
{
	kern_return_t ret;
	spl_t th_spl;

	assert(waitq_held(waitq));
	assert_thread_magic(thread);

	/*
	 * See if the thread was still waiting there. If so, it got
	 * dequeued and returned locked.
	 */
	ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl);

	if (ret == KERN_SUCCESS)
		waitq_stats_count_wakeup(waitq);
	else
		waitq_stats_count_fail(waitq);

	if (lock_state == WAITQ_UNLOCK)
		waitq_unlock(waitq);

	if (ret != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	ret = thread_go(thread, result);
	assert(ret == KERN_SUCCESS);
	thread_unlock(thread);
	splx(th_spl);

	return ret;
}
/* ----------------------------------------------------------------------
 *
 * In-Kernel API
 *
 * ---------------------------------------------------------------------- */

/**
 * initialize a waitq object
 */
kern_return_t waitq_init(struct waitq *waitq, int policy)
{
	assert(waitq != NULL);

	/* only FIFO and LIFO for now */
	if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
		return KERN_INVALID_ARGUMENT;

	waitq->waitq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
	waitq->waitq_irq = !!(policy & SYNC_POLICY_DISABLE_IRQ);
	waitq->waitq_prepost = 0;
	waitq->waitq_type = WQT_QUEUE;
	waitq->waitq_eventmask = 0;

	waitq->waitq_set_id = 0;
	waitq->waitq_prepost_id = 0;

	waitq_lock_init(waitq);
	queue_init(&waitq->waitq_queue);

	waitq->waitq_isvalid = 1;
	return KERN_SUCCESS;
}
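
/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * minimal caller of waitq_init()/waitq_deinit(). The 'my_object' wrapper
 * and SYNC_POLICY_FIFO (the default, zero policy) are assumptions made
 * only for this example.
 */
#if 0
struct my_object {
	struct waitq mo_waitq;
	/* ... other fields ... */
};

static kern_return_t my_object_setup(struct my_object *mo)
{
	/* FIFO ordering, IRQ-unsafe (the lock is never taken from interrupt context) */
	return waitq_init(&mo->mo_waitq, SYNC_POLICY_FIFO);
}

static void my_object_teardown(struct my_object *mo)
{
	/* releases any link/prepost table resources held by the waitq */
	waitq_deinit(&mo->mo_waitq);
}
#endif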
struct wq_unlink_ctx {
	struct waitq *unlink_wq;
	struct waitq_set *unlink_wqset;
};

static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx,
				   struct wq_prepost *wqp, struct waitq *waitq);
/**
 * walk_waitq_links callback to invalidate 'link' parameter
 *
 * Conditions:
 *	Called from walk_waitq_links.
 *	Note that unlike other callbacks, this one makes no assumptions about
 *	the 'waitq' parameter, specifically it does not have to be locked or
 *	even valid.
 */
static int waitq_unlink_all_cb(struct waitq *waitq, void *ctx,
			       struct waitq_link *link)
{
	(void)ctx;

	if (wql_type(link) == WQL_LINK && wql_is_valid(link))
		wql_invalidate(link);

	if (wql_type(link) == WQL_WQS) {
		struct waitq_set *wqset;
		struct wq_unlink_ctx ulctx;

		/*
		 * When destroying the waitq, take the time to clear out any
		 * preposts it may have made. This could potentially save time
		 * on the IPC send path which would otherwise have to iterate
		 * over lots of dead port preposts.
		 */
		if (waitq->waitq_prepost_id == 0)
			goto out;

		wqset = link->wql_wqs.wql_set;
		assert(wqset != NULL);
		assert(!waitq_irq_safe(&wqset->wqset_q));

		waitq_set_lock(wqset);

		if (!waitq_set_is_valid(wqset)) {
			/* someone raced us to teardown */
			goto out_unlock;
		}
		if (!waitq_set_maybe_preposted(wqset))
			goto out_unlock;

		ulctx.unlink_wq = waitq;
		ulctx.unlink_wqset = wqset;
		(void)wq_prepost_iterate(wqset->wqset_prepost_id, &ulctx,
					 waitq_unlink_prepost_cb);
out_unlock:
		waitq_set_unlock(wqset);
	}

out:
	return WQ_ITERATE_CONTINUE;
}
/**
 * cleanup any link/prepost table resources associated with a waitq
 */
void waitq_deinit(struct waitq *waitq)
{
	spl_t s;

	if (!waitq || !waitq_is_queue(waitq))
		return;

	if (waitq_irq_safe(waitq))
		s = splsched();
	waitq_lock(waitq);
	if (!waitq_valid(waitq)) {
		waitq_unlock(waitq);
		if (waitq_irq_safe(waitq))
			splx(s);
		return;
	}

	waitq->waitq_type = WQT_INVALID;
	waitq->waitq_isvalid = 0;

	if (!waitq_irq_safe(waitq)) {
		waitq_unlink_all_unlock(waitq);
		/* waitq unlocked and set links deallocated */
	} else {
		waitq_unlock(waitq);
		splx(s);
	}

	assert(queue_empty(&waitq->waitq_queue));
}
void waitq_invalidate_locked(struct waitq *waitq)
{
	assert(waitq_held(waitq));
	assert(waitq_is_valid(waitq));
	waitq->waitq_isvalid = 0;
}
/**
 * invalidate the given wq_prepost object
 *
 * Conditions:
 *	Called from wq_prepost_iterate (_not_ from wq_prepost_foreach_locked!)
 */
static int wqset_clear_prepost_chain_cb(struct waitq_set __unused *wqset,
					void __unused *ctx,
					struct wq_prepost *wqp,
					struct waitq __unused *waitq)
{
	if (wqp_type(wqp) == WQP_POST)
		wq_prepost_invalidate(wqp);
	return WQ_ITERATE_CONTINUE;
}
/**
 * allocate and initialize a waitq set object
 *
 * Returns:
 *	allocated / initialized waitq_set object
 */
struct waitq_set *waitq_set_alloc(int policy, void *prepost_hook)
{
	struct waitq_set *wqset;
	kern_return_t ret;

	wqset = (struct waitq_set *)zalloc(waitq_set_zone);
	if (!wqset)
		panic("Can't allocate a new waitq set from zone %p", waitq_set_zone);

	ret = waitq_set_init(wqset, policy, NULL, prepost_hook);
	if (ret != KERN_SUCCESS) {
		zfree(waitq_set_zone, wqset);
		wqset = NULL;
	}

	return wqset;
}
/**
 * initialize a waitq set object
 *
 * Conditions:
 *	may (rarely) block if link table needs to grow, and
 *	no 'reserved_link' object is passed.
 */
kern_return_t waitq_set_init(struct waitq_set *wqset,
			     int policy, uint64_t *reserved_link,
			     void *prepost_hook)
{
	struct waitq_link *link;
	kern_return_t ret;

	memset(wqset, 0, sizeof(*wqset));

	ret = waitq_init(&wqset->wqset_q, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqset_q.waitq_type = WQT_SET;
	if (policy & SYNC_POLICY_PREPOST) {
		wqset->wqset_q.waitq_prepost = 1;
		wqset->wqset_prepost_id = 0;
		assert(prepost_hook == NULL);
	} else {
		wqset->wqset_q.waitq_prepost = 0;
		wqset->wqset_prepost_hook = prepost_hook;
	}

	if (reserved_link && *reserved_link != 0) {
		link = wql_get_reserved(*reserved_link, WQL_WQS);
		/* always consume the caller's reference */
		*reserved_link = 0;
	} else {
		link = wql_alloc_link(WQL_WQS);
	}
	if (!link)
		panic("Can't allocate link object for waitq set: %p", wqset);

	link->wql_wqs.wql_set = wqset;

	wql_mkvalid(link);
	wqset->wqset_id = link->wql_setid.id;
	wql_put_link(link);

	return KERN_SUCCESS;
}
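
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * allocating a prepost-enabled wait queue set and tearing it down again.
 * The SYNC_POLICY_FIFO constant and the minimal error handling are
 * assumptions for illustration only.
 */
#if 0
static void wqset_alloc_example(void)
{
	struct waitq_set *wqset;

	/* prepost-enabled set; no prepost hook */
	wqset = waitq_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, NULL);
	if (wqset == NULL)
		return;

	/* ... link waitqs into the set, wait on it, etc. ... */

	/* de-initialize and free: the set must not be used afterwards */
	(void)waitq_set_free(wqset);
}
#endif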
/**
 * clear out / release any resources associated with a waitq set
 *
 * Note:
 *	This will render the waitq set invalid, and it must
 *	be re-initialized with waitq_set_init before it can be used again
 */
void waitq_set_deinit(struct waitq_set *wqset)
{
	struct waitq_link *link = NULL;
	uint64_t set_id, prepost_id;

	if (!waitqs_is_set(wqset))
		panic("trying to de-initialize an invalid wqset @%p", wqset);

	assert(!waitq_irq_safe(&wqset->wqset_q));
	waitq_set_lock(wqset);

	set_id = wqset->wqset_id;

	/* grab the set's link object */
	link = wql_get_link(set_id);
	if (link)
		wql_invalidate(link);

	/* someone raced us to deinit */
	if (!link || wqset->wqset_id != set_id || set_id != link->wql_setid.id) {
		if (link)
			wql_put_link(link);
		waitq_set_unlock(wqset);
		return;
	}

	/* every wait queue set should have a valid link object */
	assert(link != NULL && wql_type(link) == WQL_WQS);

	wqset->wqset_id = 0;

	/*
	 * This set may have a lot of preposts, or may have been a member of
	 * many other sets. To minimize spinlock hold times, we clear out the
	 * waitq set data structure under the lock-hold, but don't clear any
	 * table objects. We keep handles to the prepost and set linkage
	 * objects and free those outside the critical section.
	 */
	prepost_id = 0;
	if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id)
		prepost_id = wqset->wqset_prepost_id;
	/* else { TODO: notify kqueue subsystem? } */
	wqset->wqset_prepost_id = 0;

	wqset->wqset_q.waitq_type = WQT_INVALID;
	wqset->wqset_q.waitq_fifo = 0;
	wqset->wqset_q.waitq_prepost = 0;
	wqset->wqset_q.waitq_isvalid = 0;

	/* don't clear the 'waitq_irq' bit: it's used in locking! */
	wqset->wqset_q.waitq_eventmask = 0;

	waitq_unlink_all_unlock(&wqset->wqset_q);
	/* wqset->wqset_q unlocked and set links deallocated */

	/*
	 * walk_waitq_links may race with us for access to the waitq set.
	 * If walk_waitq_links has a reference to the set, then we should wait
	 * until the link's refcount goes to 1 (our reference) before we exit
	 * this function. That way we ensure that the waitq set memory will
	 * remain valid even though it's been cleared out.
	 */
	while (wql_refcnt(link) > 1)
		delay(1);
	wql_put_link(link);

	/* drop / unlink all the prepost table objects */
	/* JMM - can this happen before the delay? */
	if (prepost_id)
		(void)wq_prepost_iterate(prepost_id, NULL,
					 wqset_clear_prepost_chain_cb);
}
/**
 * de-initialize and free an allocated waitq set object
 */
kern_return_t waitq_set_free(struct waitq_set *wqset)
{
	waitq_set_deinit(wqset);

	memset(wqset, 0, sizeof(*wqset));
	zfree(waitq_set_zone, wqset);

	return KERN_SUCCESS;
}
#if defined(DEVELOPMENT) || defined(DEBUG)
#if CONFIG_WAITQ_DEBUG
/**
 * return the set ID of 'wqset'
 */
uint64_t wqset_id(struct waitq_set *wqset)
{
	if (!wqset)
		return 0;

	assert(waitqs_is_set(wqset));
	return wqset->wqset_id;
}

/**
 * returns a pointer to the waitq object embedded in 'wqset'
 */
struct waitq *wqset_waitq(struct waitq_set *wqset)
{
	if (!wqset)
		return NULL;

	assert(waitqs_is_set(wqset));

	return &wqset->wqset_q;
}
#endif /* CONFIG_WAITQ_DEBUG */
#endif /* DEVELOPMENT || DEBUG */
/**
 * clear all preposts originating from 'waitq'
 *
 * Conditions:
 *	'waitq' is locked
 *	may (rarely) spin waiting for another on-core thread to
 *	release the last reference to the waitq's prepost link object
 *
 * NOTE:
 *	If this function needs to spin, it will drop the waitq lock!
 *	The return value of the function indicates whether or not this
 *	happened: 1 == lock was dropped, 0 == lock held
 */
int waitq_clear_prepost_locked(struct waitq *waitq)
{
	struct wq_prepost *wqp;
	int dropped_lock = 0;

	assert(!waitq_irq_safe(waitq));

	if (waitq->waitq_prepost_id == 0)
		return 0;

	wqp = wq_prepost_get(waitq->waitq_prepost_id);
	waitq->waitq_prepost_id = 0;
	if (wqp) {
		uint64_t wqp_id = wqp->wqp_prepostid.id;
		wqdbg_v("invalidate prepost 0x%llx (refcnt:%d)",
			wqp->wqp_prepostid.id, wqp_refcnt(wqp));
		wq_prepost_invalidate(wqp);
		while (wqp_refcnt(wqp) > 1) {
			/*
			 * Some other thread must have raced us to grab a link
			 * object reference before we invalidated it. This
			 * means that they are probably trying to access the
			 * waitq to which the prepost object points. We need
			 * to wait here until the other thread drops their
			 * reference. We know that no one else can get a
			 * reference (the object has been invalidated), and
			 * that prepost references are short-lived (dropped on
			 * a call to wq_prepost_put). We also know that no one
			 * blocks while holding a reference, therefore the
			 * other reference holder must be on-core. We'll just
			 * sit and wait for the other reference to be dropped.
			 */
			disable_preemption();

			waitq_unlock(waitq);
			dropped_lock = 1;
			/*
			 * don't yield here, just spin and assume the other
			 * consumer is already on core...
			 */
			delay(1);

			waitq_lock(waitq);

			enable_preemption();
		}
		if (wqp_refcnt(wqp) > 0 && wqp->wqp_prepostid.id == wqp_id)
			wq_prepost_put(wqp);
	}

	return dropped_lock;
}
/**
 * clear all preposts originating from 'waitq'
 *
 * Conditions:
 *	'waitq' is not locked
 *	may disable and re-enable interrupts
 */
void waitq_clear_prepost(struct waitq *waitq)
{
	assert(waitq_valid(waitq));
	assert(!waitq_irq_safe(waitq));

	waitq_lock(waitq);
	/* it doesn't matter to us if the lock is dropped here */
	(void)waitq_clear_prepost_locked(waitq);
	waitq_unlock(waitq);
}
/**
 * return the waitq's prepost object ID (allocate if necessary)
 *
 * Conditions:
 *	'waitq' is unlocked
 */
uint64_t waitq_get_prepost_id(struct waitq *waitq)
{
	struct wq_prepost *wqp;
	uint64_t wqp_id = 0;

	if (!waitq_valid(waitq))
		return 0;

	assert(!waitq_irq_safe(waitq));

	waitq_lock(waitq);

	if (!waitq_valid(waitq))
		goto out_unlock;

	if (waitq->waitq_prepost_id) {
		wqp_id = waitq->waitq_prepost_id;
		goto out_unlock;
	}

	/* don't hold a spinlock while allocating a prepost object */
	waitq_unlock(waitq);

	wqp = wq_prepost_alloc(WQP_WQ, 1);
	if (!wqp)
		return 0;

	/* re-acquire the waitq lock */
	waitq_lock(waitq);

	if (!waitq_valid(waitq)) {
		wq_prepost_put(wqp);
		wqp_id = 0;
		goto out_unlock;
	}

	if (waitq->waitq_prepost_id) {
		/* we were beat by someone else */
		wq_prepost_put(wqp);
		wqp_id = waitq->waitq_prepost_id;
		goto out_unlock;
	}

	wqp->wqp_wq.wqp_wq_ptr = waitq;

	wqp_set_valid(wqp);
	wqp_id = wqp->wqp_prepostid.id;
	waitq->waitq_prepost_id = wqp_id;

	wq_prepost_put(wqp);

out_unlock:
	waitq_unlock(waitq);

	return wqp_id;
}
static int waitq_inset_cb(struct waitq *waitq, void *ctx, struct waitq_link *link)
{
	uint64_t setid = *(uint64_t *)ctx;
	int wqltype = wql_type(link);
	(void)waitq;
	if (wqltype == WQL_WQS && link->wql_setid.id == setid) {
		wqdbg_v("  waitq already in set 0x%llx", setid);
		return WQ_ITERATE_FOUND;
	} else if (wqltype == WQL_LINK) {
		/*
		 * break out early if we see a link that points to the setid
		 * in question. This saves us a step in the
		 * iteration/recursion
		 */
		wqdbg_v("  waitq already in set 0x%llx (WQL_LINK)", setid);
		if (link->wql_link.left_setid == setid ||
		    link->wql_link.right_setid == setid)
			return WQ_ITERATE_FOUND;
	}

	return WQ_ITERATE_CONTINUE;
}
/**
 * determine if 'waitq' is a member of 'wqset'
 *
 * Conditions:
 *	neither 'waitq' nor 'wqset' is locked
 *	may disable and re-enable interrupts while locking 'waitq'
 */
boolean_t waitq_member(struct waitq *waitq, struct waitq_set *wqset)
{
	kern_return_t kr = WQ_ITERATE_SUCCESS;
	uint64_t setid;

	if (!waitq_valid(waitq))
		panic("Invalid waitq: %p", waitq);
	assert(!waitq_irq_safe(waitq));

	if (!waitqs_is_set(wqset))
		return FALSE;

	waitq_lock(waitq);

	setid = wqset->wqset_id;
	if (!setid)
		goto out_unlock;

	/* fast path: most waitqs are members of only 1 set */
	if (waitq->waitq_set_id == setid) {
		waitq_unlock(waitq);
		return TRUE;
	}

	/* walk the link table and look for the Set ID of wqset */
	kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
			      WQL_ALL, (void *)&setid, waitq_inset_cb);

out_unlock:
	waitq_unlock(waitq);
	return (kr == WQ_ITERATE_FOUND);
}
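
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * querying set membership from a hypothetical caller. Both objects are
 * assumed to have been initialized elsewhere; waitq_member() takes its
 * own locks, so neither may be locked (or IRQ-safe) here.
 */
#if 0
static boolean_t example_is_member(struct waitq *wq, struct waitq_set *wqset)
{
	if (!waitq_valid(wq) || !waitqs_is_set(wqset))
		return FALSE;

	return waitq_member(wq, wqset);
}
#endif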
/**
 * Returns true if the given waitq is a member of at least 1 set
 */
boolean_t waitq_in_set(struct waitq *waitq)
{
	struct waitq_link *link;
	boolean_t inset = FALSE;

	if (waitq_irq_safe(waitq))
		return FALSE;

	waitq_lock(waitq);

	if (!waitq->waitq_set_id)
		goto out_unlock;

	link = wql_get_link(waitq->waitq_set_id);
	if (link) {
		/* if we get here, the waitq is in _at_least_one_ set */
		inset = TRUE;
		wql_put_link(link);
	} else {
		/* we can just optimize this for next time */
		waitq->waitq_set_id = 0;
	}

out_unlock:
	waitq_unlock(waitq);
	return inset;
}
/**
 * pre-allocate a waitq link structure from the link table
 *
 * Conditions:
 *	'waitq' is not locked
 *	may (rarely) block if link table needs to grow
 */
uint64_t waitq_link_reserve(struct waitq *waitq)
{
	struct waitq_link *link;
	uint64_t reserved_id = 0;

	assert(get_preemption_level() == 0 && waitq_wait_possible(current_thread()));

	/*
	 * We've asserted that the caller can block, so we enforce a
	 * minimum-free table element policy here.
	 */
	wql_ensure_free_space();

	(void)waitq;
	link = wql_alloc_link(LT_RESERVED);
	if (!link)
		return 0;

	reserved_id = link->wql_setid.id;

	return reserved_id;
}
/**
 * release a pre-allocated waitq link structure
 */
void waitq_link_release(uint64_t id)
{
	struct waitq_link *link;

	if (id == 0)
		return;

	link = wql_get_reserved(id, WQL_LINK);
	if (!link)
		return;

	/*
	 * if we successfully got a link object, then we know
	 * it's not been marked valid, and can be released with
	 * a standard wql_put_link() which should free the element.
	 */
	wql_put_link(link);
#if CONFIG_LTABLE_STATS
	g_wqlinktable.nreserved_releases += 1;
#endif
}
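
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * pre-reserving a link object so a later waitq_link() cannot block, and
 * releasing the reservation if it ends up unused. The error handling and
 * calling context (both objects valid, waitq unlocked) are assumptions.
 */
#if 0
static void link_reserve_example(struct waitq *wq, struct waitq_set *wqset)
{
	uint64_t reserved_link;

	/* may block: call from a context that is allowed to block */
	reserved_link = waitq_link_reserve(wq);

	/* WAITQ_SHOULD_LOCK: waitq_link() takes and drops the waitq lock itself */
	if (waitq_link(wq, wqset, WAITQ_SHOULD_LOCK, &reserved_link) != KERN_SUCCESS) {
		/* the reservation is zeroed when consumed; release it otherwise */
		if (reserved_link)
			waitq_link_release(reserved_link);
	}
}
#endif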
/**
 * link 'waitq' to the set identified by 'setid' using the 'link' structure
 *
 * Conditions:
 *	'waitq' is locked
 *	caller should have a reference to the 'link' object
 */
static kern_return_t waitq_link_internal(struct waitq *waitq,
					 uint64_t setid, struct waitq_link *link)
{
	struct waitq_link *qlink;
	kern_return_t kr;

	assert(waitq_held(waitq));

	/*
	 * If the waitq_set_id field is empty, then this waitq is not
	 * a member of any other set. All we have to do is update the
	 * field.
	 */
	if (!waitq->waitq_set_id) {
		waitq->waitq_set_id = setid;
		return KERN_SUCCESS;
	}

	qlink = wql_get_link(waitq->waitq_set_id);
	if (!qlink) {
		/*
		 * The set to which this wait queue belonged has been
		 * destroyed / invalidated. We can re-use the waitq field.
		 */
		waitq->waitq_set_id = setid;
		return KERN_SUCCESS;
	}
	wql_put_link(qlink);

	/*
	 * Check to see if it's already a member of the set.
	 *
	 * TODO: check for cycles!
	 */
	kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
			      WQL_ALL, (void *)&setid, waitq_inset_cb);
	if (kr == WQ_ITERATE_FOUND)
		return kr;

	/*
	 * This wait queue is a member of at least one set already,
	 * and _not_ a member of the given set. Use our previously
	 * allocated link object, and hook it up to the wait queue.
	 * Note that it's possible that one or more of the wait queue sets to
	 * which the wait queue belongs was invalidated before we allocated
	 * this link object. That's OK because the next time we use that
	 * object we'll just ignore it.
	 */
	link->wql_link.left_setid = setid;
	link->wql_link.right_setid = waitq->waitq_set_id;
	wql_mkvalid(link);

	waitq->waitq_set_id = link->wql_setid.id;

	return KERN_SUCCESS;
}
/**
 * link 'waitq' to 'wqset'
 *
 * Conditions:
 *	if 'lock_state' contains WAITQ_SHOULD_LOCK, 'waitq' must be unlocked.
 *	Otherwise, 'waitq' must be locked.
 *
 *	may (rarely) block on link table allocation if the table has to grow,
 *	and no 'reserved_link' object is passed.
 *
 * Notes:
 *	The caller can guarantee that this function will never block by
 *	pre-allocating a link table object and passing its ID in 'reserved_link'
 */
kern_return_t waitq_link(struct waitq *waitq, struct waitq_set *wqset,
			 waitq_lock_state_t lock_state, uint64_t *reserved_link)
{
	kern_return_t kr;
	struct waitq_link *link;
	int should_lock = (lock_state == WAITQ_SHOULD_LOCK);

	if (!waitq_valid(waitq) || waitq_irq_safe(waitq))
		panic("Invalid waitq: %p", waitq);

	if (!waitqs_is_set(wqset))
		return KERN_INVALID_ARGUMENT;

	wqdbg_v("Link waitq %p to wqset 0x%llx",
		(void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id);

	/*
	 * We _might_ need a new link object here, so we'll grab outside
	 * the lock because the alloc call _might_ block.
	 *
	 * If the caller reserved a link beforehand, then wql_get_link
	 * is guaranteed not to block because the caller holds an extra
	 * reference to the link which, in turn, holds a reference to the
	 * link table.
	 */
	if (reserved_link && *reserved_link != 0) {
		link = wql_get_reserved(*reserved_link, WQL_LINK);
		/* always consume the caller's reference */
		*reserved_link = 0;
	} else {
		link = wql_alloc_link(WQL_LINK);
	}
	if (!link)
		return KERN_NO_SPACE;

	if (should_lock) {
		waitq_lock(waitq);
	}

	kr = waitq_link_internal(waitq, wqset->wqset_id, link);

	if (should_lock) {
		waitq_unlock(waitq);
	}

	wql_put_link(link);

	return kr;
}
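
/*
 * Illustrative sketch (not part of the original file, not compiled): the
 * simple (possibly blocking) way a hypothetical caller would add a waitq
 * to a set and remove it again. Both objects are assumed valid and
 * unlocked.
 */
#if 0
static void link_unlink_example(struct waitq *wq, struct waitq_set *wqset)
{
	kern_return_t kr;

	/* no reserved link: the link table allocation may block */
	kr = waitq_link(wq, wqset, WAITQ_SHOULD_LOCK, NULL);
	if (kr != KERN_SUCCESS)
		return;

	/* ... the waitq can now prepost to / be waited on via 'wqset' ... */

	(void)waitq_unlink(wq, wqset);
}
#endif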
/**
 * helper: unlink 'waitq' from waitq set identified by 'setid';
 *	   this function also prunes invalid objects from the tree
 *
 * Conditions:
 *	MUST be called from a walk_waitq_links link table walk
 *	'waitq' is locked
 *
 * Notes:
 *	This is a helper function which compresses the link table by culling
 *	unused or unnecessary links. See comments below for the different
 *	scenarios.
 */
static inline int waitq_maybe_remove_link(struct waitq *waitq,
					  uint64_t setid,
					  struct waitq_link *parent,
					  struct waitq_link *left,
					  struct waitq_link *right)
{
	uint64_t *wq_setid = &waitq->waitq_set_id;

	/*
	 * There are two scenarios:
	 *
	 * Scenario 1:
	 * --------------------------------------------------------------------
	 * waitq->waitq_set_id == parent
	 *
	 *              parent(LINK)
	 *                /      \
	 *     L(LINK/WQS_l)     R(LINK/WQS_r)
	 *
	 * In this scenario, we assert that the original waitq points to the
	 * parent link we were passed in. If WQS_l (or WQS_r) is the waitq
	 * set we're looking for, we can set the corresponding parent
	 * link id (left or right) to 0. To compress the tree, we can reset the
	 * waitq_set_id of the original waitq to point to the side of the
	 * parent that is still valid. We then discard the parent link object.
	 */
	if (*wq_setid == parent->wql_setid.id) {
		if (!left && !right) {
			/* completely invalid children */
			wql_invalidate(parent);
			*wq_setid = 0;
			return WQ_ITERATE_INVALID;
		} else if (!left || left->wql_setid.id == setid) {
			/*
			 * if the left side matches, we know it points either to the
			 * WQS we're unlinking, or to an invalid object:
			 * no need to invalidate it
			 */
			*wq_setid = right ? right->wql_setid.id : 0;
			wql_invalidate(parent);
			return left ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
		} else if (!right || right->wql_setid.id == setid) {
			/*
			 * if the right side matches, we know it points either to the
			 * WQS we're unlinking, or to an invalid object:
			 * no need to invalidate it
			 */
			*wq_setid = left ? left->wql_setid.id : 0;
			wql_invalidate(parent);
			return right ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
		}
	}

	/*
	 * the tree walk starts at the top-of-tree and moves down,
	 * so these are safe asserts.
	 */
	assert(left || right); /* one of them has to be valid at this point */

	/*
	 * Scenario 2:
	 * --------------------------------------------------------------------
	 * waitq->waitq_set_id == ... (OR parent)
	 *
	 * Here 'left' and/or 'right' are themselves WQL_LINK objects whose
	 * children are the wait queue sets (or further links).
	 *
	 * In this scenario, a leaf node of either the left or right side
	 * could be the wait queue set we're looking to unlink. We also handle
	 * the case where one of these links is invalid. If a leaf node is
	 * invalid or it's the set we're looking for, we can safely remove the
	 * middle link (left or right) and point the parent link directly to
	 * the remaining leaf node.
	 */
	if (left && wql_type(left) == WQL_LINK) {
		uint64_t Ll, Lr;
		struct waitq_link *linkLl, *linkLr;
		assert(left->wql_setid.id != setid);
		Ll = left->wql_link.left_setid;
		Lr = left->wql_link.right_setid;
		linkLl = wql_get_link(Ll);
		linkLr = wql_get_link(Lr);
		if (!linkLl && !linkLr) {
			/*
			 * The left object points to two invalid objects!
			 * We can invalidate the left w/o touching the parent.
			 */
			wql_invalidate(left);
			wqdbg_v("S2, Ll+Lr");
			return WQ_ITERATE_INVALID;
		} else if (!linkLl || Ll == setid) {
			/* Ll is invalid and/or the wait queue set we're looking for */
			parent->wql_link.left_setid = Lr;
			wql_invalidate(left);
			wql_put_link(linkLl);
			wql_put_link(linkLr);
			return linkLl ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
		} else if (!linkLr || Lr == setid) {
			/* Lr is invalid and/or the wait queue set we're looking for */
			parent->wql_link.left_setid = Ll;
			wql_invalidate(left);
			wql_put_link(linkLr);
			wql_put_link(linkLl);
			return linkLr ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
		}
		wql_put_link(linkLl);
		wql_put_link(linkLr);
	}

	if (right && wql_type(right) == WQL_LINK) {
		uint64_t Rl, Rr;
		struct waitq_link *linkRl, *linkRr;
		assert(right->wql_setid.id != setid);
		Rl = right->wql_link.left_setid;
		Rr = right->wql_link.right_setid;
		linkRl = wql_get_link(Rl);
		linkRr = wql_get_link(Rr);
		if (!linkRl && !linkRr) {
			/*
			 * The right object points to two invalid objects!
			 * We can invalidate the right w/o touching the parent.
			 */
			wql_invalidate(right);
			wqdbg_v("S2, Rl+Rr");
			return WQ_ITERATE_INVALID;
		} else if (!linkRl || Rl == setid) {
			/* Rl is invalid and/or the wait queue set we're looking for */
			parent->wql_link.right_setid = Rr;
			wql_invalidate(right);
			wql_put_link(linkRl);
			wql_put_link(linkRr);
			return linkRl ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
		} else if (!linkRr || Rr == setid) {
			/* Rr is invalid and/or the wait queue set we're looking for */
			parent->wql_link.right_setid = Rl;
			wql_invalidate(right);
			wql_put_link(linkRl);
			wql_put_link(linkRr);
			return linkRr ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
		}
		wql_put_link(linkRl);
		wql_put_link(linkRr);
	}

	return WQ_ITERATE_CONTINUE;
}
/**
 * link table walk callback that unlinks 'waitq' from 'ctx->setid'
 *
 * Conditions:
 *	called from walk_waitq_links
 *	'waitq' is locked
 *
 * Notes:
 *	uses waitq_maybe_remove_link() to compress the linktable and
 *	perform the actual unlinking
 */
static int waitq_unlink_cb(struct waitq *waitq, void *ctx,
			   struct waitq_link *link)
{
	uint64_t setid = *((uint64_t *)ctx);
	struct waitq_link *right, *left;
	int ret = 0;

	if (wql_type(link) != WQL_LINK)
		return WQ_ITERATE_CONTINUE;

	do {
		left  = wql_get_link(link->wql_link.left_setid);
		right = wql_get_link(link->wql_link.right_setid);

		ret = waitq_maybe_remove_link(waitq, setid, link, left, right);

		wql_put_link(left);
		wql_put_link(right);

		if (!wql_is_valid(link))
			return WQ_ITERATE_INVALID;
		/* A ret value of UNLINKED will break us out of table walk */
	} while (ret == WQ_ITERATE_INVALID);

	return ret;
}
/**
 * undo/remove a prepost from 'ctx' (waitq) to 'wqset'
 *
 * Conditions:
 *	Called from wq_prepost_foreach_locked OR wq_prepost_iterate
 *	'wqset' may be NULL
 *	(ctx)->unlink_wqset is locked
 */
static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx,
				   struct wq_prepost *wqp, struct waitq *waitq)
{
	struct wq_unlink_ctx *ulctx = (struct wq_unlink_ctx *)ctx;

	if (waitq != ulctx->unlink_wq)
		return WQ_ITERATE_CONTINUE;

	if (wqp_type(wqp) == WQP_WQ &&
	    wqp->wqp_prepostid.id == ulctx->unlink_wqset->wqset_prepost_id) {
		/* this is the only prepost on this wait queue set */
		wqdbg_v("unlink wqp (WQ) 0x%llx", wqp->wqp_prepostid.id);
		ulctx->unlink_wqset->wqset_prepost_id = 0;
		return WQ_ITERATE_BREAK;
	}

	assert(wqp_type(wqp) == WQP_POST);

	/*
	 * The prepost object 'wqp' points to a waitq which should no longer
	 * be preposted to 'ulctx->unlink_wqset'. We can remove the prepost
	 * object from the list and break out of the iteration. Using the
	 * context object in this way allows this same callback function to be
	 * used from both wq_prepost_foreach_locked and wq_prepost_iterate.
	 */
	wq_prepost_remove(ulctx->unlink_wqset, wqp);
	return WQ_ITERATE_BREAK;
}
/**
 * unlink 'waitq' from 'wqset'
 *
 * Conditions:
 *	'waitq' is locked
 *	'wqset' is _not_ locked
 *	may (rarely) spin in prepost clear and drop/re-acquire 'waitq' lock
 *	(see waitq_clear_prepost_locked)
 */
static kern_return_t waitq_unlink_locked(struct waitq *waitq,
					 struct waitq_set *wqset)
{
	uint64_t setid;
	kern_return_t kr;

	assert(!waitq_irq_safe(waitq));

	setid = wqset->wqset_id;

	if (waitq->waitq_set_id == 0) {
		/*
		 * It's not a member of any set...
		 * but it doesn't belong to anyone, and it has a prepost object?
		 * This is an artifact of not cleaning up after kqueues when
		 * they prepost into select sets...
		 */
		if (waitq->waitq_prepost_id != 0)
			(void)waitq_clear_prepost_locked(waitq);
		return KERN_NOT_IN_SET;
	}

	if (waitq->waitq_set_id == setid) {
		waitq->waitq_set_id = 0;
		/*
		 * This was the only set to which the waitq belonged: we can
		 * safely release the waitq's prepost object. It doesn't
		 * matter if this function drops and re-acquires the lock
		 * because we're not manipulating waitq state any more.
		 */
		(void)waitq_clear_prepost_locked(waitq);
		return KERN_SUCCESS;
	}

	/*
	 * The waitq was a member of more than 1 set, so we need to
	 * handle potentially compressing the link table, and
	 * adjusting the waitq->waitq_set_id value.
	 *
	 * Note: we can't free the waitq's associated prepost object (if any)
	 *       because it may be in use by one or more _other_ sets to
	 *       which this queue belongs.
	 *
	 * Note: This function only handles a single level of the queue linkage.
	 *       Removing a waitq from a set to which it does not directly
	 *       belong is undefined. For example, if a waitq belonged to set
	 *       A, and set A belonged to set B, you can't remove the waitq
	 *       from set B with this function.
	 */
	kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
			      WQL_LINK, (void *)&setid, waitq_unlink_cb);

	if (kr == WQ_ITERATE_UNLINKED) {
		struct wq_unlink_ctx ulctx;

		kr = KERN_SUCCESS; /* found it and dis-associated it */

		/* don't look for preposts if it's not prepost-enabled */
		if (!wqset->wqset_q.waitq_prepost)
			goto out;

		assert(!waitq_irq_safe(&wqset->wqset_q));

		waitq_set_lock(wqset);
		/*
		 * clear out any prepost from waitq into wqset
		 * TODO: this could be more efficient than a linear search of
		 *       the waitq set's prepost list.
		 */
		ulctx.unlink_wq = waitq;
		ulctx.unlink_wqset = wqset;
		(void)wq_prepost_iterate(wqset->wqset_prepost_id, (void *)&ulctx,
					 waitq_unlink_prepost_cb);
		waitq_set_unlock(wqset);
	} else {
		kr = KERN_NOT_IN_SET; /* waitq is _not_ associated with wqset */
	}

out:
	return kr;
}
/**
 * unlink 'waitq' from 'wqset'
 *
 * Conditions:
 *	neither 'waitq' nor 'wqset' is locked
 *	may disable and re-enable interrupts
 *	may (rarely) spin in prepost clear
 *	(see waitq_clear_prepost_locked)
 */
kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset)
{
	kern_return_t kr = KERN_SUCCESS;

	assert(waitqs_is_set(wqset));

	/*
	 * we allow the waitq to be invalid because the caller may be trying
	 * to clear out old/dirty state
	 */
	if (!waitq_valid(waitq))
		return KERN_INVALID_ARGUMENT;

	wqdbg_v("unlink waitq %p from set 0x%llx",
		(void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id);

	assert(!waitq_irq_safe(waitq));

	waitq_lock(waitq);

	kr = waitq_unlink_locked(waitq, wqset);

	waitq_unlock(waitq);
	return kr;
}
/**
 * unlink a waitq from a waitq set, but reference the waitq by its prepost ID
 *
 * Conditions:
 *	'wqset' is unlocked
 *	wqp_id may be valid or invalid
 */
void waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset)
{
	struct wq_prepost *wqp;

	disable_preemption();
	wqp = wq_prepost_get(wqp_id);
	if (wqp) {
		struct waitq *wq;

		wq = wqp->wqp_wq.wqp_wq_ptr;

		/*
		 * lock the waitq, then release our prepost ID reference, then
		 * unlink the waitq from the wqset: this ensures that we don't
		 * hold a prepost ID reference during the unlink, but we also
		 * complete the unlink operation atomically to avoid a race
		 * with waitq_unlink[_all].
		 */
		assert(!waitq_irq_safe(wq));

		waitq_lock(wq);
		wq_prepost_put(wqp);

		if (!waitq_valid(wq)) {
			/* someone already tore down this waitq! */
			waitq_unlock(wq);
			enable_preemption();
			return;
		}

		/* this _may_ drop the wq lock, but that's OK */
		waitq_unlink_locked(wq, wqset);

		waitq_unlock(wq);
	}
	enable_preemption();
}
/**
 * unlink 'waitq' from all sets to which it belongs
 *
 * Conditions:
 *	'waitq' is locked on entry
 *	returns with waitq lock dropped
 *
 * Notes:
 *	may (rarely) spin (see waitq_clear_prepost_locked)
 */
kern_return_t waitq_unlink_all_unlock(struct waitq *waitq)
{
	uint64_t old_set_id = 0;
	wqdbg_v("unlink waitq %p from all sets",
		(void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq));
	assert(!waitq_irq_safe(waitq));

	/* it's not a member of any sets */
	if (waitq->waitq_set_id == 0) {
		waitq_unlock(waitq);
		return KERN_SUCCESS;
	}

	old_set_id = waitq->waitq_set_id;
	waitq->waitq_set_id = 0;

	/*
	 * invalidate the prepost entry for this waitq.
	 * This may drop and re-acquire the waitq lock, but that's OK because
	 * if it was added to another set and preposted to that set in the
	 * time we drop the lock, the state will remain consistent.
	 */
	(void)waitq_clear_prepost_locked(waitq);

	waitq_unlock(waitq);

	if (old_set_id) {
		/*
		 * Walk the link table and invalidate each LINK object that
		 * used to connect this waitq to one or more sets: this works
		 * because WQL_LINK objects are private to each wait queue
		 */
		(void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, old_set_id,
				       WQL_LINK, NULL, waitq_unlink_all_cb);
	}

	return KERN_SUCCESS;
}
/**
 * unlink 'waitq' from all sets to which it belongs
 *
 * Conditions:
 *	'waitq' is not locked
 *	may disable and re-enable interrupts
 *	may (rarely) spin
 *	(see waitq_unlink_all_locked, waitq_clear_prepost_locked)
 */
kern_return_t waitq_unlink_all(struct waitq *waitq)
{
	kern_return_t kr = KERN_SUCCESS;

	if (!waitq_valid(waitq))
		panic("Invalid waitq: %p", waitq);

	assert(!waitq_irq_safe(waitq));
	waitq_lock(waitq);
	if (!waitq_valid(waitq)) {
		waitq_unlock(waitq);
		return KERN_SUCCESS;
	}

	kr = waitq_unlink_all_unlock(waitq);
	/* waitq unlocked and set links deallocated */

	return kr;
}
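
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * detaching a waitq from every set it belongs to while keeping the waitq
 * itself usable, e.g. before moving it into a different set. (A full
 * teardown would use waitq_deinit(), which performs this unlinking on its
 * own.) The 'new_set' parameter is hypothetical.
 */
#if 0
static void waitq_detach_example(struct waitq *wq, struct waitq_set *new_set)
{
	if (waitq_unlink_all(wq) != KERN_SUCCESS)
		return;

	/* the waitq is still valid and can be linked elsewhere */
	(void)waitq_link(wq, new_set, WAITQ_SHOULD_LOCK, NULL);
}
#endif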
/**
 * unlink all waitqs from 'wqset'
 *
 * Conditions:
 *	'wqset' is locked on entry
 *	'wqset' is unlocked on exit and spl is restored
 *
 * Note:
 *	may (rarely) spin/block (see waitq_clear_prepost_locked)
 */
kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset)
{
	struct waitq_link *link;
	uint64_t prepost_id;

	wqdbg_v("unlink all queues from set 0x%llx", wqset->wqset_id);

	/*
	 * This operation does not require interaction with any of the set's
	 * constituent wait queues. All we have to do is invalidate the SetID
	 */

	/* invalidate and re-alloc the link object first */
	link = wql_get_link(wqset->wqset_id);

	/* we may have raced with a waitq_set_deinit: handle this */
	if (!link) {
		waitq_set_unlock(wqset);
		return KERN_SUCCESS;
	}

	wql_invalidate(link);

	/* re-alloc the object to get a new generation ID */
	wql_realloc_link(link, WQL_WQS);
	link->wql_wqs.wql_set = wqset;

	wqset->wqset_id = link->wql_setid.id;
	wql_mkvalid(link);
	wql_put_link(link);

	/* clear any preposts attached to this set */
	prepost_id = 0;
	if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id)
		prepost_id = wqset->wqset_prepost_id;
	/* else { TODO: notify kqueue subsystem? } */
	wqset->wqset_prepost_id = 0;

	/*
	 * clear set linkage and prepost object associated with this set:
	 * waitq sets may prepost to other sets if, for example, they are
	 * associated with a kqueue which is in a select set.
	 *
	 * This releases all the set link objects
	 * (links to other sets to which this set was previously added)
	 */
	waitq_unlink_all_unlock(&wqset->wqset_q);
	/* wqset->wqset_q unlocked */

	/* drop / unlink all the prepost table objects */
	if (prepost_id)
		(void)wq_prepost_iterate(prepost_id, NULL,
					 wqset_clear_prepost_chain_cb);

	return KERN_SUCCESS;
}
/**
 * unlink all waitqs from 'wqset'
 *
 * Conditions:
 *	'wqset' is not locked
 *	may (rarely) spin/block (see waitq_clear_prepost_locked)
 */
kern_return_t waitq_set_unlink_all(struct waitq_set *wqset)
{
	assert(waitqs_is_set(wqset));
	assert(!waitq_irq_safe(&wqset->wqset_q));

	waitq_set_lock(wqset);
	return waitq_set_unlink_all_unlock(wqset);
	/* wqset unlocked and set links and preposts deallocated */
}
static int waitq_prepost_reserve_cb(struct waitq *waitq, void *ctx,
				    struct waitq_link *link)
{
	uint32_t *num = (uint32_t *)ctx;
	(void)waitq;

	/*
	 * In the worst case, we'll have to allocate 2 prepost objects
	 * per waitq set (if the set was already preposted by another
	 * waitq).
	 */
	if (wql_type(link) == WQL_WQS) {
		/*
		 * check to see if the associated waitq actually supports
		 * preposting
		 */
		if (waitq_set_can_prepost(link->wql_wqs.wql_set))
			*num += 2;
	}
	return WQ_ITERATE_CONTINUE;
}
static int waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq,
					   int *did_unlock, struct wq_prepost **wqp)
{
	struct wq_prepost *tmp;
	struct wqp_cache *cache;

	*did_unlock = 0;

	/*
	 * Before we unlock the waitq, check the per-processor prepost object
	 * cache to see if there's enough there for us. If so, do the
	 * allocation, keep the lock and save an entire iteration over the set
	 * linkage!
	 */
	if (waitq) {
		disable_preemption();
		cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
		if (nalloc <= (int)cache->avail)
			goto do_alloc;
		enable_preemption();

		/* unlock the waitq to perform the allocation */
		*did_unlock = 1;
		waitq_unlock(waitq);
	}

do_alloc:
	tmp = wq_prepost_alloc(LT_RESERVED, nalloc);
	if (!tmp)
		panic("Couldn't reserve %d preposts for waitq @%p (wqp@%p)",
		      nalloc, waitq, *wqp);
	if (*wqp) {
		/* link the two lists */
		int __assert_only rc;
		rc = wq_prepost_rlink(tmp, *wqp);
		assert(rc == nalloc);
	}
	*wqp = tmp;

	/*
	 * If the caller can block, then enforce a minimum-free table element
	 * policy here. This helps ensure that we will have enough prepost
	 * objects for callers such as selwakeup() that can be called with
	 * spin locks held.
	 */
	if (get_preemption_level() == 0)
		wq_prepost_ensure_free_space();

	if (waitq) {
		if (*did_unlock == 0) {
			/* decrement the preemption count if alloc from cache */
			enable_preemption();
		} else {
			/* otherwise: re-lock the waitq */
			waitq_lock(waitq);
		}
	}

	return nalloc;
}
static int waitq_count_prepost_reservation(struct waitq *waitq, int extra, int keep_locked)
{
	int npreposts = 0;

	/*
	 * If the waitq is not currently part of a set, and we're not asked to
	 * keep the waitq locked then we'll want to have 3 in reserve
	 * just-in-case it becomes part of a set while we unlock and reserve.
	 * We may need up to 1 object for the waitq, and 2 for the set.
	 */
	if (waitq->waitq_set_id == 0) {
		npreposts = 3;
	} else {
		/* this queue has never been preposted before */
		if (waitq->waitq_prepost_id == 0)
			npreposts = 3;

		/*
		 * Walk the set of table linkages associated with this waitq
		 * and count the worst-case number of prepost objects that
		 * may be needed during a wakeup_all. We can walk this without
		 * locking each set along the way because the table-based IDs
		 * disconnect us from the set pointers themselves, and the
		 * table walking is careful to read the setid values only once.
		 * Locking each set up the chain also doesn't guarantee that
		 * their membership won't change between the time we unlock
		 * that set and when we actually go to prepost, so our
		 * situation is no worse than before and we've alleviated lock
		 * contention on any sets to which this waitq belongs.
		 */
		(void)walk_waitq_links(LINK_WALK_FULL_DAG_UNLOCKED,
				       waitq, waitq->waitq_set_id,
				       WQL_WQS, (void *)&npreposts,
				       waitq_prepost_reserve_cb);
	}

	if (extra > 0)
		npreposts += extra;

	if (npreposts == 0 && !keep_locked) {
		/*
		 * If we get here, we were asked to reserve some prepost
		 * objects for a waitq that's previously preposted, and is not
		 * currently a member of any sets. We have also been
		 * instructed to unlock the waitq when we're done. In this
		 * case, we pre-allocate enough reserved objects to handle
		 * the case where the waitq gets added to a single set when
		 * the lock is released.
		 */
		npreposts = 3;
	}

	return npreposts;
}
/**
 * pre-allocate prepost objects for 'waitq'
 *
 * Conditions:
 *	'waitq' is not locked
 *
 * Returns:
 *	0 on success, '*reserved' is set to the head of a singly-linked
 *	list of pre-allocated prepost objects.
 *
 * Notes:
 *	If 'lock_state' is WAITQ_KEEP_LOCKED, this function performs the pre-allocation
 *	atomically and returns 'waitq' locked.
 *
 *	This function attempts to pre-allocate precisely enough prepost
 *	objects based on the current set membership of 'waitq'. If the
 *	operation is performed atomically, then the caller
 *	is guaranteed to have enough pre-allocated prepost objects to avoid
 *	any (rare) blocking in the wakeup path.
 */
uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra,
			       waitq_lock_state_t lock_state)
{
	uint64_t reserved = 0;
	uint64_t prev_setid = 0, prev_prepostid = 0;
	struct wq_prepost *wqp = NULL;
	int nalloc = 0, npreposts = 0;
	int keep_locked = (lock_state == WAITQ_KEEP_LOCKED);
	int unlocked = 0;

	wqdbg_v("Attempting to reserve prepost linkages for waitq %p (extra:%d)",
		(void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), extra);

	if (waitq == NULL && extra > 0) {
		/*
		 * Simple prepost object allocation:
		 * we'll add 2 more because the waitq might need an object,
		 * and the set itself may need a new POST object in addition
		 * to the number of preposts requested by the caller
		 */
		nalloc = waitq_alloc_prepost_reservation(extra + 2, NULL,
							 &unlocked, &wqp);
		assert(nalloc == extra + 2);
		return wqp->wqp_prepostid.id;
	}

	assert(lock_state == WAITQ_KEEP_LOCKED || lock_state == WAITQ_UNLOCK);

	assert(!waitq_irq_safe(waitq));

	waitq_lock(waitq);

	/* remember the set ID that we started with */
	prev_setid = waitq->waitq_set_id;
	prev_prepostid = waitq->waitq_prepost_id;

	/*
	 * If the waitq is not part of a set, and we're asked to
	 * keep the set locked, then we don't have to reserve
	 * anything!
	 */
	if (prev_setid == 0 && keep_locked)
		goto out;

	npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked);

	/* nothing for us to do! */
	if (npreposts == 0) {
		if (keep_locked)
			goto out;
		goto out_unlock;
	}

try_alloc:
	/* this _may_ unlock and relock the waitq! */
	nalloc = waitq_alloc_prepost_reservation(npreposts, waitq,
						 &unlocked, &wqp);

	if (!unlocked) {
		/* allocation held the waitq lock: we're done! */
		if (keep_locked)
			goto out;
		goto out_unlock;
	}

	/*
	 * Before we return, if the allocation had to unlock the waitq, we
	 * must check one more time to see if we have enough. If not, we'll
	 * try to allocate the difference. If the caller requests it, we'll
	 * also leave the waitq locked so that the use of the pre-allocated
	 * prepost objects can be guaranteed to be enough if a wakeup_all is
	 * performed before unlocking the waitq.
	 */

	/*
	 * If the waitq is no longer associated with a set, or if the waitq's
	 * set/prepostid has not changed since we first walked its linkage,
	 * we're done.
	 */
	if ((waitq->waitq_set_id == 0) ||
	    (waitq->waitq_set_id == prev_setid &&
	     waitq->waitq_prepost_id == prev_prepostid)) {
		if (keep_locked)
			goto out;
		goto out_unlock;
	}

	npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked);

	if (npreposts > nalloc) {
		prev_setid = waitq->waitq_set_id;
		prev_prepostid = waitq->waitq_prepost_id;
		npreposts = npreposts - nalloc; /* only allocate the diff */
		goto try_alloc;
	}

	if (keep_locked)
		goto out;

out_unlock:
	waitq_unlock(waitq);
out:
	if (wqp)
		reserved = wqp->wqp_prepostid.id;

	return reserved;
}
/**
 * release a linked list of prepost objects allocated via _prepost_reserve
 *
 * Conditions:
 *	may (rarely) spin waiting for prepost table growth memcpy
 */
void waitq_prepost_release_reserve(uint64_t id)
{
	struct wq_prepost *wqp;

	wqdbg_v("releasing reserved preposts starting at: 0x%llx", id);

	wqp = wq_prepost_rfirst(id);
	if (!wqp)
		return;

	wq_prepost_release_rlist(wqp);
}
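
/*
 * Illustrative sketch (hypothetical helper, never compiled): how a caller
 * pairs waitq_prepost_reserve() with waitq_prepost_release_reserve() so that
 * the wakeup path cannot block on prepost allocation.  This mirrors the
 * pattern used by waitq_wakeup64_one() later in this file and applies only
 * to non-IRQ-safe waitqs.
 */
#if 0
static kern_return_t
example_wakeup_no_block(struct waitq *wq, event64_t event, int priority)
{
	uint64_t resv;
	kern_return_t kr;

	/* returns with 'wq' locked and enough prepost objects reserved */
	resv = waitq_prepost_reserve(wq, 0, WAITQ_KEEP_LOCKED);

	/* consumes reserved objects as needed and unlocks 'wq' */
	kr = waitq_wakeup64_one_locked(wq, event, THREAD_AWAKENED,
				       &resv, priority, WAITQ_UNLOCK);

	/* give back whatever the wakeup did not consume */
	waitq_prepost_release_reserve(resv);

	return kr;
}
#endif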
/**
 * clear all preposts from 'wqset'
 *
 * Conditions:
 *	'wqset' is not locked
 */
void waitq_set_clear_preposts(struct waitq_set *wqset)
{
	uint64_t prepost_id;
	spl_t spl;

	assert(waitqs_is_set(wqset));

	if (!wqset->wqset_q.waitq_prepost || !wqset->wqset_prepost_id)
		return;

	wqdbg_v("Clearing all preposted queues on waitq_set: 0x%llx",
		wqset->wqset_id);

	if (waitq_irq_safe(&wqset->wqset_q))
		spl = splsched();
	waitq_set_lock(wqset);
	prepost_id = wqset->wqset_prepost_id;
	wqset->wqset_prepost_id = 0;
	waitq_set_unlock(wqset);
	if (waitq_irq_safe(&wqset->wqset_q))
		splx(spl);

	/* drop / unlink all the prepost table objects */
	(void)wq_prepost_iterate(prepost_id, NULL,
				 wqset_clear_prepost_chain_cb);
}
/* ----------------------------------------------------------------------
 *
 * Iteration: waitq -> sets / waitq_set -> preposts
 *
 * ---------------------------------------------------------------------- */
struct wq_it_ctx {
	void *input;
	void *ctx;
	waitq_iterator_t it;
};
static int waitq_iterate_sets_cb(struct waitq *waitq, void *ctx,
				 struct waitq_link *link)
{
	struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx);
	struct waitq_set *wqset;
	int ret;

	(void)waitq;
	assert(!waitq_irq_safe(waitq));
	assert(wql_type(link) == WQL_WQS);

	/*
	 * the waitq is locked, so we can just take the set lock
	 * and call the iterator function
	 */
	wqset = link->wql_wqs.wql_set;
	assert(wqset != NULL);
	assert(!waitq_irq_safe(&wqset->wqset_q));
	waitq_set_lock(wqset);

	ret = wctx->it(wctx->ctx, (struct waitq *)wctx->input, wqset);

	waitq_set_unlock(wqset);
	return ret;
}
/**
 * call external iterator function for each prepost object in wqset
 *
 * Conditions:
 *	Called from wq_prepost_foreach_locked
 *	(wqset locked, waitq _not_ locked)
 */
static int wqset_iterate_prepost_cb(struct waitq_set *wqset, void *ctx,
				    struct wq_prepost *wqp, struct waitq *waitq)
{
	struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx);
	uint64_t wqp_id;
	int ret;

	/*
	 * This is a bit tricky. The 'wqset' is locked, but the 'waitq' is not.
	 * Taking the 'waitq' lock is a lock order violation, so we need to be
	 * careful. We also must realize that we may have taken a reference to
	 * the 'wqp' just as the associated waitq was being torn down (or
	 * clearing all its preposts) - see waitq_clear_prepost_locked(). If
	 * the 'wqp' is valid and we can get the waitq lock, then we are good
	 * to go. If not, we need to back off, check that the 'wqp' hasn't
	 * been invalidated, and try to re-take the locks.
	 */
	assert(!waitq_irq_safe(waitq));

	if (waitq_lock_try(waitq))
		goto call_iterator;

	if (!wqp_is_valid(wqp))
		return WQ_ITERATE_RESTART;

	/*
	 * We are passed a prepost object with a reference on it. If neither
	 * the waitq set nor the waitq require interrupts disabled, then we
	 * may block on the delay(1) call below. We can't hold a prepost
	 * object reference while blocking, so we have to give that up as well
	 * and re-acquire it when we come back.
	 */
	wqp_id = wqp->wqp_prepostid.id;
	wq_prepost_put(wqp);
	waitq_set_unlock(wqset);
	wqdbg_v("dropped set:%p lock waiting for wqp:%p (0x%llx -> wq:%p)",
		wqset, wqp, wqp_id, waitq);
	delay(1);
	waitq_set_lock(wqset);
	wqp = wq_prepost_get(wqp_id);
	if (!wqp)
		/* someone cleared preposts while we slept! */
		return WQ_ITERATE_DROPPED;

	/*
	 * This differs slightly from the logic in ipc_mqueue.c:
	 * ipc_mqueue_receive_on_thread(). There, if the waitq lock
	 * can't be obtained, the prepost link is placed on the back of
	 * the chain, and the iteration starts from the beginning. Here,
	 * we just restart from the beginning.
	 */
	return WQ_ITERATE_RESTART;

call_iterator:
	if (!wqp_is_valid(wqp)) {
		ret = WQ_ITERATE_RESTART;
		goto out_unlock;
	}

	/* call the external callback */
	ret = wctx->it(wctx->ctx, waitq, wqset);

	if (ret == WQ_ITERATE_BREAK_KEEP_LOCKED) {
		ret = WQ_ITERATE_BREAK;
		goto out;
	}

out_unlock:
	waitq_unlock(waitq);
out:
	return ret;
}
/**
 * iterator over all sets to which the given waitq has been linked
 *
 * Conditions:
 *	'waitq' is locked
 */
int waitq_iterate_sets(struct waitq *waitq, void *ctx, waitq_iterator_t it)
{
	int ret;
	struct wq_it_ctx wctx = {
		.input = (void *)waitq,
		.ctx = ctx,
		.it = it,
	};

	if (!it || !waitq)
		return KERN_INVALID_ARGUMENT;

	ret = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
			       WQL_WQS, (void *)&wctx, waitq_iterate_sets_cb);
	if (ret == WQ_ITERATE_CONTINUE)
		ret = WQ_ITERATE_SUCCESS;
	return ret;
}
/**
 * iterator over all preposts in the given wqset
 *
 * Conditions:
 *	'wqset' is locked
 */
int waitq_set_iterate_preposts(struct waitq_set *wqset,
			       void *ctx, waitq_iterator_t it)
{
	struct wq_it_ctx wctx = {
		.input = (void *)wqset,
		.ctx = ctx,
		.it = it,
	};

	if (!it || !wqset)
		return WQ_ITERATE_INVALID;

	assert(waitq_held(&wqset->wqset_q));

	return wq_prepost_foreach_locked(wqset, (void *)&wctx,
					 wqset_iterate_prepost_cb);
}
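
/*
 * Illustrative sketch (hypothetical callback, never compiled): a minimal
 * waitq_iterator_t that counts the sets a waitq is linked to.  The callback
 * signature is taken from how wctx->it is invoked above: the iterator hands
 * it the caller's context, a locked waitq, and a locked wqset, and expects a
 * WQ_ITERATE_* return code.
 */
#if 0
static int
example_count_sets_cb(void *ctx, struct waitq *waitq, struct waitq_set *wqset)
{
	(void)waitq;
	(void)wqset;
	*(int *)ctx += 1;
	return WQ_ITERATE_CONTINUE;
}

static int
example_count_sets(struct waitq *waitq)
{
	int nsets = 0;

	waitq_lock(waitq);	/* waitq_iterate_sets() expects 'waitq' locked */
	(void)waitq_iterate_sets(waitq, (void *)&nsets, example_count_sets_cb);
	waitq_unlock(waitq);

	return nsets;
}
#endif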
/* ----------------------------------------------------------------------
 *
 * Higher-level APIs
 *
 * ---------------------------------------------------------------------- */
/**
 * declare a thread's intent to wait on 'waitq' for 'wait_event'
 *
 * Conditions:
 *	'waitq' is not locked
 */
wait_result_t
waitq_assert_wait64(struct waitq *waitq,
		    event64_t wait_event,
		    wait_interrupt_t interruptible,
		    uint64_t deadline)
{
	thread_t thread = current_thread();
	wait_result_t ret;
	spl_t s;

	if (!waitq_valid(waitq))
		panic("Invalid waitq: %p", waitq);

	if (waitq_irq_safe(waitq))
		s = splsched();

	waitq_lock(waitq);
	ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
					 TIMEOUT_URGENCY_SYS_NORMAL,
					 deadline, TIMEOUT_NO_LEEWAY, thread);
	waitq_unlock(waitq);

	if (waitq_irq_safe(waitq))
		splx(s);

	return ret;
}
/**
 * declare a thread's intent to wait on 'waitq' for 'wait_event'
 *
 * Conditions:
 *	'waitq' is not locked
 *	will disable and re-enable interrupts while locking current_thread()
 */
wait_result_t
waitq_assert_wait64_leeway(struct waitq *waitq,
			   event64_t wait_event,
			   wait_interrupt_t interruptible,
			   wait_timeout_urgency_t urgency,
			   uint64_t deadline,
			   uint64_t leeway)
{
	wait_result_t ret;
	thread_t thread = current_thread();
	spl_t s;

	if (!waitq_valid(waitq))
		panic("Invalid waitq: %p", waitq);

	if (waitq_irq_safe(waitq))
		s = splsched();

	waitq_lock(waitq);
	ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
					 urgency, deadline, leeway, thread);
	waitq_unlock(waitq);

	if (waitq_irq_safe(waitq))
		splx(s);

	return ret;
}
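
/*
 * Illustrative sketch (hypothetical helper, never compiled): the common
 * Mach assert-then-block pattern on the waiter side.  waitq_assert_wait64()
 * only queues the calling thread; the subsequent thread_block() is what
 * actually blocks until a wakeup or the deadline.
 */
#if 0
static wait_result_t
example_wait_for_event(struct waitq *wq, event64_t event, uint64_t deadline)
{
	wait_result_t wr;

	/* declare intent to wait; nothing blocks yet */
	wr = waitq_assert_wait64(wq, event, THREAD_ABORTSAFE, deadline);
	if (wr != THREAD_WAITING)
		return wr;

	/* actually block; a wakeup (or the deadline) resumes us here */
	return thread_block(THREAD_CONTINUE_NULL);
}
#endif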
/**
 * wakeup a single thread from a waitq that's waiting for a given event
 *
 * Conditions:
 *	'waitq' is not locked
 *	may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
 *	may disable and re-enable interrupts
 *
 * Notes:
 *	will _not_ block if waitq is global (or not a member of any set)
 */
kern_return_t
waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event,
		   wait_result_t result, int priority)
{
	kern_return_t kr;
	uint64_t reserved_preposts = 0;
	spl_t spl = 0;

	if (!waitq_valid(waitq))
		panic("Invalid waitq: %p", waitq);

	if (!waitq_irq_safe(waitq)) {
		/* reserve preposts in addition to locking the waitq */
		reserved_preposts = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
	} else {
		spl = splsched();
		waitq_lock(waitq);
	}

	/* waitq is locked upon return */
	kr = waitq_wakeup64_one_locked(waitq, wake_event, result,
				       &reserved_preposts, priority, WAITQ_UNLOCK);

	if (waitq_irq_safe(waitq))
		splx(spl);

	/* release any left-over prepost object (won't block/lock anything) */
	waitq_prepost_release_reserve(reserved_preposts);

	return kr;
}
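
/*
 * Illustrative sketch (hypothetical helper, never compiled): the waker side
 * of the wait/wakeup pattern.  WAITQ_ALL_PRIORITIES is assumed here to be
 * the catch-all priority value from kern/waitq.h; KERN_NOT_WAITING simply
 * means no thread was queued for the event.
 */
#if 0
static void
example_post_event(struct waitq *wq, event64_t event)
{
	kern_return_t kr;

	/* wake at most one waiter for 'event' */
	kr = waitq_wakeup64_one(wq, event, THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
	if (kr == KERN_NOT_WAITING)
		wqdbg_v("no waiters for event 0x%llx", (unsigned long long)event);
}
#endif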
/**
 * wakeup all threads from a waitq that are waiting for a given event
 *
 * Conditions:
 *	'waitq' is not locked
 *	may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
 *	may disable and re-enable interrupts
 *
 * Notes:
 *	will _not_ block if waitq is global (or not a member of any set)
 */
kern_return_t
waitq_wakeup64_all(struct waitq *waitq,
		   event64_t wake_event,
		   wait_result_t result,
		   int priority)
{
	kern_return_t ret;
	uint64_t reserved_preposts = 0;
	spl_t s = 0;

	if (!waitq_valid(waitq))
		panic("Invalid waitq: %p", waitq);

	if (!waitq_irq_safe(waitq)) {
		/* reserve preposts in addition to locking waitq */
		reserved_preposts = waitq_prepost_reserve(waitq, 0,
							  WAITQ_KEEP_LOCKED);
	} else {
		s = splsched();
		waitq_lock(waitq);
	}

	ret = waitq_wakeup64_all_locked(waitq, wake_event, result,
					&reserved_preposts, priority,
					WAITQ_UNLOCK);

	if (waitq_irq_safe(waitq))
		splx(s);

	/* release any left-over prepost object (won't block/lock anything) */
	waitq_prepost_release_reserve(reserved_preposts);

	return ret;
}
/**
 * wakeup a specific thread iff it's waiting on 'waitq' for 'wake_event'
 *
 * Conditions:
 *	'waitq' is not locked
 *	'thread' is unlocked
 *
 * Notes:
 *	May temporarily disable and re-enable interrupts
 */
kern_return_t
waitq_wakeup64_thread(struct waitq *waitq,
		      event64_t wake_event,
		      thread_t thread,
		      wait_result_t result)
{
	kern_return_t ret;
	spl_t s = 0, th_spl;

	if (!waitq_valid(waitq))
		panic("Invalid waitq: %p", waitq);

	if (waitq_irq_safe(waitq))
		s = splsched();
	waitq_lock(waitq);

	ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl);
	/* on success, returns 'thread' locked */

	waitq_unlock(waitq);

	if (ret == KERN_SUCCESS) {
		ret = thread_go(thread, result);
		assert(ret == KERN_SUCCESS);
		thread_unlock(thread);
		splx(th_spl);
		waitq_stats_count_wakeup(waitq);
	} else {
		ret = KERN_NOT_WAITING;
		waitq_stats_count_fail(waitq);
	}

	if (waitq_irq_safe(waitq))
		splx(s);

	return ret;
}
/**
 * wakeup a single thread from a waitq that's waiting for a given event
 * and return a reference to that thread
 * returns THREAD_NULL if no thread was waiting
 *
 * Conditions:
 *	'waitq' is not locked
 *	may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
 *	may disable and re-enable interrupts
 *
 * Notes:
 *	will _not_ block if waitq is global (or not a member of any set)
 */
thread_t
waitq_wakeup64_identify(struct waitq *waitq,
			event64_t wake_event,
			wait_result_t result,
			int priority)
{
	uint64_t reserved_preposts = 0;
	spl_t thread_spl = 0;
	thread_t thread;
	spl_t spl = 0;

	if (!waitq_valid(waitq))
		panic("Invalid waitq: %p", waitq);

	if (!waitq_irq_safe(waitq)) {
		/* reserve preposts in addition to locking waitq */
		reserved_preposts = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
	} else {
		spl = splsched();
		waitq_lock(waitq);
	}

	thread = waitq_wakeup64_identify_locked(waitq, wake_event, result,
						&thread_spl, &reserved_preposts,
						priority, WAITQ_UNLOCK);
	/* waitq is unlocked, thread is locked */

	if (thread != THREAD_NULL) {
		thread_reference(thread);
		thread_unlock(thread);
		splx(thread_spl);
	}

	if (waitq_irq_safe(waitq))
		splx(spl);

	/* release any left-over prepost object (won't block/lock anything) */
	waitq_prepost_release_reserve(reserved_preposts);

	/* returns +1 ref to running thread or THREAD_NULL */
	return thread;
}
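
/*
 * Illustrative sketch (hypothetical helper, never compiled): consuming the
 * +1 thread reference handed back by waitq_wakeup64_identify().  Whatever
 * the caller does with the thread, it must eventually drop the reference
 * with thread_deallocate().
 */
#if 0
static void
example_wake_and_inspect(struct waitq *wq, event64_t event, int priority)
{
	thread_t thread;

	thread = waitq_wakeup64_identify(wq, event, THREAD_AWAKENED, priority);
	if (thread == THREAD_NULL)
		return;

	/* ... examine or hand work to 'thread' here ... */

	thread_deallocate(thread);	/* drop the reference taken for us */
}
#endif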