/*
 * Copyright (c) 2015-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * un-comment the following lines to debug the link/prepost tables
 * NOTE: this expands each element by ~40 bytes
 */
//#define KEEP_WAITQ_LINK_STATS
//#define KEEP_WAITQ_PREPOST_STATS

#include <kern/ast.h>
#include <kern/backtrace.h>
#include <kern/kern_types.h>
#include <kern/ltable.h>
#include <kern/mach_param.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/simple_lock.h>
#include <kern/spl.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/policy_internal.h>
#include <kern/turnstile.h>

#include <os/hash.h>
#include <libkern/OSAtomic.h>
#include <mach/sync_policy.h>
#include <vm/vm_kern.h>

#include <sys/kdebug.h>

#if defined(KEEP_WAITQ_LINK_STATS) || defined(KEEP_WAITQ_PREPOST_STATS)
# if !CONFIG_LTABLE_STATS
#  error "You must configure LTABLE_STATS to use WAITQ_[LINK|PREPOST]_STATS"
# endif
# if !CONFIG_WAITQ_STATS
#  error "You must configure WAITQ_STATS to use WAITQ_[LINK|PREPOST]_STATS"
# endif
#endif

#if CONFIG_WAITQ_DEBUG
#define wqdbg(fmt, ...) \
	printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__)
#else
#define wqdbg(fmt, ...) do { } while (0)
#endif

#ifdef WAITQ_VERBOSE_DEBUG
#define wqdbg_v(fmt, ...) \
	printf("WQ[v:%s]: " fmt "\n", __func__, ## __VA_ARGS__)
#else
#define wqdbg_v(fmt, ...) do { } while (0)
#endif

#define wqinfo(fmt, ...) \
	printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__)

#define wqerr(fmt, ...) \
	printf("WQ[%s] ERROR: " fmt "\n", __func__, ## __VA_ARGS__)

/*
 * file-static functions / data
 */
static thread_t waitq_select_one_locked(struct waitq *waitq, event64_t event,
    uint64_t *reserved_preposts,
    int priority, spl_t *spl);

static kern_return_t waitq_select_thread_locked(struct waitq *waitq,
    event64_t event,
    thread_t thread, spl_t *spl);

#define WAITQ_SET_MAX (task_max * 3)
static zone_t waitq_set_zone;


#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
#define ROUNDDOWN(x, y)     (((x)/(y))*(y))

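/*
 * Example: P2ROUNDUP rounds up to the next multiple of a power-of-two
 * alignment via the identity -(-x & -align), e.g. with align == 4096:
 *   P2ROUNDUP(4096, 4096) == 4096, P2ROUNDUP(4097, 4096) == 8192
 * ROUNDDOWN truncates instead: ROUNDDOWN(4097, 4096) == 4096.
 */
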
#if CONFIG_LTABLE_STATS || CONFIG_WAITQ_STATS
static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip);
#endif

lck_grp_t waitq_lck_grp;

#if __arm64__

#define waitq_lock_to(wq, to) \
	(hw_lock_bit_to(&(wq)->waitq_interlock, LCK_ILOCK, to, &waitq_lck_grp))

#define waitq_lock_unlock(wq) \
	(hw_unlock_bit(&(wq)->waitq_interlock, LCK_ILOCK))

#define waitq_lock_init(wq) \
	(wq->waitq_interlock = 0)

#else

#define waitq_lock_to(wq, to) \
	(hw_lock_to(&(wq)->waitq_interlock, to, &waitq_lck_grp))

#define waitq_lock_unlock(wq) \
	(hw_lock_unlock(&(wq)->waitq_interlock))

#define waitq_lock_init(wq) \
	(hw_lock_init(&(wq)->waitq_interlock))

#endif	/* __arm64__ */

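/*
 * Note: on arm64 the waitq interlock is a single bit (LCK_ILOCK) within
 * waitq_interlock, taken with the hw_lock_bit primitives; other
 * architectures treat waitq_interlock as a full hw_lock.  Both variants
 * of waitq_lock_to() accept a spin timeout argument 'to'.
 */
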
/*
 * Prepost callback function for specially marked waitq sets
 * (prepost alternative)
 */
extern void waitq_set__CALLING_PREPOST_HOOK__(waitq_set_prepost_hook_t *ctx);

#define DEFAULT_MIN_FREE_TABLE_ELEM    100
static uint32_t g_min_free_table_elem;
static uint32_t g_min_free_cache;


/* ----------------------------------------------------------------------
 *
 * SetID Link Table Implementation
 *
 * ---------------------------------------------------------------------- */
static struct link_table g_wqlinktable;

enum wq_link_type {
	WQL_ALL  = -1,
	WQL_FREE = LT_FREE,
	WQL_WQS  = LT_ELEM,
	WQL_LINK = LT_LINK,
};

struct waitq_link {
	struct lt_elem wqte;

	union {
		/* wqt_type == WQL_WQS (LT_ELEM) */
		struct {
			struct waitq_set *wql_set;
			/* uint64_t sl_prepost_id; */
		} wql_wqs;

		/* wqt_type == WQL_LINK (LT_LINK) */
		struct {
			uint64_t left_setid;
			uint64_t right_setid;
		} wql_link;
	};
#ifdef KEEP_WAITQ_LINK_STATS
	thread_t  sl_alloc_th;
	task_t    sl_alloc_task;
	uintptr_t sl_alloc_bt[NWAITQ_BTFRAMES];
	uint64_t  sl_alloc_ts;
	uintptr_t sl_invalidate_bt[NWAITQ_BTFRAMES];
	uint64_t  sl_invalidate_ts;
	uintptr_t sl_mkvalid_bt[NWAITQ_BTFRAMES];
	uint64_t  sl_mkvalid_ts;
	uint64_t  sl_free_ts;
#endif
};
#if !defined(KEEP_WAITQ_LINK_STATS)
static_assert((sizeof(struct waitq_link) & (sizeof(struct waitq_link) - 1)) == 0,
    "waitq_link struct must be a power of two!");
#endif

#define wql_refcnt(link) \
	(lt_bits_refcnt((link)->wqte.lt_bits))

#define wql_type(link) \
	(lt_bits_type((link)->wqte.lt_bits))

#define wql_mkvalid(link) \
	do { \
		lt_elem_mkvalid(&(link)->wqte); \
		wql_do_mkvalid_stats(&(link)->wqte); \
	} while (0)

#define wql_is_valid(link) \
	lt_bits_valid((link)->wqte.lt_bits)

#define wql_setid wqte.lt_id

#define WQL_WQS_POISON  ((void *)(0xf00df00d))
#define WQL_LINK_POISON (0x0bad0badffffffffull)

static void
wql_poison(struct link_table *table, struct lt_elem *elem)
{
	struct waitq_link *link = (struct waitq_link *)elem;
	(void)table;

	switch (wql_type(link)) {
	case WQL_WQS:
		link->wql_wqs.wql_set = WQL_WQS_POISON;
		break;
	case WQL_LINK:
		link->wql_link.left_setid = WQL_LINK_POISON;
		link->wql_link.right_setid = WQL_LINK_POISON;
		break;
	default:
		break;
	}
#ifdef KEEP_WAITQ_LINK_STATS
	memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
	link->sl_alloc_ts = 0;
	memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt));
	link->sl_mkvalid_ts = 0;

	link->sl_alloc_th = THREAD_NULL;
	/* leave the sl_alloc_task in place for debugging */

	link->sl_free_ts = mach_absolute_time();
#endif
}

#ifdef KEEP_WAITQ_LINK_STATS
static __inline__ void
wql_do_alloc_stats(struct lt_elem *elem)
{
	if (elem) {
		struct waitq_link *link = (struct waitq_link *)elem;
		memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
		waitq_grab_backtrace(link->sl_alloc_bt, 0);
		link->sl_alloc_th = current_thread();
		link->sl_alloc_task = current_task();

		assert(link->sl_alloc_ts == 0);
		link->sl_alloc_ts = mach_absolute_time();

		memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
		link->sl_invalidate_ts = 0;
	}
}

static __inline__ void
wql_do_invalidate_stats(struct lt_elem *elem)
{
	struct waitq_link *link = (struct waitq_link *)elem;

	if (!elem) {
		return;
	}

	assert(link->sl_mkvalid_ts > 0);

	memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
	link->sl_invalidate_ts = mach_absolute_time();
	waitq_grab_backtrace(link->sl_invalidate_bt, 0);
}

static __inline__ void
wql_do_mkvalid_stats(struct lt_elem *elem)
{
	struct waitq_link *link = (struct waitq_link *)elem;

	if (!elem) {
		return;
	}

	memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt));
	link->sl_mkvalid_ts = mach_absolute_time();
	waitq_grab_backtrace(link->sl_mkvalid_bt, 0);
}
#else
#define wql_do_alloc_stats(e)
#define wql_do_invalidate_stats(e)
#define wql_do_mkvalid_stats(e)
#endif /* KEEP_WAITQ_LINK_STATS */

static void
wql_init(void)
{
	uint32_t tablesz = 0, max_links = 0;

	if (PE_parse_boot_argn("wql_tsize", &tablesz, sizeof(tablesz)) != TRUE) {
		tablesz = (uint32_t)g_lt_max_tbl_size;
	}

	tablesz = P2ROUNDUP(tablesz, PAGE_SIZE);
	max_links = tablesz / sizeof(struct waitq_link);
	assert(max_links > 0 && tablesz > 0);

	/* we have a restricted index range */
	if (max_links > (LT_IDX_MAX + 1)) {
		max_links = LT_IDX_MAX + 1;
	}

	wqinfo("init linktable with max:%d elements (%d bytes)",
	    max_links, tablesz);
	ltable_init(&g_wqlinktable, "wqslab.wql", max_links,
	    sizeof(struct waitq_link), wql_poison);
}

static void
wql_ensure_free_space(void)
{
	if (g_wqlinktable.nelem - g_wqlinktable.used_elem < g_min_free_table_elem) {
		/*
		 * we don't hold locks on these values, so check for underflow
		 */
		if (g_wqlinktable.used_elem <= g_wqlinktable.nelem) {
			wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d",
			    g_wqlinktable.nelem, g_wqlinktable.used_elem,
			    g_min_free_table_elem);
			ltable_grow(&g_wqlinktable, g_min_free_table_elem);
		}
	}
}

static struct waitq_link *
wql_alloc_link(int type)
{
	struct lt_elem *elem;

	elem = ltable_alloc_elem(&g_wqlinktable, type, 1, 0);
	wql_do_alloc_stats(elem);
	return (struct waitq_link *)elem;
}

static void
wql_realloc_link(struct waitq_link *link, int type)
{
	ltable_realloc_elem(&g_wqlinktable, &link->wqte, type);
#ifdef KEEP_WAITQ_LINK_STATS
	memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
	link->sl_alloc_ts = 0;
	wql_do_alloc_stats(&link->wqte);

	memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
	link->sl_invalidate_ts = 0;
#endif
}

static void
wql_invalidate(struct waitq_link *link)
{
	lt_elem_invalidate(&link->wqte);
	wql_do_invalidate_stats(&link->wqte);
}

static struct waitq_link *
wql_get_link(uint64_t setid)
{
	struct lt_elem *elem;

	elem = ltable_get_elem(&g_wqlinktable, setid);
	return (struct waitq_link *)elem;
}

static void
wql_put_link(struct waitq_link *link)
{
	if (!link) {
		return;
	}
	ltable_put_elem(&g_wqlinktable, (struct lt_elem *)link);
}

static struct waitq_link *
wql_get_reserved(uint64_t setid, int type)
{
	struct lt_elem *elem;

	elem = lt_elem_list_first(&g_wqlinktable, setid);
	if (!elem) {
		return NULL;
	}
	ltable_realloc_elem(&g_wqlinktable, elem, type);
	return (struct waitq_link *)elem;
}


static inline int waitq_maybe_remove_link(struct waitq *waitq,
    uint64_t setid,
    struct waitq_link *parent,
    struct waitq_link *left,
    struct waitq_link *right);

enum {
	LINK_WALK_ONE_LEVEL = 0,
	LINK_WALK_FULL_DAG  = 1,
	LINK_WALK_FULL_DAG_UNLOCKED = 2,
};

typedef int (*wql_callback_func)(struct waitq *waitq, void *ctx,
    struct waitq_link *link);

/**
 * walk_waitq_links: walk all table elements (of type 'link_type') pointed to by 'setid'
 *
 * Conditions:
 *	waitq is locked (or NULL)
 *	'setid' is managed by 'waitq'
 *		this could be direct (waitq->waitq_set_id == setid)
 *		OR indirect (setid is the left/right ID in a LINK chain,
 *		             whose root is waitq->waitq_set_id)
 *
 * Notes:
 *	This function uses recursion to walk the set of table elements
 *	pointed to by 'setid'. For each element encountered, 'cb' will be
 *	called. If non-zero, the return value of this callback function can
 *	early-out of the table walk.
 *
 *	For each link element encountered, the function takes a reference to
 *	it. The reference is dropped only after the callback and any recursion
 *	has completed.
 *
 *	The assumed table/link/tree structure:
 *	                  'setid'
 *	                  /    \
 *	                 /      \
 *	             L(LINK)     R(LINK)
 *	              /\            /\
 *	             /  \          /  \
 *	            /    \      Rl(*)  Rr(*)
 *	         Ll(*)  Lr(*)    /\     /\
 *	          /\     /\     ... ... ... ...
 *	         ... ... ... ...
 *	                   \
 *	                    WQS(wqset_q.waitq_setid == Sx)
 *	                    [waitq set is a member of setid, 'Sx')
 *
 *	                    'Sx'
 *	                   /    \
 *	                  /      \
 *	              L(LINK)     R(LINK)
 *	               /\            /\
 *	             ... ...       ... ...
 *
 *	The basic algorithm is as follows:
 *	*) take a reference to the table object pointed to by 'setid'
 *	*) if appropriate, call 'cb' (potentially early-out on non-zero return)
 *	*) if the link object points to a waitq set, and the walk type
 *	   is 'FULL_DAG' (full directed-acyclic-graph), then try to lock
 *	   the associated waitq set object and recursively walk all sets to
 *	   which that set belongs. This is a DFS of the tree structure.
 *	*) recurse down the left side of the tree (following the
 *	   'left_setid' pointer in the link object)
 *	*) recurse down the right side of the tree (following the
 *	   'right_setid' pointer in the link object)
 */
static __attribute__((noinline))
int
walk_waitq_links(int walk_type, struct waitq *waitq,
    uint64_t setid, int link_type,
    void *ctx, wql_callback_func cb)
{
	struct waitq_link *link;
	uint64_t nextid;
	int wqltype;

	link = wql_get_link(setid);

	/* invalid link */
	if (!link) {
		return WQ_ITERATE_CONTINUE;
	}

	setid = nextid = 0;
	wqltype = wql_type(link);
	if (wqltype == WQL_LINK) {
		setid = link->wql_link.left_setid;
		nextid = link->wql_link.right_setid;
	}

	/*
	 * Make the callback only on specified link_type (or all links)
	 * Note that after the callback, the link object may be
	 * invalid. The only valid thing we can do is put our
	 * reference to it (which may put it back on the free list)
	 */
	if (link_type == WQL_ALL || link_type == wqltype) {
		/* allow the callback to early-out */
		int ret = cb(waitq, ctx, link);
		if (ret != WQ_ITERATE_CONTINUE) {
			wql_put_link(link);
			return ret;
		}
	}

	if (wqltype == WQL_WQS &&
	    (walk_type == LINK_WALK_FULL_DAG ||
	    walk_type == LINK_WALK_FULL_DAG_UNLOCKED)) {
		/*
		 * Recurse down any sets to which this wait queue set was
		 * added. We do this just before we put our reference to
		 * the link object (which may free it).
		 */
		struct waitq_set *wqset = link->wql_wqs.wql_set;
		int ret = WQ_ITERATE_CONTINUE;
		int should_unlock = 0;
		uint64_t wqset_setid = 0;

		if (waitq_set_is_valid(wqset) && walk_type == LINK_WALK_FULL_DAG) {
			assert(!waitq_irq_safe(&wqset->wqset_q));
			waitq_set_lock(wqset);
			should_unlock = 1;
		}

		/*
		 * verify the linked waitq set as it could have been
		 * invalidated before we grabbed the lock!
		 */
		if (wqset->wqset_id != link->wql_setid.id) {
			/* This is the bottom of the tree: just get out */
			if (should_unlock) {
				waitq_set_unlock(wqset);
			}
			wql_put_link(link);
			return WQ_ITERATE_CONTINUE;
		}

		wqset_setid = wqset->wqset_q.waitq_set_id;

		if (wqset_setid > 0) {
			ret = walk_waitq_links(walk_type, &wqset->wqset_q,
			    wqset_setid, link_type, ctx, cb);
		}
		if (should_unlock) {
			waitq_set_unlock(wqset);
		}
		if (ret != WQ_ITERATE_CONTINUE) {
			wql_put_link(link);
			return ret;
		}
	}

	wql_put_link(link);

	/* recurse down left side of the tree */
	if (setid) {
		int ret = walk_waitq_links(walk_type, waitq, setid, link_type, ctx, cb);
		if (ret != WQ_ITERATE_CONTINUE) {
			return ret;
		}
	}

	/* recurse down right side of the tree */
	if (nextid) {
		return walk_waitq_links(walk_type, waitq, nextid, link_type, ctx, cb);
	}

	return WQ_ITERATE_CONTINUE;
}

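/*
 * Illustrative sketch (hypothetical callback, not part of the original
 * file): a wql_callback_func terminates the walk early by returning
 * anything other than WQ_ITERATE_CONTINUE, e.g.:
 *
 *	static int
 *	stop_at_set_cb(struct waitq *waitq, void *ctx, struct waitq_link *link)
 *	{
 *		if (wql_type(link) == WQL_WQS &&
 *		    link->wql_wqs.wql_set == (struct waitq_set *)ctx) {
 *			return WQ_ITERATE_ABORTED;
 *		}
 *		return WQ_ITERATE_CONTINUE;
 *	}
 *
 * A call such as walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq,
 * waitq->waitq_set_id, WQL_WQS, (void *)wqset, stop_at_set_cb) would then
 * return WQ_ITERATE_ABORTED as soon as 'wqset' is found among the first
 * level of links.
 */
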
/* ----------------------------------------------------------------------
 *
 * Prepost Link Table Implementation
 *
 * ---------------------------------------------------------------------- */
static struct link_table g_prepost_table;

enum wq_prepost_type {
	WQP_FREE = LT_FREE,
	WQP_WQ   = LT_ELEM,
	WQP_POST = LT_LINK,
};

struct wq_prepost {
	struct lt_elem wqte;

	union {
		/* wqt_type == WQP_WQ (LT_ELEM) */
		struct {
			struct waitq *wqp_wq_ptr;
		} wqp_wq;
		/* wqt_type == WQP_POST (LT_LINK) */
		struct {
			uint64_t wqp_next_id;
			uint64_t wqp_wq_id;
		} wqp_post;
	};
#ifdef KEEP_WAITQ_PREPOST_STATS
	thread_t  wqp_alloc_th;
	task_t    wqp_alloc_task;
	uintptr_t wqp_alloc_bt[NWAITQ_BTFRAMES];
#endif
};
#if !defined(KEEP_WAITQ_PREPOST_STATS)
static_assert((sizeof(struct wq_prepost) & (sizeof(struct wq_prepost) - 1)) == 0,
    "wq_prepost struct must be a power of two!");
#endif

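/*
 * Note (summary of the management code below): a waitq set's
 * wqset_prepost_id either names a single WQP_WQ element (exactly one
 * waitq has preposted to the set) or the head of a circular list of
 * WQP_POST elements, each of which points at the WQP_WQ element of one
 * preposting waitq via wqp_wq_id and at the next list entry via
 * wqp_next_id.
 */
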
#define wqp_refcnt(wqp) \
	(lt_bits_refcnt((wqp)->wqte.lt_bits))

#define wqp_type(wqp) \
	(lt_bits_type((wqp)->wqte.lt_bits))

#define wqp_set_valid(wqp) \
	lt_elem_mkvalid(&(wqp)->wqte)

#define wqp_is_valid(wqp) \
	lt_bits_valid((wqp)->wqte.lt_bits)

#define wqp_prepostid wqte.lt_id

#define WQP_WQ_POISON    (0x0bad0badffffffffull)
#define WQP_POST_POISON  (0xf00df00df00df00d)

static void
wqp_poison(struct link_table *table, struct lt_elem *elem)
{
	struct wq_prepost *wqp = (struct wq_prepost *)elem;
	(void)table;

	switch (wqp_type(wqp)) {
	case WQP_WQ:
		break;
	case WQP_POST:
		wqp->wqp_post.wqp_next_id = WQP_POST_POISON;
		wqp->wqp_post.wqp_wq_id = WQP_POST_POISON;
		break;
	default:
		break;
	}
}

#ifdef KEEP_WAITQ_PREPOST_STATS
static __inline__ void
wqp_do_alloc_stats(struct lt_elem *elem)
{
	if (!elem) {
		return;
	}

	struct wq_prepost *wqp = (struct wq_prepost *)elem;
	uintptr_t alloc_bt[sizeof(wqp->wqp_alloc_bt)];

	waitq_grab_backtrace(alloc_bt, NWAITQ_BTFRAMES);

	/* be sure to take stats for _all_ allocated objects */
	for (;;) {
		memcpy(wqp->wqp_alloc_bt, alloc_bt, sizeof(alloc_bt));
		wqp->wqp_alloc_th = current_thread();
		wqp->wqp_alloc_task = current_task();
		wqp = (struct wq_prepost *)lt_elem_list_next(&g_prepost_table, &wqp->wqte);
		if (!wqp) {
			break;
		}
	}
}
#else
#define wqp_do_alloc_stats(e)
#endif /* KEEP_WAITQ_PREPOST_STATS */

static void
wqp_init(void)
{
	uint32_t tablesz = 0, max_wqp = 0;

	if (PE_parse_boot_argn("wqp_tsize", &tablesz, sizeof(tablesz)) != TRUE) {
		tablesz = (uint32_t)g_lt_max_tbl_size;
	}

	tablesz = P2ROUNDUP(tablesz, PAGE_SIZE);
	max_wqp = tablesz / sizeof(struct wq_prepost);
	assert(max_wqp > 0 && tablesz > 0);

	/* we have a restricted index range */
	if (max_wqp > (LT_IDX_MAX + 1)) {
		max_wqp = LT_IDX_MAX + 1;
	}

	wqinfo("init prepost table with max:%d elements (%d bytes)",
	    max_wqp, tablesz);
	ltable_init(&g_prepost_table, "wqslab.prepost", max_wqp,
	    sizeof(struct wq_prepost), wqp_poison);
}

/*
 * Refill the per-CPU cache.
 */
static void
wq_prepost_refill_cpu_cache(uint32_t nalloc)
{
	struct lt_elem *new_head, *old_head;
	struct wqp_cache *cache;

	/* require preemption enabled to allocate elements */
	if (get_preemption_level() != 0) {
		return;
	}

	new_head = ltable_alloc_elem(&g_prepost_table,
	    LT_RESERVED, nalloc, 1);
	if (new_head == NULL) {
		return;
	}

	disable_preemption();
	cache = &PROCESSOR_DATA(current_processor(), wqp_cache);

	/* check once more before putting these elements on the list */
	if (cache->avail >= WQP_CACHE_MAX) {
		lt_elem_list_release(&g_prepost_table, new_head, LT_RESERVED);
		enable_preemption();
		return;
	}

	cache->avail += nalloc;
	if (cache->head == 0 || cache->head == LT_IDX_MAX) {
		cache->head = new_head->lt_id.id;
		goto out;
	}

	old_head = lt_elem_list_first(&g_prepost_table, cache->head);
	(void)lt_elem_list_link(&g_prepost_table, new_head, old_head);
	cache->head = new_head->lt_id.id;

out:
	enable_preemption();
	return;
}

static void
wq_prepost_ensure_free_space(void)
{
	uint32_t free_elem;
	uint32_t min_free;
	struct wqp_cache *cache;

	if (g_min_free_cache == 0) {
		g_min_free_cache = (WQP_CACHE_MAX * ml_get_max_cpus());
	}

	/*
	 * Ensure that we always have a pool of per-CPU prepost elements
	 */
	disable_preemption();
	cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
	free_elem = cache->avail;
	enable_preemption();

	if (free_elem < (WQP_CACHE_MAX / 3)) {
		wq_prepost_refill_cpu_cache(WQP_CACHE_MAX - free_elem);
	}

	/*
	 * Now ensure that we have a sufficient amount of free table space
	 */
	free_elem = g_prepost_table.nelem - g_prepost_table.used_elem;
	min_free = g_min_free_table_elem + g_min_free_cache;
	if (free_elem < min_free) {
		/*
		 * we don't hold locks on these values, so check for underflow
		 */
		if (g_prepost_table.used_elem <= g_prepost_table.nelem) {
			wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d+%d",
			    g_prepost_table.nelem, g_prepost_table.used_elem,
			    g_min_free_table_elem, g_min_free_cache);
			ltable_grow(&g_prepost_table, min_free);
		}
	}
}

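/*
 * Note (summary of the allocation strategy around this point): prepost
 * elements are kept available at two levels.  Each CPU keeps up to
 * WQP_CACHE_MAX reserved elements in its wqp_cache, refilled when it drops
 * below a third of that, and the global table keeps at least
 * g_min_free_table_elem + g_min_free_cache free entries, growing when it
 * falls below that low-water mark.
 */
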
static struct wq_prepost *
wq_prepost_alloc(int type, int nelem)
{
	struct lt_elem *elem;
	struct wq_prepost *wqp;
	struct wqp_cache *cache;

	if (type != LT_RESERVED) {
		goto do_alloc;
	}
	if (nelem == 0) {
		return NULL;
	}

	/*
	 * First try to grab the elements from the per-CPU cache if we are
	 * allocating RESERVED elements
	 */
	disable_preemption();
	cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
	if (nelem <= (int)cache->avail) {
		struct lt_elem *first, *next = NULL;
		int nalloc = nelem;

		cache->avail -= nelem;

		/* grab the first element */
		first = lt_elem_list_first(&g_prepost_table, cache->head);

		/* find the last element and re-adjust the cache head */
		for (elem = first; elem != NULL && nalloc > 0; elem = next) {
			next = lt_elem_list_next(&g_prepost_table, elem);
			if (--nalloc == 0) {
				/* terminate the allocated list */
				elem->lt_next_idx = LT_IDX_MAX;
				break;
			}
		}
		assert(nalloc == 0);
		if (!next) {
			cache->head = LT_IDX_MAX;
		} else {
			cache->head = next->lt_id.id;
		}
		/* assert that we don't have mismatched bookkeeping */
		assert(!(cache->head == LT_IDX_MAX && cache->avail > 0));
		enable_preemption();
		elem = first;
		goto out;
	}
	enable_preemption();

do_alloc:
	/* fall-back to standard table allocation */
	elem = ltable_alloc_elem(&g_prepost_table, type, nelem, 0);
	if (!elem) {
		return NULL;
	}

out:
	wqp = (struct wq_prepost *)elem;
	wqp_do_alloc_stats(elem);
	return wqp;
}

static void
wq_prepost_invalidate(struct wq_prepost *wqp)
{
	lt_elem_invalidate(&wqp->wqte);
}

static struct wq_prepost *
wq_prepost_get(uint64_t wqp_id)
{
	struct lt_elem *elem;

	elem = ltable_get_elem(&g_prepost_table, wqp_id);
	return (struct wq_prepost *)elem;
}

static void
wq_prepost_put(struct wq_prepost *wqp)
{
	ltable_put_elem(&g_prepost_table, (struct lt_elem *)wqp);
}

static int
wq_prepost_rlink(struct wq_prepost *parent, struct wq_prepost *child)
{
	return lt_elem_list_link(&g_prepost_table, &parent->wqte, &child->wqte);
}

static struct wq_prepost *
wq_prepost_get_rnext(struct wq_prepost *head)
{
	struct lt_elem *elem;
	struct wq_prepost *wqp;
	uint64_t id;

	elem = lt_elem_list_next(&g_prepost_table, &head->wqte);
	if (!elem) {
		return NULL;
	}
	id = elem->lt_id.id;
	elem = ltable_get_elem(&g_prepost_table, id);

	if (!elem) {
		return NULL;
	}
	wqp = (struct wq_prepost *)elem;
	if (elem->lt_id.id != id ||
	    wqp_type(wqp) != WQP_POST ||
	    wqp->wqp_post.wqp_next_id != head->wqp_prepostid.id) {
		ltable_put_elem(&g_prepost_table, elem);
		return NULL;
	}

	return wqp;
}

static void
wq_prepost_reset_rnext(struct wq_prepost *wqp)
{
	(void)lt_elem_list_break(&g_prepost_table, &wqp->wqte);
}


/**
 * remove 'wqp' from the prepost list on 'wqset'
 *
 * Conditions:
 *	wqset is locked
 *	caller holds a reference on wqp (and is responsible to release it)
 *
 * Result:
 *	wqp is invalidated, wqset is potentially updated with a new
 *	prepost ID, and the next element of the prepost list may be
 *	consumed as well (if the list contained only 2 objects)
 */
static int
wq_prepost_remove(struct waitq_set *wqset,
    struct wq_prepost *wqp)
{
	int more_posts = 1;
	uint64_t next_id = wqp->wqp_post.wqp_next_id;
	uint64_t wqp_id = wqp->wqp_prepostid.id;
	struct wq_prepost *prev_wqp, *next_wqp;

	assert(wqp_type(wqp) == WQP_POST);
	assert(wqset->wqset_q.waitq_prepost == 1);

	if (next_id == wqp_id) {
		/* the list is singular and becoming empty */
		wqset->wqset_prepost_id = 0;
		more_posts = 0;
		goto out;
	}

	prev_wqp = wq_prepost_get_rnext(wqp);
	assert(prev_wqp != NULL);
	assert(prev_wqp->wqp_post.wqp_next_id == wqp_id);
	assert(prev_wqp->wqp_prepostid.id != wqp_id);
	assert(wqp_type(prev_wqp) == WQP_POST);

	if (prev_wqp->wqp_prepostid.id == next_id) {
		/*
		 * There are two items in the list, and we're removing one. We
		 * only need to keep the WQP_WQ pointer from 'prev_wqp'
		 */
		wqset->wqset_prepost_id = prev_wqp->wqp_post.wqp_wq_id;
		wq_prepost_invalidate(prev_wqp);
		wq_prepost_put(prev_wqp);
		more_posts = 0;
		goto out;
	}

	/* prev->next = next */
	prev_wqp->wqp_post.wqp_next_id = next_id;

	/* next->prev = prev */
	next_wqp = wq_prepost_get(next_id);
	assert(next_wqp != NULL);
	assert(next_wqp != wqp);
	assert(next_wqp != prev_wqp);
	assert(wqp_type(next_wqp) == WQP_POST);

	wq_prepost_reset_rnext(next_wqp);
	wq_prepost_rlink(next_wqp, prev_wqp);

	/* If we remove the head of the list, update the wqset */
	if (wqp_id == wqset->wqset_prepost_id) {
		wqset->wqset_prepost_id = next_id;
	}

	wq_prepost_put(prev_wqp);
	wq_prepost_put(next_wqp);

out:
	wq_prepost_reset_rnext(wqp);
	wq_prepost_invalidate(wqp);
	return more_posts;
}

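/*
 * Illustrative example (not from the original source): removing B from a
 * three-element circular prepost list A -> B -> C -> A leaves A -> C -> A,
 * with C's reverse link re-pointed at A.  Removing one element from a
 * two-element list collapses the set back to a single WQP_WQ prepost ID
 * and returns 0 (no more posts remain).
 */
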
static struct wq_prepost *
wq_prepost_rfirst(uint64_t id)
{
	struct lt_elem *elem;
	elem = lt_elem_list_first(&g_prepost_table, id);
	wqp_do_alloc_stats(elem);
	return (struct wq_prepost *)(void *)elem;
}

static struct wq_prepost *
wq_prepost_rpop(uint64_t *id, int type)
{
	struct lt_elem *elem;
	elem = lt_elem_list_pop(&g_prepost_table, id, type);
	wqp_do_alloc_stats(elem);
	return (struct wq_prepost *)(void *)elem;
}

static void
wq_prepost_release_rlist(struct wq_prepost *wqp)
{
	int nelem = 0;
	struct wqp_cache *cache;
	struct lt_elem *elem;

	if (!wqp) {
		return;
	}

	elem = &wqp->wqte;

	/*
	 * These are reserved elements: release them back to the per-cpu pool
	 * if our cache is running low.
	 */
	disable_preemption();
	cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
	if (cache->avail < WQP_CACHE_MAX) {
		struct lt_elem *tmp = NULL;
		if (cache->head != LT_IDX_MAX) {
			tmp = lt_elem_list_first(&g_prepost_table, cache->head);
		}
		nelem = lt_elem_list_link(&g_prepost_table, elem, tmp);
		cache->head = elem->lt_id.id;
		cache->avail += nelem;
		enable_preemption();
		return;
	}
	enable_preemption();

	/* release these elements back to the main table */
	nelem = lt_elem_list_release(&g_prepost_table, elem, LT_RESERVED);

#if CONFIG_WAITQ_STATS
	g_prepost_table.nreserved_releases += 1;
	OSDecrementAtomic64(&g_prepost_table.nreservations);
#endif
}

typedef int (*wqp_callback_func)(struct waitq_set *wqset,
    void *ctx,
    struct wq_prepost *wqp,
    struct waitq *waitq);

/**
 * iterate over a chain of preposts associated with a waitq set.
 *
 * Conditions:
 *	wqset is locked
 *
 * Notes:
 *	This loop performs automatic prepost chain management / culling, and
 *	may reset or adjust the waitq set's prepost ID pointer. If you don't
 *	want this extra processing, you can use wq_prepost_iterate().
 */
static int
wq_prepost_foreach_locked(struct waitq_set *wqset,
    void *ctx, wqp_callback_func cb)
{
	int ret = WQ_ITERATE_SUCCESS;
	struct wq_prepost *wqp, *tmp_wqp;

	assert(cb != NULL);

	if (!wqset || !waitq_set_maybe_preposted(wqset)) {
		return WQ_ITERATE_SUCCESS;
	}

restart:
	wqp = wq_prepost_get(wqset->wqset_prepost_id);
	if (!wqp) {
		/*
		 * The prepost object is no longer valid, reset the waitq
		 * set's prepost id.
		 */
		wqset->wqset_prepost_id = 0;
		return WQ_ITERATE_SUCCESS;
	}

	if (wqp_type(wqp) == WQP_WQ) {
		uint64_t __assert_only wqp_id = wqp->wqp_prepostid.id;

		ret = cb(wqset, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr);

		switch (ret) {
		case WQ_ITERATE_INVALIDATE_CONTINUE:
			/* the caller wants to remove the only prepost here */
			assert(wqp_id == wqset->wqset_prepost_id);
			wqset->wqset_prepost_id = 0;
			/* fall through */
		case WQ_ITERATE_CONTINUE:
			wq_prepost_put(wqp);
			ret = WQ_ITERATE_SUCCESS;
			break;
		case WQ_ITERATE_RESTART:
			wq_prepost_put(wqp);
			/* fall through */
		case WQ_ITERATE_DROPPED:
			goto restart;
		default:
			wq_prepost_put(wqp);
			break;
		}
		return ret;
	}

	assert(wqp->wqp_prepostid.id == wqset->wqset_prepost_id);
	assert(wqp_type(wqp) == WQP_POST);

	/*
	 * At this point we know we have a list of POST objects.
	 * Grab a handle to the last element in the list and start
	 * the iteration.
	 */
	tmp_wqp = wq_prepost_get_rnext(wqp);
	assert(tmp_wqp != NULL && wqp_type(tmp_wqp) == WQP_POST);

	uint64_t last_id = tmp_wqp->wqp_prepostid.id;
	wq_prepost_put(tmp_wqp);

	ret = WQ_ITERATE_SUCCESS;
	for (;;) {
		uint64_t wqp_id, first_id, next_id;

		wqp_id = wqp->wqp_prepostid.id;
		first_id = wqset->wqset_prepost_id;
		next_id = wqp->wqp_post.wqp_next_id;

		/* grab the WQP_WQ object this _POST points to */
		tmp_wqp = wq_prepost_get(wqp->wqp_post.wqp_wq_id);
		if (!tmp_wqp) {
			/*
			 * This WQP_POST object points to an invalid
			 * WQP_WQ object - remove the POST object from
			 * the list.
			 */
			if (wq_prepost_remove(wqset, wqp) == 0) {
				wq_prepost_put(wqp);
				goto restart;
			}
			goto next_prepost;
		}
		assert(wqp_type(tmp_wqp) == WQP_WQ);
		/*
		 * make the callback: note that this could remove 'wqp' or
		 * drop the lock on our waitq set. We need to re-validate
		 * our state when this function returns.
		 */
		ret = cb(wqset, ctx, wqp, tmp_wqp->wqp_wq.wqp_wq_ptr);
		wq_prepost_put(tmp_wqp);

		switch (ret) {
		case WQ_ITERATE_CONTINUE:
			/* continue iteration */
			break;
		case WQ_ITERATE_INVALIDATE_CONTINUE:
			assert(next_id == wqp->wqp_post.wqp_next_id);
			if (wq_prepost_remove(wqset, wqp) == 0) {
				wq_prepost_put(wqp);
				goto restart;
			}
			goto next_prepost;
		case WQ_ITERATE_RESTART:
			wq_prepost_put(wqp);
			/* fall-through */
		case WQ_ITERATE_DROPPED:
			/* the callback dropped the ref to wqp: just restart */
			goto restart;
		default:
			/* break out of the iteration for some other reason */
			goto finish_prepost_foreach;
		}

		/*
		 * the set lock may have been dropped during callback,
		 * if something looks different, restart the prepost iteration
		 */
		if (!wqp_is_valid(wqp) ||
		    (wqp->wqp_post.wqp_next_id != next_id) ||
		    wqset->wqset_prepost_id != first_id) {
			wq_prepost_put(wqp);
			goto restart;
		}

next_prepost:
		/* this was the last object in the list */
		if (wqp_id == last_id) {
			break;
		}

		/* get the next object */
		tmp_wqp = wq_prepost_get(next_id);
		if (!tmp_wqp) {
			/*
			 * At this point we've already checked our state
			 * after the callback (which may have dropped the set
			 * lock). If we find an invalid member of the list
			 * then something is wrong.
			 */
			panic("Invalid WQP_POST member 0x%llx in waitq set "
			    "0x%llx prepost list (first:%llx, "
			    "wqp:%p)",
			    next_id, wqset->wqset_id, first_id, wqp);
		}
		wq_prepost_put(wqp);
		wqp = tmp_wqp;

		assert(wqp_type(wqp) == WQP_POST);
	}

finish_prepost_foreach:
	wq_prepost_put(wqp);
	if (ret == WQ_ITERATE_CONTINUE) {
		ret = WQ_ITERATE_SUCCESS;
	}

	return ret;
}

/**
 * Perform a simple loop over a chain of prepost objects
 *
 * Conditions:
 *	If 'prepost_id' is associated with a waitq (set) then that object must
 *	be locked before calling this function.
 *	Callback function, 'cb', must be able to handle a NULL wqset pointer
 *	and a NULL waitq pointer!
 *
 * Notes:
 *	This prepost chain iteration will _not_ automatically adjust any chain
 *	element or linkage. This is the responsibility of the caller! If you
 *	want automatic prepost chain management (at a cost of extra CPU time),
 *	you can use: wq_prepost_foreach_locked().
 */
static int
wq_prepost_iterate(uint64_t prepost_id,
    void *ctx, wqp_callback_func cb)
{
	int ret;
	struct wq_prepost *wqp;

	if (!prepost_id) {
		return WQ_ITERATE_SUCCESS;
	}

	wqp = wq_prepost_get(prepost_id);
	if (!wqp) {
		return WQ_ITERATE_SUCCESS;
	}

	if (wqp_type(wqp) == WQP_WQ) {
		ret = WQ_ITERATE_SUCCESS;
		if (cb) {
			ret = cb(NULL, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr);
		}

		if (ret != WQ_ITERATE_DROPPED) {
			wq_prepost_put(wqp);
		}
		return ret;
	}

	assert(wqp->wqp_prepostid.id == prepost_id);
	assert(wqp_type(wqp) == WQP_POST);

	/* at this point we know we have a list of POST objects */
	uint64_t next_id;

	ret = WQ_ITERATE_CONTINUE;
	do {
		struct wq_prepost *tmp_wqp;
		struct waitq *wq = NULL;

		next_id = wqp->wqp_post.wqp_next_id;

		/* grab the WQP_WQ object this _POST points to */
		tmp_wqp = wq_prepost_get(wqp->wqp_post.wqp_wq_id);
		if (tmp_wqp) {
			assert(wqp_type(tmp_wqp) == WQP_WQ);
			wq = tmp_wqp->wqp_wq.wqp_wq_ptr;
		}

		if (cb) {
			ret = cb(NULL, ctx, wqp, wq);
		}
		if (tmp_wqp) {
			wq_prepost_put(tmp_wqp);
		}

		if (ret != WQ_ITERATE_CONTINUE) {
			break;
		}

		tmp_wqp = wq_prepost_get(next_id);
		if (!tmp_wqp) {
			/*
			 * the chain is broken: nothing we can do here besides
			 * bail from the iteration.
			 */
			ret = WQ_ITERATE_ABORTED;
			break;
		}

		wq_prepost_put(wqp);
		wqp = tmp_wqp;

		assert(wqp_type(wqp) == WQP_POST);
	} while (next_id != prepost_id);

	if (ret != WQ_ITERATE_DROPPED) {
		wq_prepost_put(wqp);
	}

	if (ret == WQ_ITERATE_CONTINUE) {
		ret = WQ_ITERATE_SUCCESS;
	}
	return ret;
}


struct _is_posted_ctx {
	struct waitq *posting_wq;
	int did_prepost;
};

static int
wq_is_preposted_on_set_cb(struct waitq_set *wqset, void *ctx,
    struct wq_prepost *wqp, struct waitq *waitq)
{
	struct _is_posted_ctx *pctx = (struct _is_posted_ctx *)ctx;

	(void)wqset;
	(void)wqp;

	/*
	 * Don't early-out, run through the _entire_ list:
	 * This ensures that we retain a minimum number of invalid elements.
	 */
	if (pctx->posting_wq == waitq) {
		pctx->did_prepost = 1;
	}

	return WQ_ITERATE_CONTINUE;
}


/**
 * checks if 'waitq' has already preposted on 'wqset'
 *
 * Parameters:
 *	waitq    The waitq that's preposting
 *	wqset    The set onto which waitq may be preposted
 *
 * Conditions:
 *	both waitq and wqset are locked
 *
 * Returns non-zero if 'waitq' has already preposted to 'wqset'
 */
static int
wq_is_preposted_on_set(struct waitq *waitq, struct waitq_set *wqset)
{
	int ret;
	struct _is_posted_ctx pctx;

	/*
	 * If the set's only prepost matches the waitq's prepost ID,
	 * then it obviously already preposted to the set.
	 */
	if (waitq->waitq_prepost_id != 0 &&
	    wqset->wqset_prepost_id == waitq->waitq_prepost_id) {
		return 1;
	}

	/* use full prepost iteration: always trim the list */
	pctx.posting_wq = waitq;
	pctx.did_prepost = 0;
	ret = wq_prepost_foreach_locked(wqset, (void *)&pctx,
	    wq_is_preposted_on_set_cb);
	return pctx.did_prepost;
}

static struct wq_prepost *
wq_get_prepost_obj(uint64_t *reserved, int type)
{
	struct wq_prepost *wqp = NULL;
	/*
	 * don't fail just because the caller doesn't have enough
	 * reservations, we've kept a low-water mark on the prepost table,
	 * so there should be some available for us.
	 */
	if (reserved && *reserved) {
		wqp = wq_prepost_rpop(reserved, type);
		assert(wqp->wqte.lt_id.idx < g_prepost_table.nelem);
	} else {
		/*
		 * TODO: if in interrupt context, grab from a special
		 * region / reserved list!
		 */
		wqp = wq_prepost_alloc(type, 1);
	}

	if (wqp == NULL) {
		panic("Couldn't allocate prepost object!");
	}
	return wqp;
}


/**
 * prepost a waitq onto a waitq set
 *
 * Parameters:
 *	wqset    The set onto which waitq will be preposted
 *	waitq    The waitq that's preposting
 *	reserved List (lt_elem_list_ style) of pre-allocated prepost elements
 *	         Could be NULL
 *
 * Conditions:
 *	both wqset and waitq are locked
 *
 * Notes:
 *	If reserved is NULL, this may block on prepost table growth.
 */
static void
wq_prepost_do_post_locked(struct waitq_set *wqset,
    struct waitq *waitq,
    uint64_t *reserved)
{
	struct wq_prepost *wqp_post, *wqp_head, *wqp_tail;

	assert(waitq_held(waitq) && waitq_held(&wqset->wqset_q));

	/*
	 * nothing to do if it's already preposted:
	 * note that this also culls any invalid prepost objects
	 */
	if (wq_is_preposted_on_set(waitq, wqset)) {
		return;
	}

	assert(waitqs_is_linked(wqset));

	/*
	 * This function is called because an event is being posted to 'waitq'.
	 * We need a prepost object associated with this queue. Allocate one
	 * now if the waitq isn't already associated with one.
	 */
	if (waitq->waitq_prepost_id == 0) {
		struct wq_prepost *wqp;
		wqp = wq_get_prepost_obj(reserved, WQP_WQ);
		wqp->wqp_wq.wqp_wq_ptr = waitq;
		wqp_set_valid(wqp);
		waitq->waitq_prepost_id = wqp->wqp_prepostid.id;
		wq_prepost_put(wqp);
	}

#if CONFIG_LTABLE_STATS
	g_prepost_table.npreposts += 1;
#endif

	wqdbg_v("preposting waitq %p (0x%llx) to set 0x%llx",
	    (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq),
	    waitq->waitq_prepost_id, wqset->wqset_id);

	if (wqset->wqset_prepost_id == 0) {
		/* the set has no previous preposts */
		wqset->wqset_prepost_id = waitq->waitq_prepost_id;
		return;
	}

	wqp_head = wq_prepost_get(wqset->wqset_prepost_id);
	if (!wqp_head) {
		/* the previous prepost has become invalid */
		wqset->wqset_prepost_id = waitq->waitq_prepost_id;
		return;
	}

	assert(wqp_head->wqp_prepostid.id == wqset->wqset_prepost_id);

	/*
	 * If we get here, we're going to need at least one new wq_prepost
	 * object. If the previous wqset_prepost_id points to a WQP_WQ, we
	 * actually need to allocate 2 wq_prepost objects because the WQP_WQ
	 * is tied to the waitq and shared across all sets.
	 */
	wqp_post = wq_get_prepost_obj(reserved, WQP_POST);

	wqp_post->wqp_post.wqp_wq_id = waitq->waitq_prepost_id;
	wqdbg_v("POST 0x%llx :: WQ 0x%llx", wqp_post->wqp_prepostid.id,
	    waitq->waitq_prepost_id);

	if (wqp_type(wqp_head) == WQP_WQ) {
		/*
		 * We must replace the wqset_prepost_id with a pointer
		 * to two new WQP_POST objects
		 */
		uint64_t wqp_id = wqp_head->wqp_prepostid.id;
		wqdbg_v("set 0x%llx previous had 1 WQ prepost (0x%llx): "
		    "replacing with two POST preposts",
		    wqset->wqset_id, wqp_id);

		/* drop the old reference */
		wq_prepost_put(wqp_head);

		/* grab another new object (the 2nd of two) */
		wqp_head = wq_get_prepost_obj(reserved, WQP_POST);

		/* point this one to the original WQP_WQ object */
		wqp_head->wqp_post.wqp_wq_id = wqp_id;
		wqdbg_v("POST 0x%llx :: WQ 0x%llx",
		    wqp_head->wqp_prepostid.id, wqp_id);

		/* link it to the new wqp_post object allocated earlier */
		wqp_head->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id;
		/* make the list a double-linked and circular */
		wq_prepost_rlink(wqp_head, wqp_post);

		/*
		 * Finish setting up the new prepost: point it back to the
		 * POST object we allocated to replace the original wqset
		 * WQ prepost object
		 */
		wqp_post->wqp_post.wqp_next_id = wqp_head->wqp_prepostid.id;
		wq_prepost_rlink(wqp_post, wqp_head);

		/* mark objects valid, and reset the wqset prepost list head */
		wqp_set_valid(wqp_head);
		wqp_set_valid(wqp_post);
		wqset->wqset_prepost_id = wqp_head->wqp_prepostid.id;

		/* release both references */
		wq_prepost_put(wqp_head);
		wq_prepost_put(wqp_post);

		wqdbg_v("set 0x%llx: 0x%llx/0x%llx -> 0x%llx/0x%llx -> 0x%llx",
		    wqset->wqset_id, wqset->wqset_prepost_id,
		    wqp_head->wqp_prepostid.id, wqp_head->wqp_post.wqp_next_id,
		    wqp_post->wqp_prepostid.id,
		    wqp_post->wqp_post.wqp_next_id);
		return;
	}

	assert(wqp_type(wqp_head) == WQP_POST);

	/*
	 * Add the new prepost to the end of the prepost list
	 */
	wqp_tail = wq_prepost_get_rnext(wqp_head);
	assert(wqp_tail != NULL);
	assert(wqp_tail->wqp_post.wqp_next_id == wqset->wqset_prepost_id);

	/*
	 * link the head to the new tail
	 * NOTE: this needs to happen first in case wqp_tail == wqp_head
	 */
	wq_prepost_reset_rnext(wqp_head);
	wq_prepost_rlink(wqp_head, wqp_post);

	/* point the new object to the list head, and list tail */
	wqp_post->wqp_post.wqp_next_id = wqp_head->wqp_prepostid.id;
	wq_prepost_rlink(wqp_post, wqp_tail);

	/* point the last item in the waitq set's list to the new object */
	wqp_tail->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id;

	wqp_set_valid(wqp_post);

	wq_prepost_put(wqp_head);
	wq_prepost_put(wqp_tail);
	wq_prepost_put(wqp_post);

	wqdbg_v("set 0x%llx (wqp:0x%llx) last_prepost:0x%llx, "
	    "new_prepost:0x%llx->0x%llx", wqset->wqset_id,
	    wqset->wqset_prepost_id, wqp_head->wqp_prepostid.id,
	    wqp_post->wqp_prepostid.id, wqp_post->wqp_post.wqp_next_id);

	return;
}


/* ----------------------------------------------------------------------
 *
 * Stats collection / reporting
 *
 * ---------------------------------------------------------------------- */
#if CONFIG_LTABLE_STATS && CONFIG_WAITQ_STATS
static void
wq_table_stats(struct link_table *table, struct wq_table_stats *stats)
{
	stats->version = WAITQ_STATS_VERSION;
	stats->table_elements = table->nelem;
	stats->table_used_elems = table->used_elem;
	stats->table_elem_sz = table->elem_sz;
	stats->table_slabs = table->nslabs;
	stats->table_slab_sz = table->slab_sz;

	stats->table_num_allocs = table->nallocs;
	stats->table_num_preposts = table->npreposts;
	stats->table_num_reservations = table->nreservations;

	stats->table_max_used = table->max_used;
	stats->table_avg_used = table->avg_used;
	stats->table_max_reservations = table->max_reservations;
	stats->table_avg_reservations = table->avg_reservations;
}

void
waitq_link_stats(struct wq_table_stats *stats)
{
	if (!stats) {
		return;
	}
	wq_table_stats(&g_wqlinktable, stats);
}

void
waitq_prepost_stats(struct wq_table_stats *stats)
{
	wq_table_stats(&g_prepost_table, stats);
}
#endif

/* ----------------------------------------------------------------------
 *
 * Global Wait Queues
 *
 * ---------------------------------------------------------------------- */

static struct waitq g_boot_waitq;
static struct waitq *global_waitqs = &g_boot_waitq;
static uint32_t g_num_waitqs = 1;

/*
 * Zero out the used MSBs of the event.
 */
#define _CAST_TO_EVENT_MASK(event) ((uintptr_t)(event) & ((1ul << _EVENT_MASK_BITS) - 1ul))

static __inline__ uint32_t
waitq_hash(char *key, size_t length)
{
	uint32_t hash = os_hash_jenkins(key, length);

	hash &= (g_num_waitqs - 1);
	return hash;
}

/* return a global waitq pointer corresponding to the given event */
struct waitq *
_global_eventq(char *event, size_t event_length)
{
	return &global_waitqs[waitq_hash(event, event_length)];
}

/* return an indexed global waitq pointer */
struct waitq *
global_waitq(int index)
{
	return &global_waitqs[index % g_num_waitqs];
}


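/*
 * Note: waitq_hash() masks the Jenkins hash with (g_num_waitqs - 1), so it
 * only distributes events evenly when g_num_waitqs is a power of two; the
 * bootstrap code that sizes the global waitq array is expected to keep it
 * that way.
 */
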
#if CONFIG_LTABLE_STATS || CONFIG_WAITQ_STATS
/* this global is for lldb */
const uint32_t g_nwaitq_btframes = NWAITQ_BTFRAMES;

static __inline__ void
waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip)
{
	uintptr_t buf[NWAITQ_BTFRAMES + skip];
	if (skip < 0) {
		skip = 0;
	}
	memset(buf, 0, (NWAITQ_BTFRAMES + skip) * sizeof(uintptr_t));
	backtrace(buf, g_nwaitq_btframes + skip, NULL);
	memcpy(&bt[0], &buf[skip], NWAITQ_BTFRAMES * sizeof(uintptr_t));
}
#else /* no stats */
#define waitq_grab_backtrace(...)
#endif

#if CONFIG_WAITQ_STATS

struct wq_stats g_boot_stats;
struct wq_stats *g_waitq_stats = &g_boot_stats;

static __inline__ struct wq_stats *
waitq_global_stats(struct waitq *waitq)
{
	struct wq_stats *wqs;
	uint32_t idx;

	if (!waitq_is_global(waitq)) {
		return NULL;
	}

	idx = (uint32_t)(((uintptr_t)waitq - (uintptr_t)global_waitqs) / sizeof(*waitq));
	assert(idx < g_num_waitqs);
	wqs = &g_waitq_stats[idx];
	return wqs;
}

static __inline__ void
waitq_stats_count_wait(struct waitq *waitq)
{
	struct wq_stats *wqs = waitq_global_stats(waitq);
	if (wqs != NULL) {
		wqs->waits++;
		waitq_grab_backtrace(wqs->last_wait, 2);
	}
}

static __inline__ void
waitq_stats_count_wakeup(struct waitq *waitq)
{
	struct wq_stats *wqs = waitq_global_stats(waitq);
	if (wqs != NULL) {
		wqs->wakeups++;
		waitq_grab_backtrace(wqs->last_wakeup, 2);
	}
}

static __inline__ void
waitq_stats_count_clear_wakeup(struct waitq *waitq)
{
	struct wq_stats *wqs = waitq_global_stats(waitq);
	if (wqs != NULL) {
		wqs->wakeups++;
		wqs->clears++;
		waitq_grab_backtrace(wqs->last_wakeup, 2);
	}
}

static __inline__ void
waitq_stats_count_fail(struct waitq *waitq)
{
	struct wq_stats *wqs = waitq_global_stats(waitq);
	if (wqs != NULL) {
		wqs->failed_wakeups++;
		waitq_grab_backtrace(wqs->last_failed_wakeup, 2);
	}
}
#else /* !CONFIG_WAITQ_STATS */
#define waitq_stats_count_wait(q)         do { } while (0)
#define waitq_stats_count_wakeup(q)       do { } while (0)
#define waitq_stats_count_clear_wakeup(q) do { } while (0)
#define waitq_stats_count_fail(q)         do { } while (0)
#endif

int
waitq_is_valid(struct waitq *waitq)
{
	return (waitq != NULL) && waitq->waitq_isvalid;
}

int
waitq_set_is_valid(struct waitq_set *wqset)
{
	return (wqset != NULL) && wqset->wqset_q.waitq_isvalid && waitqs_is_set(wqset);
}

int
waitq_is_global(struct waitq *waitq)
{
	if (waitq >= global_waitqs && waitq < global_waitqs + g_num_waitqs) {
		return 1;
	}
	return 0;
}

int
waitq_irq_safe(struct waitq *waitq)
{
	/* global wait queues have this bit set on initialization */
	return waitq->waitq_irq;
}

static inline bool
waitq_empty(struct waitq *wq)
{
	if (waitq_is_turnstile_queue(wq)) {
		return priority_queue_empty(&wq->waitq_prio_queue);
	} else if (waitq_is_turnstile_proxy(wq)) {
		struct turnstile *ts = wq->waitq_ts;
		return ts == TURNSTILE_NULL ||
		       priority_queue_empty(&ts->ts_waitq.waitq_prio_queue);
	} else {
		return queue_empty(&wq->waitq_queue);
	}
}

static struct waitq *
waitq_get_safeq(struct waitq *waitq)
{
	/* Check if it's a port waitq */
	if (waitq_is_turnstile_proxy(waitq)) {
		struct turnstile *ts = waitq->waitq_ts;
		return ts ? &ts->ts_waitq : NULL;
	}
	return global_eventq(waitq);
}
1836
static uint32_t
waitq_hash_size(void)
{
	uint32_t hsize, queues;

	if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize))) {
		return hsize;
	}

	queues = thread_max / 5;
	hsize = P2ROUNDUP(queues * sizeof(struct waitq), PAGE_SIZE);

	return hsize;
}
1851
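/*
 * Default sizing, in other words: one waitq for every five threads the
 * system can host, rounded up to a whole page; a "wqsize" boot-arg replaces
 * the computation entirely.  Worked example with assumed values
 * (thread_max, sizeof(struct waitq) and the page size are illustrative):
 */
#if 0 /* illustrative sketch only, not compiled */
#define TOY_PAGE_SIZE	4096u
#define TOY_THREAD_MAX	2560u	/* assumed thread_max */
#define TOY_WAITQ_SIZE	24u	/* assumed sizeof(struct waitq) */

static uint32_t
toy_p2roundup(uint32_t x, uint32_t align)	/* align must be a power of two */
{
	return (x + align - 1) & ~(align - 1);
}

static uint32_t
toy_waitq_hash_size(void)
{
	uint32_t queues = TOY_THREAD_MAX / 5;				/* 512 queues */
	return toy_p2roundup(queues * TOY_WAITQ_SIZE, TOY_PAGE_SIZE);	/* 12288 bytes */
}
#endif
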
/*
 * Since the priority ordered waitq uses basepri as the
 * ordering key, assert that this value fits in a uint8_t.
 */
static_assert(MAXPRI <= UINT8_MAX);

static inline void
waitq_thread_insert(struct waitq *wq,
    thread_t thread, boolean_t fifo)
{
	if (waitq_is_turnstile_queue(wq)) {
		turnstile_stats_update(0, TSU_TURNSTILE_BLOCK_COUNT, NULL);
		turnstile_waitq_add_thread_priority_queue(wq, thread);
	} else {
		turnstile_stats_update(0, TSU_REGULAR_WAITQ_BLOCK_COUNT, NULL);
		if (fifo) {
			enqueue_tail(&wq->waitq_queue, &thread->wait_links);
		} else {
			enqueue_head(&wq->waitq_queue, &thread->wait_links);
		}
	}
}

static inline void
waitq_thread_remove(struct waitq *wq,
    thread_t thread)
{
	if (waitq_is_turnstile_queue(wq)) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (THREAD_REMOVED_FROM_TURNSTILE_WAITQ))) | DBG_FUNC_NONE,
		    VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)),
		    thread_tid(thread),
		    0, 0, 0);
		priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links,
		    PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
	} else {
		remqueue(&(thread->wait_links));
	}
}
1891
void
waitq_bootstrap(void)
{
	kern_return_t kret;
	uint32_t whsize, qsz, tmp32;

	g_min_free_table_elem = DEFAULT_MIN_FREE_TABLE_ELEM;
	if (PE_parse_boot_argn("wqt_min_free", &tmp32, sizeof(tmp32)) == TRUE) {
		g_min_free_table_elem = tmp32;
	}
	wqdbg("Minimum free table elements: %d", tmp32);

	lck_grp_init(&waitq_lck_grp, "waitq", LCK_GRP_ATTR_NULL);

	/*
	 * Determine the amount of memory we're willing to reserve for
	 * the waitqueue hash table
	 */
	whsize = waitq_hash_size();

	/* Determine the number of waitqueues we can fit. */
	qsz = sizeof(struct waitq);
	whsize = ROUNDDOWN(whsize, qsz);
	g_num_waitqs = whsize / qsz;

	/*
	 * The hash algorithm requires that this be a power of 2, so we
	 * just mask off all the low-order bits.
	 */
	for (uint32_t i = 0; i < 31; i++) {
		uint32_t bit = (1 << i);
		if ((g_num_waitqs & bit) == g_num_waitqs) {
			break;
		}
		g_num_waitqs &= ~bit;
	}
	assert(g_num_waitqs > 0);

	/* Now determine how much memory we really need. */
	whsize = P2ROUNDUP(g_num_waitqs * qsz, PAGE_SIZE);

	wqdbg("allocating %d global queues (%d bytes)", g_num_waitqs, whsize);
	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&global_waitqs,
	    whsize, 0, KMA_KOBJECT | KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ);
	if (kret != KERN_SUCCESS || global_waitqs == NULL) {
		panic("kernel_memory_allocate() failed to alloc global_waitqs"
		    ", error: %d, whsize: 0x%x", kret, whsize);
	}

#if CONFIG_WAITQ_STATS
	whsize = P2ROUNDUP(g_num_waitqs * sizeof(struct wq_stats), PAGE_SIZE);
	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&g_waitq_stats,
	    whsize, 0, KMA_KOBJECT | KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ);
	if (kret != KERN_SUCCESS || global_waitqs == NULL) {
		panic("kernel_memory_allocate() failed to alloc g_waitq_stats"
		    ", error: %d, whsize: 0x%x", kret, whsize);
	}
	memset(g_waitq_stats, 0, whsize);
#endif

	for (uint32_t i = 0; i < g_num_waitqs; i++) {
		waitq_init(&global_waitqs[i], SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
	}

	waitq_set_zone = zinit(sizeof(struct waitq_set),
	    WAITQ_SET_MAX * sizeof(struct waitq_set),
	    sizeof(struct waitq_set),
	    "waitq sets");
	zone_change(waitq_set_zone, Z_NOENCRYPT, TRUE);

	/* initialize the global waitq link table */
	wql_init();

	/* initialize the global waitq prepost table */
	wqp_init();
}
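
/*
 * The masking loop above leaves g_num_waitqs holding only its highest set
 * bit, i.e. the largest power of two not exceeding the original count.
 * Standalone restatement of that loop (e.g. 1500 -> 1024):
 */
#if 0 /* illustrative sketch only, not compiled */
static uint32_t
toy_floor_pow2(uint32_t n)
{
	uint32_t v = n;
	for (uint32_t i = 0; i < 31; i++) {
		uint32_t bit = (1u << i);
		if ((v & bit) == v) {	/* only one bit (or zero) remains */
			break;
		}
		v &= ~bit;		/* strip low-order bits one at a time */
	}
	return v;
}
#endif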
1968
1969
/* ----------------------------------------------------------------------
 *
 * Wait Queue Implementation
 *
 * ---------------------------------------------------------------------- */

/*
 * Double the standard lock timeout, because wait queues tend
 * to iterate over a number of threads - locking each. If there is
 * a problem with a thread lock, it normally times out at the wait
 * queue level first, hiding the real problem.
 */
/* For x86, the hardware timeout is in TSC units. */
#if defined(__i386__) || defined(__x86_64__)
#define hwLockTimeOut LockTimeOutTSC
#else
#define hwLockTimeOut LockTimeOut
#endif
1988
void
waitq_lock(struct waitq *wq)
{
	if (__improbable(waitq_lock_to(wq,
	    hwLockTimeOut * 2) == 0)) {
		boolean_t wql_acquired = FALSE;

		while (machine_timeout_suspended()) {
			mp_enable_preemption();
			wql_acquired = waitq_lock_to(wq,
			    hwLockTimeOut * 2);
			if (wql_acquired) {
				break;
			}
		}
		if (wql_acquired == FALSE) {
			panic("waitq deadlock - waitq=%p, cpu=%d\n",
			    wq, cpu_number());
		}
	}
#if defined(__x86_64__)
	pltrace(FALSE);
#endif
	assert(waitq_held(wq));
}

void
waitq_unlock(struct waitq *wq)
{
	assert(waitq_held(wq));
#if defined(__x86_64__)
	pltrace(TRUE);
#endif
	waitq_lock_unlock(wq);
}
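
/*
 * Minimal usage sketch (kernel context assumed): how callers in this file
 * bracket a critical section on a waitq.  IRQ-safe queues are additionally
 * protected by splsched(), mirroring waitq_deinit() and friends below.
 */
#if 0 /* illustrative sketch only, not compiled */
static void
example_waitq_critical_section(struct waitq *wq)
{
	spl_t s = 0;

	if (waitq_irq_safe(wq)) {
		s = splsched();
	}
	waitq_lock(wq);

	/* ... inspect or modify the queue while it is held ... */

	waitq_unlock(wq);
	if (waitq_irq_safe(wq)) {
		splx(s);
	}
}
#endif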
2024
2025
/**
 * clear the thread-related waitq state
 *
 * Conditions:
 *	'thread' is locked
 */
static inline void
thread_clear_waitq_state(thread_t thread)
{
	thread->waitq = NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
}
2039
2040
typedef thread_t (*waitq_select_cb)(void *ctx, struct waitq *waitq,
    int is_global, thread_t thread);

struct waitq_select_args {
	/* input parameters */
	struct waitq *posted_waitq;
	struct waitq *waitq;
	event64_t event;
	waitq_select_cb select_cb;
	void *select_ctx;
	int priority;

	uint64_t *reserved_preposts;

	/* output parameters */
	queue_t threadq;
	int max_threads;
	int *nthreads;
	spl_t *spl;
};
2061
2062static void do_waitq_select_n_locked(struct waitq_select_args *args);
2063
2064/**
2065 * callback invoked once for every waitq set to which a waitq belongs
2066 *
2067 * Conditions:
2068 * ctx->posted_waitq is locked
2069 * 'link' points to a valid waitq set
2070 *
2071 * Notes:
2072 * Takes the waitq set lock on the set pointed to by 'link'
2073 * Calls do_waitq_select_n_locked() which could recurse back into
2074 * this function if the waitq set is a member of other sets.
2075 * If no threads were selected, it preposts the input waitq
2076 * onto the waitq set pointed to by 'link'.
2077 */
0a7de745
A
2078static int
2079waitq_select_walk_cb(struct waitq *waitq, void *ctx,
2080 struct waitq_link *link)
3e170ce0
A
2081{
2082 int ret = WQ_ITERATE_CONTINUE;
2083 struct waitq_select_args args = *((struct waitq_select_args *)ctx);
2084 struct waitq_set *wqset;
3e170ce0
A
2085
2086 (void)waitq;
39037602 2087 assert(wql_type(link) == WQL_WQS);
3e170ce0 2088
39037602 2089 wqset = link->wql_wqs.wql_set;
3e170ce0
A
2090 args.waitq = &wqset->wqset_q;
2091
39037602
A
2092 assert(!waitq_irq_safe(waitq));
2093 assert(!waitq_irq_safe(&wqset->wqset_q));
2094
3e170ce0
A
2095 waitq_set_lock(wqset);
2096 /*
2097 * verify that the link wasn't invalidated just before
2098 * we were able to take the lock.
2099 */
0a7de745 2100 if (wqset->wqset_id != link->wql_setid.id) {
3e170ce0 2101 goto out_unlock;
0a7de745 2102 }
3e170ce0 2103
d9a64523
A
2104 assert(waitqs_is_linked(wqset));
2105
3e170ce0
A
2106 /*
2107 * Find any threads waiting on this wait queue set,
2108 * and recurse into any waitq set to which this set belongs.
2109 */
2110 do_waitq_select_n_locked(&args);
2111
cb323159 2112 if (*args.nthreads > 0 || (args.threadq && !queue_empty(args.threadq))) {
3e170ce0 2113 /* at least 1 thread was selected and returned: don't prepost */
cb323159 2114 if (args.max_threads > 0 && *args.nthreads >= args.max_threads) {
3e170ce0
A
2115 /* break out of the setid walk */
2116 ret = WQ_ITERATE_FOUND;
2117 }
cb323159 2118 } else if (args.event == NO_EVENT64) {
3e170ce0
A
2119 /*
2120 * No thread selected: prepost 'waitq' to 'wqset'
2121 * if wqset can handle preposts and the event is set to 0.
2122 * We also make sure to not post waitq sets to other sets.
2123 *
39037602
A
2124 * If the set doesn't support preposts, but does support
2125 * prepost callout/hook interaction, invoke the predefined
2126 * callout function and pass the set's 'prepost_hook.' This
2127 * could potentially release another thread to handle events.
3e170ce0 2128 */
cb323159
A
2129 if (waitq_set_can_prepost(wqset)) {
2130 wq_prepost_do_post_locked(
2131 wqset, waitq, args.reserved_preposts);
2132 } else if (waitq_set_has_prepost_hook(wqset)) {
2133 waitq_set_prepost_hook_t *hook = wqset->wqset_prepost_hook;
2134
2135 /*
2136 * When calling out to the prepost hook,
2137 * we drop the waitq lock, to allow for the kevent
2138			 * subsystem to call into the waitq subsystem again,
2139 * without risking a deadlock.
2140 *
2141 * However, we need to guard against wqset going away,
2142 * so we increment the prepost hook use count
2143 * while the lock is dropped.
2144 *
2145 * This lets waitq_set_deinit() know to wait for the
2146 * prepost hook call to be done before it can proceed.
2147 *
2148 * Note: we need to keep preemption disabled the whole
2149 * time as waitq_set_deinit will spin on this.
2150 */
2151
2152 disable_preemption();
2153 os_atomic_inc(hook, relaxed);
2154 waitq_set_unlock(wqset);
2155
2156 waitq_set__CALLING_PREPOST_HOOK__(hook);
2157
2158 /* Note: after this decrement, the wqset may be deallocated */
2159 os_atomic_dec(hook, relaxed);
2160 enable_preemption();
2161 return ret;
3e170ce0
A
2162 }
2163 }
2164
2165out_unlock:
2166 waitq_set_unlock(wqset);
3e170ce0
A
2167 return ret;
2168}
2169
d9a64523
A
2170/**
2171 * Routine to iterate over the waitq for non-priority ordered waitqs
2172 *
2173 * Conditions:
2174 * args->waitq (and args->posted_waitq) is locked
2175 *
2176 * Notes:
2177 * Uses the optional select callback function to refine the selection
2178 * of one or more threads from a waitq. The select callback is invoked
2179 * once for every thread that is found to be waiting on the input args->waitq.
2180 *
2181 * If one or more threads are selected, this may disable interrupts.
2182 * The previous interrupt state is returned in args->spl and should
2183 * be used in a call to splx() if threads are returned to the caller.
2184 */
0a7de745
A
2185static thread_t
2186waitq_queue_iterate_locked(struct waitq *safeq, struct waitq *waitq,
2187 spl_t spl, struct waitq_select_args *args,
2188 uint32_t *remaining_eventmask)
d9a64523
A
2189{
2190 int max_threads = args->max_threads;
2191 int *nthreads = args->nthreads;
2192 thread_t thread = THREAD_NULL;
2193 thread_t first_thread = THREAD_NULL;
2194
2195 qe_foreach_element_safe(thread, &safeq->waitq_queue, wait_links) {
2196 thread_t t = THREAD_NULL;
2197 assert_thread_magic(thread);
2198
2199 /*
2200 * For non-priority ordered waitqs, we allow multiple events to be
0a7de745 2201 * mux'ed into the same waitq. Also safeqs may contain threads from
d9a64523 2202 * multiple waitqs. Only pick threads that match the
0a7de745 2203 * requested wait event.
d9a64523
A
2204 */
2205 if (thread->waitq == waitq && thread->wait_event == args->event) {
2206 t = thread;
0a7de745 2207 if (first_thread == THREAD_NULL) {
d9a64523 2208 first_thread = thread;
0a7de745 2209 }
d9a64523
A
2210
2211			/* allow the caller to further refine the selection */
0a7de745 2212 if (args->select_cb) {
d9a64523 2213 t = args->select_cb(args->select_ctx, waitq,
0a7de745
A
2214 waitq_is_global(waitq), thread);
2215 }
d9a64523
A
2216 if (t != THREAD_NULL) {
2217 *nthreads += 1;
2218 if (args->threadq) {
2219 /* if output queue, add locked thread to it */
0a7de745 2220 if (*nthreads == 1) {
d9a64523 2221 *(args->spl) = (safeq != waitq) ? spl : splsched();
0a7de745 2222 }
d9a64523
A
2223 thread_lock(t);
2224 thread_clear_waitq_state(t);
2225 re_queue_tail(args->threadq, &t->wait_links);
2226 }
2227 /* only enqueue up to 'max' threads */
0a7de745 2228 if (*nthreads >= max_threads && max_threads > 0) {
d9a64523 2229 break;
0a7de745 2230 }
d9a64523
A
2231 }
2232 }
2233		/* thread wasn't selected so track its event */
2234 if (t == THREAD_NULL) {
2235 *remaining_eventmask |= (thread->waitq != safeq) ?
0a7de745 2236 _CAST_TO_EVENT_MASK(thread->waitq) : _CAST_TO_EVENT_MASK(thread->wait_event);
d9a64523
A
2237 }
2238 }
2239
2240 return first_thread;
2241}
2242
2243/**
2244 * Routine to iterate and remove threads from priority ordered waitqs
2245 *
2246 * Conditions:
2247 * args->waitq (and args->posted_waitq) is locked
2248 *
2249 * Notes:
2250 * The priority ordered waitqs only support maximum priority element removal.
2251 *
2252 * Also, the implementation makes sure that all threads in a priority ordered
2253 * waitq are waiting on the same wait event. This is not necessarily true for
2254 * non-priority ordered waitqs. If one or more threads are selected, this may
2255 * disable interrupts. The previous interrupt state is returned in args->spl
2256 * and should be used in a call to splx() if threads are returned to the caller.
2257 *
2258 * In the future, we could support priority ordered waitqs with multiple wait
2259 * events in the same queue. The way to implement that would be to keep removing
2260 * elements from the waitq and if the event does not match the requested one,
2261 * add it to a local list. This local list of elements needs to be re-inserted
0a7de745
A
2262 * into the priority queue at the end and the select_cb return value &
2263 * remaining_eventmask would need to be handled appropriately. The implementation
2264 * is not very efficient but would work functionally.
d9a64523 2265 */
0a7de745
A
2266static thread_t
2267waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *waitq,
2268 spl_t spl, struct waitq_select_args *args,
2269 uint32_t *remaining_eventmask)
d9a64523
A
2270{
2271 int max_threads = args->max_threads;
2272 int *nthreads = args->nthreads;
2273 thread_t first_thread = THREAD_NULL;
2274 thread_t thread = THREAD_NULL;
2275
0a7de745 2276 /*
d9a64523
A
2277 * The waitq select routines need to handle two cases:
2278 * Case 1: Peek at maximum priority thread in the waitq (remove_op = 0)
2279 * Get the maximum priority thread from the waitq without removing it.
2280 * In that case args->threadq == NULL and max_threads == 1.
2281 * Case 2: Remove 'n' highest priority threads from waitq (remove_op = 1)
2282 * Get max_threads (if available) while removing them from the waitq.
2283 * In that case args->threadq != NULL and max_threads is one of {-1, 1}.
0a7de745
A
2284 *
2285 * The only possible values for remaining_eventmask for the priority queue
2286 * waitq are either 0 (for the remove all threads case) or the original
d9a64523
A
2287 * safeq->waitq_eventmask (for the lookup/remove one thread cases).
2288 */
2289 *remaining_eventmask = safeq->waitq_eventmask;
2290 boolean_t remove_op = !!(args->threadq);
2291
2292 while ((max_threads <= 0) || (*nthreads < max_threads)) {
d9a64523
A
2293 if (priority_queue_empty(&(safeq->waitq_prio_queue))) {
2294 *remaining_eventmask = 0;
2295 break;
2296 }
2297
2298 if (remove_op) {
2299 thread = priority_queue_remove_max(&safeq->waitq_prio_queue,
0a7de745
A
2300 struct thread, wait_prioq_links,
2301 PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
d9a64523
A
2302 } else {
2303 /* For the peek operation, the only valid value for max_threads is 1 */
2304 assert(max_threads == 1);
2305 thread = priority_queue_max(&safeq->waitq_prio_queue,
0a7de745 2306 struct thread, wait_prioq_links);
d9a64523 2307 }
0a7de745
A
2308 /*
2309 * Ensure the wait event matches since priority ordered waitqs do not
d9a64523
A
2310 * support multiple events in the same waitq.
2311 */
2312 assert((thread->waitq == waitq) && (thread->wait_event == args->event));
0a7de745 2313
d9a64523
A
2314 if (args->select_cb) {
2315 /*
0a7de745
A
2316 * Call the select_cb passed into the waitq_select args. The callback
2317 * updates the select_ctx with information about the highest priority
2318 * thread which is eventually used by the caller.
d9a64523 2319 */
0a7de745
A
2320 thread_t __assert_only ret_thread = args->select_cb(args->select_ctx, waitq,
2321 waitq_is_global(waitq), thread);
d9a64523
A
2322 if (!remove_op) {
2323 /* For the peek operation, the thread should not be selected for addition */
2324 assert(ret_thread == THREAD_NULL);
2325 } else {
0a7de745
A
2326 /*
2327 * For the remove operation, the select routine should always return a valid
2328 * thread for priority waitqs. Since all threads in a prioq are equally
2329 * eligible, it should match the thread removed from the prioq. If this
2330 * invariant changes, the implementation would need to handle the
d9a64523
A
2331 * remaining_eventmask here correctly.
2332 */
2333 assert(ret_thread == thread);
2334 }
2335 }
0a7de745
A
2336
2337 if (first_thread == THREAD_NULL) {
d9a64523 2338 first_thread = thread;
cb323159
A
2339 /*
2340 * turnstile_kernel_update_inheritor_on_wake_locked will lock
2341 * first_thread, so call it before locking it.
2342 */
2343 if (args->priority == WAITQ_PROMOTE_ON_WAKE && first_thread != THREAD_NULL && waitq_is_turnstile_queue(safeq)) {
2344 turnstile_kernel_update_inheritor_on_wake_locked(waitq_to_turnstile(safeq), (turnstile_inheritor_t)first_thread, TURNSTILE_INHERITOR_THREAD);
2345 }
0a7de745 2346 }
d9a64523
A
2347
2348 /* For the peek operation, break out early */
0a7de745 2349 if (!remove_op) {
d9a64523 2350 break;
0a7de745 2351 }
d9a64523
A
2352
2353 /* Add the thread to the result thread list */
2354 *nthreads += 1;
0a7de745 2355 if (*nthreads == 1) {
d9a64523 2356 *(args->spl) = (safeq != waitq) ? spl : splsched();
0a7de745 2357 }
d9a64523
A
2358 thread_lock(thread);
2359 thread_clear_waitq_state(thread);
2360 enqueue_tail(args->threadq, &(thread->wait_links));
2361 }
2362
2363 return first_thread;
2364}
2365
3e170ce0
A
2366/**
2367 * generic thread selection from a waitq (and sets to which the waitq belongs)
2368 *
2369 * Conditions:
2370 * args->waitq (and args->posted_waitq) is locked
2371 *
2372 * Notes:
2373 * Uses the optional select callback function to refine the selection
2374 * of one or more threads from a waitq and any set to which the waitq
2375 * belongs. The select callback is invoked once for every thread that
2376 * is found to be waiting on the input args->waitq.
2377 *
2378 * If one or more threads are selected, this may disable interrupts.
2379 * The previous interrupt state is returned in args->spl and should
2380 * be used in a call to splx() if threads are returned to the caller.
2381 */
0a7de745
A
2382static void
2383do_waitq_select_n_locked(struct waitq_select_args *args)
3e170ce0
A
2384{
2385 struct waitq *waitq = args->waitq;
2386 int max_threads = args->max_threads;
d9a64523 2387 thread_t first_thread = THREAD_NULL;
39037602
A
2388 struct waitq *safeq;
2389 uint32_t remaining_eventmask = 0;
2390 uint32_t eventmask;
3e170ce0 2391 int *nthreads = args->nthreads;
39037602 2392 spl_t spl = 0;
3e170ce0
A
2393
2394 assert(max_threads != 0);
2395
39037602
A
2396 if (!waitq_irq_safe(waitq)) {
2397 /* JMM - add flag to waitq to avoid global lookup if no waiters */
2398 eventmask = _CAST_TO_EVENT_MASK(waitq);
d9a64523 2399 safeq = waitq_get_safeq(waitq);
94ff46dc
A
2400 if (safeq == NULL) {
2401 /*
2402 * in the WQT_TSPROXY case, if there's no turnstile,
2403 * there's no queue and no waiters, so we can move straight
2404 * to the waitq set recursion
2405 */
2406 goto handle_waitq_set;
2407 }
2408
0a7de745 2409 if (*nthreads == 0) {
39037602 2410 spl = splsched();
0a7de745 2411 }
39037602
A
2412 waitq_lock(safeq);
2413 } else {
3e170ce0 2414 eventmask = _CAST_TO_EVENT_MASK(args->event);
39037602 2415 safeq = waitq;
3e170ce0
A
2416 }
2417
39037602
A
2418 /*
2419 * If the safeq doesn't have an eventmask (not global) or the event
2420 * we're looking for IS set in its eventmask, then scan the threads
2421 * in that queue for ones that match the original <waitq,event> pair.
2422 */
2423 if (!waitq_is_global(safeq) ||
2424 (safeq->waitq_eventmask & eventmask) == eventmask) {
d9a64523
A
2425 if (waitq_is_turnstile_queue(safeq)) {
2426 first_thread = waitq_prioq_iterate_locked(safeq, waitq,
0a7de745
A
2427 spl, args,
2428 &remaining_eventmask);
d9a64523
A
2429 } else {
2430 first_thread = waitq_queue_iterate_locked(safeq, waitq,
0a7de745
A
2431 spl, args,
2432 &remaining_eventmask);
3e170ce0 2433 }
3e170ce0 2434
39037602
A
2435 /*
2436 * Update the eventmask of global queues we just scanned:
2437 * - If we selected all the threads in the queue, we can clear its
2438 * eventmask.
2439 *
2440 * - If we didn't find enough threads to fill our needs, then we can
2441 * assume we looked at every thread in the queue and the mask we
2442 * computed is complete - so reset it.
2443 */
2444 if (waitq_is_global(safeq)) {
0a7de745 2445 if (waitq_empty(safeq)) {
39037602 2446 safeq->waitq_eventmask = 0;
0a7de745 2447 } else if (max_threads < 0 || *nthreads < max_threads) {
39037602 2448 safeq->waitq_eventmask = remaining_eventmask;
0a7de745 2449 }
39037602
A
2450 }
2451 }
3e170ce0
A
2452
2453 /*
2454 * Grab the first thread in the queue if no other thread was selected.
2455 * We can guarantee that no one has manipulated this thread because
2456 * it's waiting on the given waitq, and we have that waitq locked.
2457 */
2458 if (*nthreads == 0 && first_thread != THREAD_NULL && args->threadq) {
2459 /* we know this is the first (and only) thread */
2460 ++(*nthreads);
39037602 2461 *(args->spl) = (safeq != waitq) ? spl : splsched();
cb323159 2462
3e170ce0
A
2463 thread_lock(first_thread);
2464 thread_clear_waitq_state(first_thread);
d9a64523
A
2465 waitq_thread_remove(safeq, first_thread);
2466 enqueue_tail(args->threadq, &(first_thread->wait_links));
3e170ce0 2467
39037602 2468 /* update the eventmask on [now] empty global queues */
0a7de745 2469 if (waitq_is_global(safeq) && waitq_empty(safeq)) {
39037602 2470 safeq->waitq_eventmask = 0;
0a7de745 2471 }
3e170ce0
A
2472 }
2473
39037602
A
2474 /* unlock the safe queue if we locked one above */
2475 if (safeq != waitq) {
2476 waitq_unlock(safeq);
0a7de745 2477 if (*nthreads == 0) {
39037602 2478 splx(spl);
0a7de745 2479 }
39037602 2480 }
0a7de745
A
2481
2482 if (max_threads > 0 && *nthreads >= max_threads) {
3e170ce0 2483 return;
0a7de745 2484 }
3e170ce0 2485
94ff46dc 2486handle_waitq_set:
3e170ce0
A
2487 /*
2488 * wait queues that are not in any sets
2489 * are the bottom of the recursion
2490 */
0a7de745 2491 if (!waitq->waitq_set_id) {
3e170ce0 2492 return;
0a7de745 2493 }
3e170ce0
A
2494
2495 /* check to see if the set ID for this wait queue is valid */
39037602 2496 struct waitq_link *link = wql_get_link(waitq->waitq_set_id);
3e170ce0
A
2497 if (!link) {
2498 /* the waitq set to which this waitq belonged, has been invalidated */
2499 waitq->waitq_set_id = 0;
2500 return;
2501 }
2502
39037602 2503 wql_put_link(link);
3e170ce0
A
2504
2505 /*
2506 * If this waitq is a member of any wait queue sets, we need to look
2507 * for waiting thread(s) in any of those sets, and prepost all sets that
2508 * don't have active waiters.
2509 *
2510 * Note that we do a local walk of this waitq's links - we manually
2511 * recurse down wait queue set's with non-zero wqset_q.waitq_set_id
2512 */
39037602 2513 (void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
0a7de745 2514 WQL_WQS, (void *)args, waitq_select_walk_cb);
3e170ce0
A
2515}
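
/*
 * The eventmask check above is a cheap pre-filter: a global safeq only has
 * to be scanned when the bit for the posted <waitq,event> is present in its
 * mask.  Toy restatement (toy_event_bit is hypothetical -- the real
 * _CAST_TO_EVENT_MASK macro is defined elsewhere):
 */
#if 0 /* illustrative sketch only, not compiled */
static uint32_t
toy_event_bit(uintptr_t key)
{
	return 1u << (key % 32);
}

static int
toy_should_scan(uint32_t queue_mask, uintptr_t posted_key)
{
	uint32_t bit = toy_event_bit(posted_key);
	/* same shape as: (safeq->waitq_eventmask & eventmask) == eventmask */
	return (queue_mask & bit) == bit;
}
#endif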
2516
2517/**
2518 * main entry point for thread selection from a waitq
2519 *
2520 * Conditions:
2521 * waitq is locked
2522 *
2523 * Returns:
2524 * The number of threads waiting on 'waitq' for 'event' which have
2525 * been placed onto the input 'threadq'
2526 *
2527 * Notes:
0a7de745
A
2528 * The 'select_cb' function is invoked for every thread found waiting on
2529 * 'waitq' for 'event'. The thread is _not_ locked upon callback
3e170ce0
A
2530 * invocation. This parameter may be NULL.
2531 *
2532 * If one or more threads are returned in 'threadq' then the caller is
2533 * responsible to call splx() using the returned 'spl' value. Each
2534 * returned thread is locked.
2535 */
0a7de745
A
2536static __inline__ int
2537waitq_select_n_locked(struct waitq *waitq,
2538 event64_t event,
2539 waitq_select_cb select_cb,
2540 void *select_ctx,
2541 uint64_t *reserved_preposts,
2542 queue_t threadq,
cb323159
A
2543 int max_threads, spl_t *spl,
2544 int priority)
3e170ce0
A
2545{
2546 int nthreads = 0;
2547
2548 struct waitq_select_args args = {
2549 .posted_waitq = waitq,
2550 .waitq = waitq,
2551 .event = event,
2552 .select_cb = select_cb,
2553 .select_ctx = select_ctx,
cb323159 2554 .priority = priority,
3e170ce0
A
2555 .reserved_preposts = reserved_preposts,
2556 .threadq = threadq,
2557 .max_threads = max_threads,
2558 .nthreads = &nthreads,
2559 .spl = spl,
2560 };
2561
2562 do_waitq_select_n_locked(&args);
2563 return nthreads;
2564}
2565
3e170ce0 2566/**
39037602 2567 * select from a waitq a single thread waiting for a given event
3e170ce0
A
2568 *
2569 * Conditions:
2570 * 'waitq' is locked
2571 *
2572 * Returns:
2573 * A locked thread that's been removed from the waitq, but has not
2574 * yet been put on a run queue. Caller is responsible to call splx
2575 * with the '*spl' value.
2576 */
0a7de745
A
2577static thread_t
2578waitq_select_one_locked(struct waitq *waitq, event64_t event,
2579 uint64_t *reserved_preposts,
2580 int priority, spl_t *spl)
3e170ce0
A
2581{
2582 int nthreads;
2583 queue_head_t threadq;
2584
3e170ce0
A
2585 queue_init(&threadq);
2586
5ba3f43e 2587 nthreads = waitq_select_n_locked(waitq, event, NULL, NULL,
cb323159 2588 reserved_preposts, &threadq, 1, spl, priority);
3e170ce0
A
2589
2590 /* if we selected a thread, return it (still locked) */
2591 if (!queue_empty(&threadq)) {
2592 thread_t t;
2593 queue_entry_t qe = dequeue_head(&threadq);
39037602 2594 t = qe_element(qe, struct thread, wait_links);
3e170ce0
A
2595 assert(queue_empty(&threadq)); /* there should be 1 entry */
2596 /* t has been locked and removed from all queues */
2597 return t;
2598 }
2599
2600 return THREAD_NULL;
2601}
2602
3e170ce0
A
2603struct select_thread_ctx {
2604 thread_t thread;
2605 event64_t event;
2606 spl_t *spl;
2607};
2608
2609/**
2610 * link walk callback invoked once for each set to which a waitq belongs
2611 *
2612 * Conditions:
2613 * initial waitq is locked
2614 * ctx->thread is unlocked
2615 *
2616 * Notes:
2617 * This may disable interrupts and early-out of the full DAG link walk by
2618 * returning KERN_ALREADY_IN_SET. In this case, the returned thread has
2619 * been removed from the waitq, its waitq state has been reset, and the
2620 * caller is responsible to call splx() with the returned interrupt state
2621 * in ctx->spl.
2622 */
0a7de745
A
2623static int
2624waitq_select_thread_cb(struct waitq *waitq, void *ctx,
2625 struct waitq_link *link)
3e170ce0
A
2626{
2627 struct select_thread_ctx *stctx = (struct select_thread_ctx *)ctx;
2628 struct waitq_set *wqset;
39037602
A
2629 struct waitq *wqsetq;
2630 struct waitq *safeq;
2631 spl_t s;
3e170ce0
A
2632
2633 (void)waitq;
0a7de745 2634
3e170ce0
A
2635 thread_t thread = stctx->thread;
2636 event64_t event = stctx->event;
2637
0a7de745 2638 if (wql_type(link) != WQL_WQS) {
3e170ce0 2639 return WQ_ITERATE_CONTINUE;
0a7de745 2640 }
3e170ce0 2641
39037602
A
2642 wqset = link->wql_wqs.wql_set;
2643 wqsetq = &wqset->wqset_q;
3e170ce0 2644
39037602
A
2645 assert(!waitq_irq_safe(waitq));
2646 assert(!waitq_irq_safe(wqsetq));
2647
2648 waitq_set_lock(wqset);
2649
2650 s = splsched();
3e170ce0 2651
39037602 2652 /* find and lock the interrupt-safe waitq the thread is thought to be on */
d9a64523 2653 safeq = waitq_get_safeq(wqsetq);
39037602
A
2654 waitq_lock(safeq);
2655
2656 thread_lock(thread);
2657
2658 if ((thread->waitq == wqsetq) && (thread->wait_event == event)) {
d9a64523
A
2659 waitq_thread_remove(wqsetq, thread);
2660 if (waitq_empty(safeq)) {
39037602
A
2661 safeq->waitq_eventmask = 0;
2662 }
3e170ce0 2663 thread_clear_waitq_state(thread);
39037602
A
2664 waitq_unlock(safeq);
2665 waitq_set_unlock(wqset);
3e170ce0
A
2666 /*
2667 * thread still locked,
2668 * return non-zero to break out of WQS walk
2669 */
39037602 2670 *(stctx->spl) = s;
3e170ce0
A
2671 return WQ_ITERATE_FOUND;
2672 }
2673
2674 thread_unlock(thread);
2675 waitq_set_unlock(wqset);
39037602
A
2676 waitq_unlock(safeq);
2677 splx(s);
3e170ce0
A
2678
2679 return WQ_ITERATE_CONTINUE;
2680}
2681
2682/**
2683 * returns KERN_SUCCESS and locks 'thread' if-and-only-if 'thread' is waiting
2684 * on 'waitq' (or any set to which waitq belongs) for 'event'
2685 *
2686 * Conditions:
2687 * 'waitq' is locked
2688 * 'thread' is unlocked
2689 */
0a7de745
A
2690static kern_return_t
2691waitq_select_thread_locked(struct waitq *waitq,
2692 event64_t event,
2693 thread_t thread, spl_t *spl)
3e170ce0 2694{
39037602
A
2695 struct waitq *safeq;
2696 struct waitq_link *link;
3e170ce0
A
2697 struct select_thread_ctx ctx;
2698 kern_return_t kr;
39037602
A
2699 spl_t s;
2700
39037602
A
2701 /* Find and lock the interrupts disabled queue the thread is actually on */
2702 if (!waitq_irq_safe(waitq)) {
d9a64523 2703 safeq = waitq_get_safeq(waitq);
94ff46dc
A
2704 if (safeq == NULL) {
2705 /*
2706 * in the WQT_TSPROXY case, if there's no turnstile,
2707 * there's no queue and no waiters, so we can move straight
2708 * to the waitq set recursion
2709 */
2710 goto handle_waitq_set;
2711 }
2712
2713 s = splsched();
39037602
A
2714 waitq_lock(safeq);
2715 } else {
94ff46dc 2716 s = splsched();
39037602
A
2717 safeq = waitq;
2718 }
3e170ce0 2719
3e170ce0
A
2720 thread_lock(thread);
2721
2722 if ((thread->waitq == waitq) && (thread->wait_event == event)) {
d9a64523
A
2723 waitq_thread_remove(safeq, thread);
2724 if (waitq_empty(safeq)) {
39037602
A
2725 safeq->waitq_eventmask = 0;
2726 }
3e170ce0 2727 thread_clear_waitq_state(thread);
39037602 2728 *spl = s;
3e170ce0
A
2729 /* thread still locked */
2730 return KERN_SUCCESS;
2731 }
2732
2733 thread_unlock(thread);
39037602 2734
0a7de745 2735 if (safeq != waitq) {
39037602 2736 waitq_unlock(safeq);
0a7de745 2737 }
39037602
A
2738
2739 splx(s);
3e170ce0 2740
94ff46dc 2741handle_waitq_set:
0a7de745 2742 if (!waitq->waitq_set_id) {
3e170ce0 2743 return KERN_NOT_WAITING;
0a7de745 2744 }
3e170ce0
A
2745
2746 /* check to see if the set ID for this wait queue is valid */
39037602 2747 link = wql_get_link(waitq->waitq_set_id);
3e170ce0
A
2748 if (!link) {
2749		/* the waitq set to which this waitq belonged has been invalidated */
2750 waitq->waitq_set_id = 0;
2751 return KERN_NOT_WAITING;
2752 }
2753
2754 /*
2755 * The thread may be waiting on a wait queue set to which
2756 * the input 'waitq' belongs. Go look for the thread in
2757 * all wait queue sets. If it's there, we'll remove it
2758 * because it's equivalent to waiting directly on the input waitq.
2759 */
2760 ctx.thread = thread;
2761 ctx.event = event;
2762 ctx.spl = spl;
39037602 2763 kr = walk_waitq_links(LINK_WALK_FULL_DAG, waitq, waitq->waitq_set_id,
0a7de745 2764 WQL_WQS, (void *)&ctx, waitq_select_thread_cb);
3e170ce0 2765
39037602 2766 wql_put_link(link);
3e170ce0
A
2767
2768 /* we found a thread, return success */
0a7de745 2769 if (kr == WQ_ITERATE_FOUND) {
3e170ce0 2770 return KERN_SUCCESS;
0a7de745 2771 }
3e170ce0
A
2772
2773 return KERN_NOT_WAITING;
2774}
2775
0a7de745
A
2776static int
2777prepost_exists_cb(struct waitq_set __unused *wqset,
2778 void __unused *ctx,
2779 struct wq_prepost __unused *wqp,
2780 struct waitq __unused *waitq)
3e170ce0
A
2781{
2782 /* if we get here, then we know that there is a valid prepost object! */
2783 return WQ_ITERATE_FOUND;
2784}
2785
2786/**
2787 * declare a thread's intent to wait on 'waitq' for 'wait_event'
2788 *
2789 * Conditions:
2790 * 'waitq' is locked
3e170ce0 2791 */
0a7de745
A
2792wait_result_t
2793waitq_assert_wait64_locked(struct waitq *waitq,
2794 event64_t wait_event,
2795 wait_interrupt_t interruptible,
2796 wait_timeout_urgency_t urgency,
2797 uint64_t deadline,
2798 uint64_t leeway,
2799 thread_t thread)
3e170ce0
A
2800{
2801 wait_result_t wait_result;
2802 int realtime = 0;
39037602
A
2803 struct waitq *safeq;
2804 uintptr_t eventmask;
2805 spl_t s;
2806
3e170ce0
A
2807
2808 /*
2809 * Warning: Do _not_ place debugging print statements here.
39037602 2810 * The waitq is locked!
3e170ce0 2811 */
39037602 2812 assert(!thread->started || thread == current_thread());
3e170ce0 2813
0a7de745 2814 if (thread->waitq != NULL) {
3e170ce0 2815 panic("thread already waiting on %p", thread->waitq);
0a7de745 2816 }
3e170ce0
A
2817
2818 if (waitq_is_set(waitq)) {
2819 struct waitq_set *wqset = (struct waitq_set *)waitq;
2820 /*
2821 * early-out if the thread is waiting on a wait queue set
2822 * that has already been pre-posted.
2823 */
2824 if (wait_event == NO_EVENT64 && waitq_set_maybe_preposted(wqset)) {
2825 int ret;
2826 /*
2827 * Run through the list of potential preposts. Because
2828 * this is a hot path, we short-circuit the iteration
2829 * if we find just one prepost object.
2830 */
2831 ret = wq_prepost_foreach_locked(wqset, NULL,
0a7de745 2832 prepost_exists_cb);
3e170ce0 2833 if (ret == WQ_ITERATE_FOUND) {
39037602
A
2834 s = splsched();
2835 thread_lock(thread);
3e170ce0 2836 thread->wait_result = THREAD_AWAKENED;
39037602
A
2837 thread_unlock(thread);
2838 splx(s);
3e170ce0
A
2839 return THREAD_AWAKENED;
2840 }
2841 }
2842 }
2843
39037602
A
2844 s = splsched();
2845
2846 /*
2847 * If already dealing with an irq safe wait queue, we are all set.
2848 * Otherwise, determine a global queue to use and lock it.
2849 */
2850 if (!waitq_irq_safe(waitq)) {
d9a64523 2851 safeq = waitq_get_safeq(waitq);
94ff46dc
A
2852 if (__improbable(safeq == NULL)) {
2853 panic("Trying to assert_wait on a turnstile proxy "
2854 "that hasn't been donated one (waitq: %p)", waitq);
2855 }
39037602
A
2856 eventmask = _CAST_TO_EVENT_MASK(waitq);
2857 waitq_lock(safeq);
2858 } else {
2859 safeq = waitq;
2860 eventmask = _CAST_TO_EVENT_MASK(wait_event);
2861 }
2862
2863 /* lock the thread now that we have the irq-safe waitq locked */
2864 thread_lock(thread);
2865
3e170ce0
A
2866 /*
2867 * Realtime threads get priority for wait queue placements.
2868 * This allows wait_queue_wakeup_one to prefer a waiting
2869 * realtime thread, similar in principle to performing
2870 * a wait_queue_wakeup_all and allowing scheduler prioritization
2871 * to run the realtime thread, but without causing the
2872 * lock contention of that scenario.
2873 */
0a7de745 2874 if (thread->sched_pri >= BASEPRI_REALTIME) {
3e170ce0 2875 realtime = 1;
0a7de745 2876 }
3e170ce0
A
2877
2878 /*
2879 * This is the extent to which we currently take scheduling attributes
2880	 * into account. If the thread is vm privileged, we stick it at
2881 * the front of the queue. Later, these queues will honor the policy
2882 * value set at waitq_init time.
2883 */
2884 wait_result = thread_mark_wait_locked(thread, interruptible);
2885 /* thread->wait_result has been set */
2886 if (wait_result == THREAD_WAITING) {
39037602 2887 if (!safeq->waitq_fifo
0a7de745 2888 || (thread->options & TH_OPT_VMPRIV) || realtime) {
d9a64523 2889 waitq_thread_insert(safeq, thread, false);
0a7de745 2890 } else {
d9a64523 2891 waitq_thread_insert(safeq, thread, true);
0a7de745 2892 }
3e170ce0 2893
39037602 2894 /* mark the event and real waitq, even if enqueued on a global safeq */
3e170ce0
A
2895 thread->wait_event = wait_event;
2896 thread->waitq = waitq;
2897
2898 if (deadline != 0) {
2899 boolean_t act;
39037602 2900
3e170ce0 2901 act = timer_call_enter_with_leeway(&thread->wait_timer,
0a7de745
A
2902 NULL,
2903 deadline, leeway,
2904 urgency, FALSE);
2905 if (!act) {
3e170ce0 2906 thread->wait_timer_active++;
0a7de745 2907 }
3e170ce0
A
2908 thread->wait_timer_is_set = TRUE;
2909 }
2910
0a7de745 2911 if (waitq_is_global(safeq)) {
39037602 2912 safeq->waitq_eventmask |= eventmask;
0a7de745 2913 }
3e170ce0
A
2914
2915 waitq_stats_count_wait(waitq);
2916 }
2917
39037602
A
2918 /* unlock the thread */
2919 thread_unlock(thread);
2920
d9a64523
A
2921 /* update the inheritor's thread priority if the waitq is embedded in turnstile */
2922 if (waitq_is_turnstile_queue(safeq) && wait_result == THREAD_WAITING) {
2923 turnstile_recompute_priority_locked(waitq_to_turnstile(safeq));
2924 turnstile_update_inheritor_locked(waitq_to_turnstile(safeq));
2925 }
2926
39037602
A
2927 /* unlock the safeq if we locked it here */
2928 if (safeq != waitq) {
2929 waitq_unlock(safeq);
2930 }
2931
2932 splx(s);
2933
3e170ce0
A
2934 return wait_result;
2935}
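
/*
 * Usage sketch (kernel context assumed, simplified error handling): the
 * usual shape of declaring a wait with the locked API above and then
 * actually blocking.  A zero deadline means no wait timer is armed.
 */
#if 0 /* illustrative sketch only, not compiled */
static wait_result_t
example_wait_for_event(struct waitq *wq, event64_t event)
{
	wait_result_t wr;

	waitq_lock(wq);
	wr = waitq_assert_wait64_locked(wq, event, THREAD_UNINT,
	    TIMEOUT_URGENCY_SYS_NORMAL, 0 /* deadline */, 0 /* leeway */,
	    current_thread());
	waitq_unlock(wq);

	if (wr == THREAD_WAITING) {
		wr = thread_block(THREAD_CONTINUE_NULL);
	}
	return wr;
}
#endif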
2936
2937/**
2938 * remove 'thread' from its current blocking state on 'waitq'
2939 *
2940 * Conditions:
3e170ce0
A
2941 * 'thread' is locked
2942 *
2943 * Notes:
2944 * This function is primarily used by clear_wait_internal in
2945 * sched_prim.c from the thread timer wakeup path
2946 * (i.e. the thread was waiting on 'waitq' with a timeout that expired)
2947 */
0a7de745
A
2948int
2949waitq_pull_thread_locked(struct waitq *waitq, thread_t thread)
3e170ce0 2950{
39037602
A
2951 struct waitq *safeq;
2952
2953 assert_thread_magic(thread);
3e170ce0
A
2954 assert(thread->waitq == waitq);
2955
39037602
A
2956 /* Find the interrupts disabled queue thread is waiting on */
2957 if (!waitq_irq_safe(waitq)) {
d9a64523 2958 safeq = waitq_get_safeq(waitq);
94ff46dc
A
2959 if (__improbable(safeq == NULL)) {
2960 panic("Trying to clear_wait on a turnstile proxy "
2961 "that hasn't been donated one (waitq: %p)", waitq);
2962 }
39037602
A
2963 } else {
2964 safeq = waitq;
2965 }
2966
2967 /* thread is already locked so have to try for the waitq lock */
0a7de745 2968 if (!waitq_lock_try(safeq)) {
39037602 2969 return 0;
0a7de745 2970 }
39037602 2971
d9a64523 2972 waitq_thread_remove(safeq, thread);
3e170ce0
A
2973 thread_clear_waitq_state(thread);
2974 waitq_stats_count_clear_wakeup(waitq);
2975
2976 /* clear the global event mask if this was the last thread there! */
d9a64523 2977 if (waitq_is_global(safeq) && waitq_empty(safeq)) {
39037602
A
2978 safeq->waitq_eventmask = 0;
2979 /* JMM - also mark no-waiters on waitq (if not the same as the safeq) */
2980 }
2981
2982 waitq_unlock(safeq);
2983
2984 return 1;
3e170ce0
A
2985}
2986
2987
2988static __inline__
0a7de745
A
2989void
2990maybe_adjust_thread_pri(thread_t thread,
2991 int priority,
2992 __kdebug_only struct waitq *waitq)
d9a64523 2993{
3e170ce0
A
2994 /*
2995 * If the caller is requesting the waitq subsystem to promote the
2996 * priority of the awoken thread, then boost the thread's priority to
2997 * the default WAITQ_BOOST_PRIORITY (if it's not already equal or
2998 * higher priority). This boost must be removed via a call to
d9a64523
A
2999 * waitq_clear_promotion_locked before the thread waits again.
3000 *
3001 * WAITQ_PROMOTE_PRIORITY is -2.
3002 * Anything above 0 represents a mutex promotion.
3003 * The default 'no action' value is -1.
3004 * TODO: define this in a header
3e170ce0 3005 */
d9a64523
A
3006 if (priority == WAITQ_PROMOTE_PRIORITY) {
3007 uintptr_t trace_waitq = 0;
0a7de745 3008 if (__improbable(kdebug_enable)) {
d9a64523 3009 trace_waitq = VM_KERNEL_UNSLIDE_OR_PERM(waitq);
0a7de745 3010 }
d9a64523
A
3011
3012 sched_thread_promote_reason(thread, TH_SFLAG_WAITQ_PROMOTED, trace_waitq);
3e170ce0
A
3013 }
3014}
3015
d9a64523
A
3016/*
3017 * Clear a potential thread priority promotion from a waitq wakeup
3018 * with WAITQ_PROMOTE_PRIORITY.
3e170ce0 3019 *
d9a64523 3020 * This must be called on the thread which was woken up with TH_SFLAG_WAITQ_PROMOTED.
3e170ce0 3021 */
0a7de745
A
3022void
3023waitq_clear_promotion_locked(struct waitq *waitq, thread_t thread)
3e170ce0
A
3024{
3025 spl_t s;
3026
3027 assert(waitq_held(waitq));
d9a64523
A
3028 assert(thread != THREAD_NULL);
3029 assert(thread == current_thread());
3030
3031 /* This flag is only cleared by the thread itself, so safe to check outside lock */
0a7de745 3032 if ((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) != TH_SFLAG_WAITQ_PROMOTED) {
3e170ce0 3033 return;
0a7de745 3034 }
3e170ce0 3035
0a7de745 3036 if (!waitq_irq_safe(waitq)) {
3e170ce0 3037 s = splsched();
0a7de745 3038 }
3e170ce0
A
3039 thread_lock(thread);
3040
d9a64523 3041 sched_thread_unpromote_reason(thread, TH_SFLAG_WAITQ_PROMOTED, 0);
3e170ce0
A
3042
3043 thread_unlock(thread);
0a7de745 3044 if (!waitq_irq_safe(waitq)) {
3e170ce0 3045 splx(s);
0a7de745 3046 }
3e170ce0
A
3047}
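
/*
 * Pairing sketch (kernel context assumed): a wakeup issued with
 * WAITQ_PROMOTE_PRIORITY boosts the woken thread, and that thread must
 * balance the boost with waitq_clear_promotion_locked() before it waits
 * again.
 */
#if 0 /* illustrative sketch only, not compiled */
static void
example_promoting_wakeup(struct waitq *wq, event64_t event)
{
	waitq_lock(wq);
	(void)waitq_wakeup64_one_locked(wq, event, THREAD_AWAKENED,
	    NULL /* reserved preposts */, WAITQ_PROMOTE_PRIORITY, WAITQ_UNLOCK);
}

/* ...later, on the woken thread, before its next wait: */
static void
example_drop_promotion(struct waitq *wq)
{
	waitq_lock(wq);
	waitq_clear_promotion_locked(wq, current_thread());
	waitq_unlock(wq);
}
#endif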
3048
3049/**
3050 * wakeup all threads waiting on 'waitq' for 'wake_event'
3051 *
3052 * Conditions:
3053 * 'waitq' is locked
3054 *
3055 * Notes:
3056 * May temporarily disable and re-enable interrupts
3057 * and re-adjust thread priority of each awoken thread.
3058 *
3059 * If the input 'lock_state' == WAITQ_UNLOCK then the waitq will have
3060 * been unlocked before calling thread_go() on any returned threads, and
3061 * is guaranteed to be unlocked upon function return.
3062 */
0a7de745
A
3063kern_return_t
3064waitq_wakeup64_all_locked(struct waitq *waitq,
3065 event64_t wake_event,
3066 wait_result_t result,
3067 uint64_t *reserved_preposts,
3068 int priority,
3069 waitq_lock_state_t lock_state)
3e170ce0
A
3070{
3071 kern_return_t ret;
3072 thread_t thread;
3073 spl_t th_spl;
3074 int nthreads;
3075 queue_head_t wakeup_queue;
3076
3077 assert(waitq_held(waitq));
3078 queue_init(&wakeup_queue);
3079
3080 nthreads = waitq_select_n_locked(waitq, wake_event, NULL, NULL,
0a7de745 3081 reserved_preposts,
cb323159 3082 &wakeup_queue, -1, &th_spl, priority);
3e170ce0
A
3083
3084 /* set each thread running */
3085 ret = KERN_NOT_WAITING;
3086
3087#if CONFIG_WAITQ_STATS
39037602 3088 qe_foreach_element(thread, &wakeup_queue, wait_links)
0a7de745 3089 waitq_stats_count_wakeup(waitq);
3e170ce0 3090#endif
0a7de745 3091 if (lock_state == WAITQ_UNLOCK) {
3e170ce0 3092 waitq_unlock(waitq);
0a7de745 3093 }
3e170ce0 3094
39037602
A
3095 qe_foreach_element_safe(thread, &wakeup_queue, wait_links) {
3096 assert_thread_magic(thread);
3097 remqueue(&thread->wait_links);
d9a64523 3098 maybe_adjust_thread_pri(thread, priority, waitq);
3e170ce0
A
3099 ret = thread_go(thread, result);
3100 assert(ret == KERN_SUCCESS);
3101 thread_unlock(thread);
3102 }
0a7de745 3103 if (nthreads > 0) {
3e170ce0 3104 splx(th_spl);
0a7de745 3105 } else {
3e170ce0 3106 waitq_stats_count_fail(waitq);
0a7de745 3107 }
3e170ce0
A
3108
3109 return ret;
3110}
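
/*
 * Companion sketch to the wait example above (kernel context assumed):
 * wake every thread currently waiting on 'wq' for 'event'.  With
 * WAITQ_UNLOCK the queue comes back unlocked regardless of the outcome.
 */
#if 0 /* illustrative sketch only, not compiled */
static kern_return_t
example_wakeup_all(struct waitq *wq, event64_t event)
{
	kern_return_t kr;

	waitq_lock(wq);
	kr = waitq_wakeup64_all_locked(wq, event, THREAD_AWAKENED,
	    NULL /* reserved preposts */, WAITQ_ALL_PRIORITIES, WAITQ_UNLOCK);
	return kr;	/* KERN_NOT_WAITING if nobody was waiting */
}
#endif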
3111
3112/**
3113 * wakeup one thread waiting on 'waitq' for 'wake_event'
3114 *
3115 * Conditions:
3116 * 'waitq' is locked
3117 *
3118 * Notes:
3119 * May temporarily disable and re-enable interrupts.
3120 */
0a7de745
A
3121kern_return_t
3122waitq_wakeup64_one_locked(struct waitq *waitq,
3123 event64_t wake_event,
3124 wait_result_t result,
3125 uint64_t *reserved_preposts,
3126 int priority,
3127 waitq_lock_state_t lock_state)
3e170ce0
A
3128{
3129 thread_t thread;
3130 spl_t th_spl;
3131
3132 assert(waitq_held(waitq));
3133
cb323159
A
3134 thread = waitq_select_one_locked(waitq, wake_event,
3135 reserved_preposts,
3136 priority, &th_spl);
3e170ce0 3137
0a7de745 3138 if (thread != THREAD_NULL) {
3e170ce0 3139 waitq_stats_count_wakeup(waitq);
0a7de745 3140 } else {
3e170ce0 3141 waitq_stats_count_fail(waitq);
0a7de745 3142 }
3e170ce0 3143
0a7de745 3144 if (lock_state == WAITQ_UNLOCK) {
3e170ce0 3145 waitq_unlock(waitq);
0a7de745 3146 }
3e170ce0
A
3147
3148 if (thread != THREAD_NULL) {
d9a64523 3149 maybe_adjust_thread_pri(thread, priority, waitq);
3e170ce0
A
3150 kern_return_t ret = thread_go(thread, result);
3151 assert(ret == KERN_SUCCESS);
3152 thread_unlock(thread);
3153 splx(th_spl);
3154 return ret;
3155 }
3156
3157 return KERN_NOT_WAITING;
3158}
3159
3160/**
3161 * wakeup one thread waiting on 'waitq' for 'wake_event'
3162 *
3163 * Conditions:
3164 * 'waitq' is locked
3165 *
3166 * Returns:
3167 * A locked, runnable thread.
3168 * If return value is non-NULL, interrupts have also
3169 * been disabled, and the caller is responsible to call
3170 * splx() with the returned '*spl' value.
3171 */
39037602
A
3172thread_t
3173waitq_wakeup64_identify_locked(struct waitq *waitq,
0a7de745
A
3174 event64_t wake_event,
3175 wait_result_t result,
3176 spl_t *spl,
3177 uint64_t *reserved_preposts,
3178 int priority,
3179 waitq_lock_state_t lock_state)
3e170ce0
A
3180{
3181 thread_t thread;
3182
3183 assert(waitq_held(waitq));
3184
cb323159
A
3185 thread = waitq_select_one_locked(waitq, wake_event,
3186 reserved_preposts,
3187 priority, spl);
3e170ce0 3188
0a7de745 3189 if (thread != THREAD_NULL) {
3e170ce0 3190 waitq_stats_count_wakeup(waitq);
0a7de745 3191 } else {
3e170ce0 3192 waitq_stats_count_fail(waitq);
0a7de745 3193 }
3e170ce0 3194
0a7de745 3195 if (lock_state == WAITQ_UNLOCK) {
3e170ce0 3196 waitq_unlock(waitq);
0a7de745 3197 }
3e170ce0
A
3198
3199 if (thread != THREAD_NULL) {
3200 kern_return_t __assert_only ret;
3201 ret = thread_go(thread, result);
3202 assert(ret == KERN_SUCCESS);
3203 }
3204
3205 return thread; /* locked if not NULL (caller responsible for spl) */
3206}
3207
3208/**
3209 * wakeup a specific thread iff it's waiting on 'waitq' for 'wake_event'
3210 *
3211 * Conditions:
3212 * 'waitq' is locked
3213 * 'thread' is unlocked
3214 *
3215 * Notes:
3216 * May temporarily disable and re-enable interrupts
3217 *
3218 * If the input lock_state == WAITQ_UNLOCK then the waitq will have been
3219 * unlocked before calling thread_go() if 'thread' is to be awoken, and
3220 * is guaranteed to be unlocked upon function return.
3221 */
0a7de745
A
3222kern_return_t
3223waitq_wakeup64_thread_locked(struct waitq *waitq,
3224 event64_t wake_event,
3225 thread_t thread,
3226 wait_result_t result,
3227 waitq_lock_state_t lock_state)
3e170ce0
A
3228{
3229 kern_return_t ret;
3230 spl_t th_spl;
3231
3232 assert(waitq_held(waitq));
39037602 3233 assert_thread_magic(thread);
3e170ce0
A
3234
3235 /*
3236 * See if the thread was still waiting there. If so, it got
3237 * dequeued and returned locked.
3238 */
3239 ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl);
3240
0a7de745 3241 if (ret == KERN_SUCCESS) {
3e170ce0 3242 waitq_stats_count_wakeup(waitq);
0a7de745 3243 } else {
3e170ce0 3244 waitq_stats_count_fail(waitq);
0a7de745 3245 }
3e170ce0 3246
0a7de745 3247 if (lock_state == WAITQ_UNLOCK) {
3e170ce0 3248 waitq_unlock(waitq);
0a7de745 3249 }
3e170ce0 3250
0a7de745 3251 if (ret != KERN_SUCCESS) {
3e170ce0 3252 return KERN_NOT_WAITING;
0a7de745 3253 }
3e170ce0
A
3254
3255 ret = thread_go(thread, result);
3256 assert(ret == KERN_SUCCESS);
3257 thread_unlock(thread);
3258 splx(th_spl);
3259
3260 return ret;
3261}
3262
3263
3264
3265/* ----------------------------------------------------------------------
3266 *
3267 * In-Kernel API
3268 *
3269 * ---------------------------------------------------------------------- */
3270
3271/**
3272 * initialize a waitq object
3273 */
0a7de745
A
3274kern_return_t
3275waitq_init(struct waitq *waitq, int policy)
3e170ce0
A
3276{
3277 assert(waitq != NULL);
3278
3279 /* only FIFO and LIFO for now */
0a7de745 3280 if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0) {
3e170ce0 3281 return KERN_INVALID_ARGUMENT;
0a7de745 3282 }
3e170ce0
A
3283
3284 waitq->waitq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
3285 waitq->waitq_irq = !!(policy & SYNC_POLICY_DISABLE_IRQ);
3286 waitq->waitq_prepost = 0;
94ff46dc
A
3287 if (policy & SYNC_POLICY_TURNSTILE_PROXY) {
3288 waitq->waitq_type = WQT_TSPROXY;
3289 } else {
3290 waitq->waitq_type = WQT_QUEUE;
3291 }
3292 waitq->waitq_turnstile = !!(policy & SYNC_POLICY_TURNSTILE);
3e170ce0
A
3293 waitq->waitq_eventmask = 0;
3294
3295 waitq->waitq_set_id = 0;
3296 waitq->waitq_prepost_id = 0;
3297
39037602 3298 waitq_lock_init(waitq);
d9a64523
A
3299 if (waitq_is_turnstile_queue(waitq)) {
3300 /* For turnstile, initialize it as a priority queue */
3301 priority_queue_init(&waitq->waitq_prio_queue,
0a7de745 3302 PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
d9a64523 3303 assert(waitq->waitq_fifo == 0);
94ff46dc
A
3304 } else if (policy & SYNC_POLICY_TURNSTILE_PROXY) {
3305 waitq->waitq_ts = TURNSTILE_NULL;
3306 waitq->waitq_tspriv = NULL;
d9a64523
A
3307 } else {
3308 queue_init(&waitq->waitq_queue);
3309 }
3e170ce0 3310
39037602 3311 waitq->waitq_isvalid = 1;
3e170ce0
A
3312 return KERN_SUCCESS;
3313}
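
/*
 * Initialization sketch (kernel context assumed): a waitq embedded in some
 * owning object.  SYNC_POLICY_FIFO wakes in arrival order, SYNC_POLICY_REVERSED
 * gives LIFO, and SYNC_POLICY_FIXED_PRIORITY is rejected by waitq_init() above.
 */
#if 0 /* illustrative sketch only, not compiled */
struct example_object {
	struct waitq eo_waitq;
	int          eo_state;
};

static kern_return_t
example_object_setup(struct example_object *eo)
{
	eo->eo_state = 0;
	return waitq_init(&eo->eo_waitq, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
}
#endif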
3314
3315struct wq_unlink_ctx {
3316 struct waitq *unlink_wq;
3317 struct waitq_set *unlink_wqset;
3318};
3319
3320static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx,
0a7de745 3321 struct wq_prepost *wqp, struct waitq *waitq);
3e170ce0
A
3322
3323/**
39037602 3324 * walk_waitq_links callback to invalidate 'link' parameter
3e170ce0
A
3325 *
3326 * Conditions:
39037602 3327 * Called from walk_waitq_links.
3e170ce0
A
3328 * Note that unlike other callbacks, this one makes no assumptions about
3329 * the 'waitq' parameter, specifically it does not have to be locked or
3330 * even valid.
3331 */
0a7de745
A
3332static int
3333waitq_unlink_all_cb(struct waitq *waitq, void *ctx,
3334 struct waitq_link *link)
3e170ce0
A
3335{
3336 (void)waitq;
3337 (void)ctx;
0a7de745 3338 if (wql_type(link) == WQL_LINK && wql_is_valid(link)) {
39037602 3339 wql_invalidate(link);
0a7de745 3340 }
3e170ce0 3341
39037602 3342 if (wql_type(link) == WQL_WQS) {
3e170ce0 3343 struct waitq_set *wqset;
3e170ce0
A
3344 struct wq_unlink_ctx ulctx;
3345
3346 /*
3347 * When destroying the waitq, take the time to clear out any
3348 * preposts it may have made. This could potentially save time
3349 * on the IPC send path which would otherwise have to iterate
3350 * over lots of dead port preposts.
3351 */
0a7de745 3352 if (waitq->waitq_prepost_id == 0) {
3e170ce0 3353 goto out;
0a7de745 3354 }
3e170ce0 3355
39037602 3356 wqset = link->wql_wqs.wql_set;
3e170ce0 3357 assert(wqset != NULL);
39037602 3358 assert(!waitq_irq_safe(&wqset->wqset_q));
3e170ce0 3359
3e170ce0
A
3360 waitq_set_lock(wqset);
3361
3362 if (!waitq_set_is_valid(wqset)) {
3363 /* someone raced us to teardown */
3364 goto out_unlock;
3365 }
0a7de745 3366 if (!waitq_set_maybe_preposted(wqset)) {
3e170ce0 3367 goto out_unlock;
0a7de745 3368 }
3e170ce0
A
3369
3370 ulctx.unlink_wq = waitq;
3371 ulctx.unlink_wqset = wqset;
3372 (void)wq_prepost_iterate(wqset->wqset_prepost_id, &ulctx,
0a7de745 3373 waitq_unlink_prepost_cb);
3e170ce0
A
3374out_unlock:
3375 waitq_set_unlock(wqset);
3e170ce0
A
3376 }
3377
3378out:
3379 return WQ_ITERATE_CONTINUE;
3380}
3381
3382
3383/**
3384 * cleanup any link/prepost table resources associated with a waitq
3385 */
0a7de745
A
3386void
3387waitq_deinit(struct waitq *waitq)
3e170ce0 3388{
3e170ce0
A
3389 spl_t s;
3390
94ff46dc
A
3391 assert(waitq);
3392 if (!waitq_is_valid(waitq)) {
3393 return;
3394 }
3395
3396 if (!waitq_is_queue(waitq) && !waitq_is_turnstile_proxy(waitq)) {
3e170ce0 3397 return;
0a7de745 3398 }
3e170ce0 3399
0a7de745 3400 if (waitq_irq_safe(waitq)) {
3e170ce0 3401 s = splsched();
0a7de745 3402 }
3e170ce0 3403 waitq_lock(waitq);
94ff46dc
A
3404
3405 if (waitq_valid(waitq)) {
3406 waitq->waitq_isvalid = 0;
3407 if (!waitq_irq_safe(waitq)) {
3408 waitq_unlink_all_unlock(waitq);
3409 /* waitq unlocked and set links deallocated */
3410 goto out;
0a7de745 3411 }
39037602 3412 }
3e170ce0 3413
94ff46dc
A
3414 waitq_unlock(waitq);
3415 if (waitq_irq_safe(waitq)) {
3e170ce0 3416 splx(s);
39037602 3417 }
3e170ce0 3418
94ff46dc
A
3419out:
3420#if MACH_ASSERT
3421 if (waitq_is_turnstile_queue(waitq)) {
3422 assert(priority_queue_empty(&waitq->waitq_prio_queue));
3423 } else if (waitq_is_turnstile_proxy(waitq)) {
3424 assert(waitq->waitq_ts == TURNSTILE_NULL);
3425 } else {
3426 assert(queue_empty(&waitq->waitq_queue));
3427 }
3428#else
3429 (void)0;
3430#endif // MACH_ASSERT
3e170ce0
A
3431}
3432
0a7de745
A
3433void
3434waitq_invalidate_locked(struct waitq *waitq)
39037602
A
3435{
3436 assert(waitq_held(waitq));
3437 assert(waitq_is_valid(waitq));
3438 waitq->waitq_isvalid = 0;
3439}
3e170ce0
A
3440
3441/**
3442 * invalidate the given wq_prepost object
3443 *
3444 * Conditions:
3445 * Called from wq_prepost_iterate (_not_ from wq_prepost_foreach_locked!)
3446 */
0a7de745
A
3447static int
3448wqset_clear_prepost_chain_cb(struct waitq_set __unused *wqset,
3449 void __unused *ctx,
3450 struct wq_prepost *wqp,
3451 struct waitq __unused *waitq)
3e170ce0 3452{
0a7de745 3453 if (wqp_type(wqp) == WQP_POST) {
3e170ce0 3454 wq_prepost_invalidate(wqp);
0a7de745 3455 }
3e170ce0
A
3456 return WQ_ITERATE_CONTINUE;
3457}
3458
3459
3460/**
3461 * allocate and initialize a waitq set object
3462 *
3463 * Conditions:
3464 * may block
3465 *
3466 * Returns:
d9a64523
A
3467 * allocated / initialized waitq_set object.
3468 * the waitq_set object returned does not have
3469 * a waitq_link associated.
3470 *
3e170ce0
A
3471 * NULL on failure
3472 */
0a7de745 3473struct waitq_set *
cb323159 3474waitq_set_alloc(int policy, waitq_set_prepost_hook_t *prepost_hook)
3e170ce0
A
3475{
3476 struct waitq_set *wqset;
3477
3478 wqset = (struct waitq_set *)zalloc(waitq_set_zone);
0a7de745 3479 if (!wqset) {
3e170ce0 3480 panic("Can't allocate a new waitq set from zone %p", waitq_set_zone);
0a7de745 3481 }
3e170ce0
A
3482
3483 kern_return_t ret;
39037602 3484 ret = waitq_set_init(wqset, policy, NULL, prepost_hook);
3e170ce0
A
3485 if (ret != KERN_SUCCESS) {
3486 zfree(waitq_set_zone, wqset);
3487 wqset = NULL;
3488 }
3489
3490 return wqset;
3491}
3492
3493/**
3494 * initialize a waitq set object
3495 *
d9a64523
A
3496 * if no 'reserved_link' object is passed
3497 * the waitq_link will be lazily allocated
3498 * on demand through waitq_set_lazy_init_link.
3e170ce0 3499 */
0a7de745
A
3500kern_return_t
3501waitq_set_init(struct waitq_set *wqset,
3502 int policy, uint64_t *reserved_link,
cb323159 3503 waitq_set_prepost_hook_t *prepost_hook)
3e170ce0 3504{
39037602 3505 struct waitq_link *link;
3e170ce0
A
3506 kern_return_t ret;
3507
3508 memset(wqset, 0, sizeof(*wqset));
3509
3510 ret = waitq_init(&wqset->wqset_q, policy);
0a7de745 3511 if (ret != KERN_SUCCESS) {
3e170ce0 3512 return ret;
0a7de745 3513 }
3e170ce0
A
3514
3515 wqset->wqset_q.waitq_type = WQT_SET;
39037602 3516 if (policy & SYNC_POLICY_PREPOST) {
3e170ce0 3517 wqset->wqset_q.waitq_prepost = 1;
39037602
A
3518 wqset->wqset_prepost_id = 0;
3519 assert(prepost_hook == NULL);
3520 } else {
3e170ce0 3521 wqset->wqset_q.waitq_prepost = 0;
39037602
A
3522 wqset->wqset_prepost_hook = prepost_hook;
3523 }
3e170ce0
A
3524
3525 if (reserved_link && *reserved_link != 0) {
39037602 3526 link = wql_get_reserved(*reserved_link, WQL_WQS);
d9a64523 3527
0a7de745 3528 if (!link) {
d9a64523 3529 panic("Can't allocate link object for waitq set: %p", wqset);
0a7de745 3530 }
d9a64523 3531
3e170ce0
A
3532 /* always consume the caller's reference */
3533 *reserved_link = 0;
d9a64523
A
3534
3535 link->wql_wqs.wql_set = wqset;
3536 wql_mkvalid(link);
3537
3538 wqset->wqset_id = link->wql_setid.id;
3539 wql_put_link(link);
3e170ce0 3540 } else {
d9a64523
A
3541 /*
3542 * Lazy allocate the link only when an actual id is needed.
3543 */
3544 wqset->wqset_id = WQSET_NOT_LINKED;
3545 }
3546
3547 return KERN_SUCCESS;
3548}
3549
3550#if DEVELOPMENT || DEBUG
3551
3552int
3553sysctl_helper_waitq_set_nelem(void)
3554{
3555 return ltable_nelem(&g_wqlinktable);
3556}
3557
3558#endif
3559
3560/**
3561 * initialize a waitq set link.
3562 *
3563 * Conditions:
3564 * may block
 3565 * locks and unlocks the waitq set lock
3566 *
3567 */
3568void
3569waitq_set_lazy_init_link(struct waitq_set *wqset)
3570{
3571 struct waitq_link *link;
3572
3573 assert(get_preemption_level() == 0 && waitq_wait_possible(current_thread()));
3574
3575 waitq_set_lock(wqset);
0a7de745 3576 if (!waitq_set_should_lazy_init_link(wqset)) {
d9a64523
A
3577 waitq_set_unlock(wqset);
3578 return;
3e170ce0 3579 }
d9a64523
A
3580
3581 assert(wqset->wqset_id == WQSET_NOT_LINKED);
3582 waitq_set_unlock(wqset);
3583
3584 link = wql_alloc_link(WQL_WQS);
0a7de745 3585 if (!link) {
3e170ce0 3586 panic("Can't allocate link object for waitq set: %p", wqset);
0a7de745 3587 }
3e170ce0 3588
39037602 3589 link->wql_wqs.wql_set = wqset;
3e170ce0 3590
d9a64523
A
3591 waitq_set_lock(wqset);
3592 if (waitq_set_should_lazy_init_link(wqset)) {
3593 wql_mkvalid(link);
3594 wqset->wqset_id = link->wql_setid.id;
3595 }
3596
3597 assert(wqset->wqset_id != 0);
3598 assert(wqset->wqset_id != WQSET_NOT_LINKED);
3599
3600 waitq_set_unlock(wqset);
3601
39037602 3602 wql_put_link(link);
3e170ce0 3603
d9a64523
A
3604 return;
3605}
3606
3607/**
3608 * checks if a waitq set needs to be linked.
3609 *
3610 */
3611boolean_t
3612waitq_set_should_lazy_init_link(struct waitq_set *wqset)
3613{
3614 if (waitqs_is_linked(wqset) || wqset->wqset_id == 0) {
3615 return FALSE;
3616 }
3617 return TRUE;
3e170ce0
A
3618}
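
/*
 * Illustrative call pattern (it mirrors the check performed in waitq_link
 * and wqset_id below): callers that need a valid wqset_id first ensure the
 * set has been linked:
 *
 *	if (!waitqs_is_linked(wqset))
 *		waitq_set_lazy_init_link(wqset);
 */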
3619
3620/**
3621 * clear out / release any resources associated with a waitq set
3622 *
3623 * Conditions:
3624 * may block
3625 * Note:
3626 * This will render the waitq set invalid, and it must
3627 * be re-initialized with waitq_set_init before it can be used again
3628 */
0a7de745
A
3629void
3630waitq_set_deinit(struct waitq_set *wqset)
3e170ce0 3631{
39037602
A
3632 struct waitq_link *link = NULL;
3633 uint64_t set_id, prepost_id;
3e170ce0 3634
0a7de745 3635 if (!waitqs_is_set(wqset)) {
3e170ce0 3636 panic("trying to de-initialize an invalid wqset @%p", wqset);
0a7de745 3637 }
3e170ce0 3638
39037602 3639 assert(!waitq_irq_safe(&wqset->wqset_q));
d9a64523 3640
3e170ce0 3641 waitq_set_lock(wqset);
cb323159
A
3642
3643 if (waitq_set_has_prepost_hook(wqset)) {
3644 waitq_set_prepost_hook_t *hook = wqset->wqset_prepost_hook;
3645 /*
 3646 * If the wqset_prepost_hook value is non-zero,
 3647 * then another core is currently posting to this waitq set
 3648 * and we need to wait for it to finish what it's doing.
3649 */
3650 while (os_atomic_load(hook, relaxed) != 0) {
3651 waitq_set_unlock(wqset);
3652 delay(1);
3653 waitq_set_lock(wqset);
3654 }
3655 }
3e170ce0
A
3656
3657 set_id = wqset->wqset_id;
3658
d9a64523 3659 if (waitqs_is_linked(wqset) || set_id == 0) {
d9a64523
A
3660 /* grab the set's link object */
3661 link = wql_get_link(set_id);
3662 if (link) {
3663 wql_invalidate(link);
3664 }
3665 /* someone raced us to deinit */
3666 if (!link || wqset->wqset_id != set_id || set_id != link->wql_setid.id) {
3667 if (link) {
3668 wql_put_link(link);
3669 }
3670 waitq_set_unlock(wqset);
3671 return;
3672 }
3e170ce0 3673
d9a64523
A
3674 /* the link should be a valid link object at this point */
3675 assert(link != NULL && wql_type(link) == WQL_WQS);
3e170ce0 3676
d9a64523
A
3677 wqset->wqset_id = 0;
3678 }
3e170ce0 3679
3e170ce0
A
3680 /*
3681 * This set may have a lot of preposts, or may have been a member of
3682 * many other sets. To minimize spinlock hold times, we clear out the
3683 * waitq set data structure under the lock-hold, but don't clear any
3684 * table objects. We keep handles to the prepost and set linkage
3685 * objects and free those outside the critical section.
3686 */
39037602 3687 prepost_id = 0;
d9a64523
A
3688 if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id) {
3689 assert(link != NULL);
39037602 3690 prepost_id = wqset->wqset_prepost_id;
d9a64523 3691 }
39037602 3692 /* else { TODO: notify kqueue subsystem? } */
3e170ce0
A
3693 wqset->wqset_prepost_id = 0;
3694
39037602
A
3695 wqset->wqset_q.waitq_fifo = 0;
3696 wqset->wqset_q.waitq_prepost = 0;
3697 wqset->wqset_q.waitq_isvalid = 0;
3e170ce0 3698
39037602
A
3699 /* don't clear the 'waitq_irq' bit: it's used in locking! */
3700 wqset->wqset_q.waitq_eventmask = 0;
3701
3702 waitq_unlink_all_unlock(&wqset->wqset_q);
3703 /* wqset->wqset_q unlocked and set links deallocated */
3e170ce0 3704
d9a64523
A
3705
3706 if (link) {
3707 /*
3708 * walk_waitq_links may race with us for access to the waitq set.
3709 * If walk_waitq_links has a reference to the set, then we should wait
3710 * until the link's refcount goes to 1 (our reference) before we exit
3711 * this function. That way we ensure that the waitq set memory will
3712 * remain valid even though it's been cleared out.
3713 */
0a7de745 3714 while (wql_refcnt(link) > 1) {
d9a64523 3715 delay(1);
0a7de745 3716 }
d9a64523
A
3717 wql_put_link(link);
3718 }
3e170ce0
A
3719
3720 /* drop / unlink all the prepost table objects */
39037602 3721 /* JMM - can this happen before the delay? */
0a7de745 3722 if (prepost_id) {
39037602 3723 (void)wq_prepost_iterate(prepost_id, NULL,
0a7de745
A
3724 wqset_clear_prepost_chain_cb);
3725 }
3e170ce0
A
3726}
3727
3728/**
3729 * de-initialize and free an allocated waitq set object
3730 *
3731 * Conditions:
3732 * may block
3733 */
0a7de745
A
3734kern_return_t
3735waitq_set_free(struct waitq_set *wqset)
3e170ce0
A
3736{
3737 waitq_set_deinit(wqset);
3738
3739 memset(wqset, 0, sizeof(*wqset));
3740 zfree(waitq_set_zone, wqset);
3741
3742 return KERN_SUCCESS;
3743}
3744
5ba3f43e 3745#if DEVELOPMENT || DEBUG
3e170ce0
A
3746#if CONFIG_WAITQ_DEBUG
3747/**
3748 * return the set ID of 'wqset'
3749 */
0a7de745
A
3750uint64_t
3751wqset_id(struct waitq_set *wqset)
3e170ce0 3752{
0a7de745 3753 if (!wqset) {
3e170ce0 3754 return 0;
0a7de745 3755 }
3e170ce0
A
3756
3757 assert(waitqs_is_set(wqset));
d9a64523
A
3758
3759 if (!waitqs_is_linked(wqset)) {
3760 waitq_set_lazy_init_link(wqset);
3761 }
3762
3e170ce0
A
3763 return wqset->wqset_id;
3764}
3765
3766/**
3767 * returns a pointer to the waitq object embedded in 'wqset'
3768 */
0a7de745
A
3769struct waitq *
3770wqset_waitq(struct waitq_set *wqset)
3e170ce0 3771{
0a7de745 3772 if (!wqset) {
3e170ce0 3773 return NULL;
0a7de745 3774 }
3e170ce0
A
3775
3776 assert(waitqs_is_set(wqset));
3777
3778 return &wqset->wqset_q;
3779}
3780#endif /* CONFIG_WAITQ_DEBUG */
3781#endif /* DEVELOPMENT || DEBUG */
3782
3783
3784/**
3785 * clear all preposts originating from 'waitq'
3786 *
3787 * Conditions:
3788 * 'waitq' locked
3789 * may (rarely) spin waiting for another on-core thread to
3790 * release the last reference to the waitq's prepost link object
3791 *
3792 * NOTE:
3793 * If this function needs to spin, it will drop the waitq lock!
3794 * The return value of the function indicates whether or not this
3795 * happened: 1 == lock was dropped, 0 == lock held
3796 */
0a7de745
A
3797int
3798waitq_clear_prepost_locked(struct waitq *waitq)
3e170ce0
A
3799{
3800 struct wq_prepost *wqp;
3801 int dropped_lock = 0;
3802
39037602
A
3803 assert(!waitq_irq_safe(waitq));
3804
0a7de745 3805 if (waitq->waitq_prepost_id == 0) {
3e170ce0 3806 return 0;
0a7de745 3807 }
3e170ce0
A
3808
3809 wqp = wq_prepost_get(waitq->waitq_prepost_id);
3810 waitq->waitq_prepost_id = 0;
3811 if (wqp) {
3812 uint64_t wqp_id = wqp->wqp_prepostid.id;
3813 wqdbg_v("invalidate prepost 0x%llx (refcnt:%d)",
0a7de745 3814 wqp->wqp_prepostid.id, wqp_refcnt(wqp));
3e170ce0
A
3815 wq_prepost_invalidate(wqp);
3816 while (wqp_refcnt(wqp) > 1) {
3e170ce0
A
3817 /*
3818 * Some other thread must have raced us to grab a link
3819 * object reference before we invalidated it. This
3820 * means that they are probably trying to access the
3821 * waitq to which the prepost object points. We need
3822 * to wait here until the other thread drops their
3823 * reference. We know that no one else can get a
3824 * reference (the object has been invalidated), and
3825 * that prepost references are short-lived (dropped on
3826 * a call to wq_prepost_put). We also know that no one
 3827 * blocks while holding a reference, therefore the
3828 * other reference holder must be on-core. We'll just
3829 * sit and wait for the other reference to be dropped.
3830 */
3831 disable_preemption();
3832
3833 waitq_unlock(waitq);
3e170ce0
A
3834 dropped_lock = 1;
3835 /*
3836 * don't yield here, just spin and assume the other
3837 * consumer is already on core...
3838 */
3839 delay(1);
39037602 3840
3e170ce0
A
3841 waitq_lock(waitq);
3842
3843 enable_preemption();
3844 }
0a7de745 3845 if (wqp_refcnt(wqp) > 0 && wqp->wqp_prepostid.id == wqp_id) {
3e170ce0 3846 wq_prepost_put(wqp);
0a7de745 3847 }
3e170ce0
A
3848 }
3849
3850 return dropped_lock;
3851}
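
/*
 * Caller sketch (illustrative only): because this function may drop and
 * re-acquire the waitq lock, callers that cache state across the call
 * should re-validate that state when the return value is non-zero:
 *
 *	waitq_lock(waitq);
 *	if (waitq_clear_prepost_locked(waitq)) {
 *		... lock was dropped: re-check any cached waitq state ...
 *	}
 *	waitq_unlock(waitq);
 */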
3852
3853/**
3854 * clear all preposts originating from 'waitq'
3855 *
3856 * Conditions:
3857 * 'waitq' is not locked
3858 * may disable and re-enable interrupts
3859 */
0a7de745
A
3860void
3861waitq_clear_prepost(struct waitq *waitq)
3e170ce0 3862{
3e170ce0 3863 assert(waitq_valid(waitq));
39037602 3864 assert(!waitq_irq_safe(waitq));
3e170ce0 3865
3e170ce0
A
3866 waitq_lock(waitq);
3867 /* it doesn't matter to us if the lock is dropped here */
39037602 3868 (void)waitq_clear_prepost_locked(waitq);
3e170ce0 3869 waitq_unlock(waitq);
3e170ce0
A
3870}
3871
3872/**
 3873 * return the waitq's prepost object ID (allocating one if necessary)
3874 *
3875 * Conditions:
3876 * 'waitq' is unlocked
3877 */
0a7de745
A
3878uint64_t
3879waitq_get_prepost_id(struct waitq *waitq)
3e170ce0
A
3880{
3881 struct wq_prepost *wqp;
3882 uint64_t wqp_id = 0;
3e170ce0 3883
0a7de745 3884 if (!waitq_valid(waitq)) {
3e170ce0 3885 return 0;
0a7de745
A
3886 }
3887
39037602 3888 assert(!waitq_irq_safe(waitq));
3e170ce0 3889
3e170ce0
A
3890 waitq_lock(waitq);
3891
0a7de745 3892 if (!waitq_valid(waitq)) {
3e170ce0 3893 goto out_unlock;
0a7de745 3894 }
3e170ce0
A
3895
3896 if (waitq->waitq_prepost_id) {
3897 wqp_id = waitq->waitq_prepost_id;
3898 goto out_unlock;
3899 }
3900
3901 /* don't hold a spinlock while allocating a prepost object */
3902 waitq_unlock(waitq);
3e170ce0
A
3903
3904 wqp = wq_prepost_alloc(WQP_WQ, 1);
0a7de745 3905 if (!wqp) {
3e170ce0 3906 return 0;
0a7de745 3907 }
3e170ce0
A
3908
3909 /* re-acquire the waitq lock */
3e170ce0
A
3910 waitq_lock(waitq);
3911
3912 if (!waitq_valid(waitq)) {
3913 wq_prepost_put(wqp);
3914 wqp_id = 0;
3915 goto out_unlock;
3916 }
3917
3918 if (waitq->waitq_prepost_id) {
3919 /* we were beat by someone else */
3920 wq_prepost_put(wqp);
3921 wqp_id = waitq->waitq_prepost_id;
3922 goto out_unlock;
3923 }
3924
3925 wqp->wqp_wq.wqp_wq_ptr = waitq;
3926
3927 wqp_set_valid(wqp);
3928 wqp_id = wqp->wqp_prepostid.id;
3929 waitq->waitq_prepost_id = wqp_id;
3930
3931 wq_prepost_put(wqp);
3932
3933out_unlock:
3934 waitq_unlock(waitq);
3e170ce0
A
3935
3936 return wqp_id;
3937}
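
/*
 * Note on the allocation pattern above: the waitq lock is a spinlock, so the
 * prepost object is allocated with the lock dropped; the waitq is then
 * re-validated and re-checked for a racing allocation before the new object
 * is installed, and the loser of that race simply releases its object.
 */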
3938
3939
0a7de745
A
3940static int
3941waitq_inset_cb(struct waitq *waitq, void *ctx, struct waitq_link *link)
3e170ce0
A
3942{
3943 uint64_t setid = *(uint64_t *)ctx;
39037602 3944 int wqltype = wql_type(link);
3e170ce0 3945 (void)waitq;
39037602 3946 if (wqltype == WQL_WQS && link->wql_setid.id == setid) {
3e170ce0
A
3947 wqdbg_v(" waitq already in set 0x%llx", setid);
3948 return WQ_ITERATE_FOUND;
39037602 3949 } else if (wqltype == WQL_LINK) {
3e170ce0
A
3950 /*
3951 * break out early if we see a link that points to the setid
3952 * in question. This saves us a step in the
3953 * iteration/recursion
3954 */
39037602
A
3955 wqdbg_v(" waitq already in set 0x%llx (WQL_LINK)", setid);
3956 if (link->wql_link.left_setid == setid ||
0a7de745 3957 link->wql_link.right_setid == setid) {
3e170ce0 3958 return WQ_ITERATE_FOUND;
0a7de745 3959 }
3e170ce0
A
3960 }
3961
3962 return WQ_ITERATE_CONTINUE;
3963}
3964
3965/**
3966 * determine if 'waitq' is a member of 'wqset'
3967 *
3968 * Conditions:
 3969 * neither 'waitq' nor 'wqset' is locked
3970 * may disable and re-enable interrupts while locking 'waitq'
3971 */
0a7de745
A
3972boolean_t
3973waitq_member(struct waitq *waitq, struct waitq_set *wqset)
3e170ce0
A
3974{
3975 kern_return_t kr = WQ_ITERATE_SUCCESS;
3976 uint64_t setid;
3e170ce0 3977
0a7de745 3978 if (!waitq_valid(waitq)) {
3e170ce0 3979 panic("Invalid waitq: %p", waitq);
0a7de745 3980 }
39037602 3981 assert(!waitq_irq_safe(waitq));
3e170ce0 3982
0a7de745 3983 if (!waitqs_is_set(wqset)) {
3e170ce0 3984 return FALSE;
0a7de745 3985 }
d9a64523 3986
3e170ce0
A
3987 waitq_lock(waitq);
3988
0a7de745
A
3989 if (!waitqs_is_linked(wqset)) {
3990 goto out_unlock;
3991 }
d9a64523 3992
3e170ce0 3993 setid = wqset->wqset_id;
3e170ce0
A
3994
3995 /* fast path: most waitqs are members of only 1 set */
3996 if (waitq->waitq_set_id == setid) {
3997 waitq_unlock(waitq);
3e170ce0
A
3998 return TRUE;
3999 }
4000
4001 /* walk the link table and look for the Set ID of wqset */
39037602 4002 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
0a7de745 4003 WQL_ALL, (void *)&setid, waitq_inset_cb);
3e170ce0
A
4004
4005out_unlock:
4006 waitq_unlock(waitq);
0a7de745 4007 return kr == WQ_ITERATE_FOUND;
3e170ce0
A
4008}
4009
4010/**
 4011 * Returns true if the given waitq is a member of at least 1 set
4012 */
0a7de745
A
4013boolean_t
4014waitq_in_set(struct waitq *waitq)
3e170ce0 4015{
39037602 4016 struct waitq_link *link;
3e170ce0 4017 boolean_t inset = FALSE;
3e170ce0 4018
0a7de745 4019 if (waitq_irq_safe(waitq)) {
39037602 4020 return FALSE;
0a7de745 4021 }
39037602 4022
3e170ce0
A
4023 waitq_lock(waitq);
4024
0a7de745 4025 if (!waitq->waitq_set_id) {
3e170ce0 4026 goto out_unlock;
0a7de745 4027 }
3e170ce0 4028
39037602 4029 link = wql_get_link(waitq->waitq_set_id);
3e170ce0
A
4030 if (link) {
4031 /* if we get here, the waitq is in _at_least_one_ set */
4032 inset = TRUE;
39037602 4033 wql_put_link(link);
3e170ce0
A
4034 } else {
4035 /* we can just optimize this for next time */
4036 waitq->waitq_set_id = 0;
4037 }
4038
4039out_unlock:
4040 waitq_unlock(waitq);
3e170ce0
A
4041 return inset;
4042}
4043
4044
4045/**
4046 * pre-allocate a waitq link structure from the link table
4047 *
4048 * Conditions:
4049 * 'waitq' is not locked
4050 * may (rarely) block if link table needs to grow
4051 */
0a7de745
A
4052uint64_t
4053waitq_link_reserve(struct waitq *waitq)
3e170ce0 4054{
39037602 4055 struct waitq_link *link;
3e170ce0
A
4056 uint64_t reserved_id = 0;
4057
4058 assert(get_preemption_level() == 0 && waitq_wait_possible(current_thread()));
4059
4060 /*
4061 * We've asserted that the caller can block, so we enforce a
4062 * minimum-free table element policy here.
4063 */
39037602 4064 wql_ensure_free_space();
3e170ce0
A
4065
4066 (void)waitq;
39037602 4067 link = wql_alloc_link(LT_RESERVED);
0a7de745 4068 if (!link) {
3e170ce0 4069 return 0;
0a7de745 4070 }
3e170ce0 4071
39037602 4072 reserved_id = link->wql_setid.id;
3e170ce0
A
4073
4074 return reserved_id;
4075}
4076
4077/**
4078 * release a pre-allocated waitq link structure
4079 */
0a7de745
A
4080void
4081waitq_link_release(uint64_t id)
3e170ce0 4082{
39037602 4083 struct waitq_link *link;
3e170ce0 4084
0a7de745 4085 if (id == 0) {
3e170ce0 4086 return;
0a7de745 4087 }
3e170ce0 4088
39037602 4089 link = wql_get_reserved(id, WQL_LINK);
0a7de745 4090 if (!link) {
3e170ce0 4091 return;
0a7de745 4092 }
3e170ce0
A
4093
4094 /*
4095 * if we successfully got a link object, then we know
4096 * it's not been marked valid, and can be released with
39037602 4097 * a standard wql_put_link() which should free the element.
3e170ce0 4098 */
39037602
A
4099 wql_put_link(link);
4100#if CONFIG_LTABLE_STATS
4101 g_wqlinktable.nreserved_releases += 1;
3e170ce0
A
4102#endif
4103}
4104
4105/**
4106 * link 'waitq' to the set identified by 'setid' using the 'link' structure
4107 *
4108 * Conditions:
4109 * 'waitq' is locked
4110 * caller should have a reference to the 'link' object
4111 */
0a7de745
A
4112static kern_return_t
4113waitq_link_internal(struct waitq *waitq,
4114 uint64_t setid, struct waitq_link *link)
3e170ce0 4115{
39037602 4116 struct waitq_link *qlink;
3e170ce0
A
4117 kern_return_t kr;
4118
4119 assert(waitq_held(waitq));
d9a64523
A
4120 assert(setid != 0);
4121 assert(setid != WQSET_NOT_LINKED);
3e170ce0
A
4122
4123 /*
4124 * If the waitq_set_id field is empty, then this waitq is not
4125 * a member of any other set. All we have to do is update the
4126 * field.
4127 */
4128 if (!waitq->waitq_set_id) {
4129 waitq->waitq_set_id = setid;
4130 return KERN_SUCCESS;
4131 }
4132
39037602 4133 qlink = wql_get_link(waitq->waitq_set_id);
3e170ce0
A
4134 if (!qlink) {
4135 /*
4136 * The set to which this wait queue belonged has been
4137 * destroyed / invalidated. We can re-use the waitq field.
4138 */
4139 waitq->waitq_set_id = setid;
4140 return KERN_SUCCESS;
4141 }
39037602 4142 wql_put_link(qlink);
3e170ce0
A
4143
4144 /*
4145 * Check to see if it's already a member of the set.
4146 *
4147 * TODO: check for cycles!
4148 */
39037602 4149 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
0a7de745
A
4150 WQL_ALL, (void *)&setid, waitq_inset_cb);
4151 if (kr == WQ_ITERATE_FOUND) {
d9a64523 4152 return KERN_ALREADY_IN_SET;
0a7de745 4153 }
3e170ce0
A
4154
4155 /*
4156 * This wait queue is a member of at least one set already,
4157 * and _not_ a member of the given set. Use our previously
4158 * allocated link object, and hook it up to the wait queue.
4159 * Note that it's possible that one or more of the wait queue sets to
4160 * which the wait queue belongs was invalidated before we allocated
4161 * this link object. That's OK because the next time we use that
4162 * object we'll just ignore it.
4163 */
39037602
A
4164 link->wql_link.left_setid = setid;
4165 link->wql_link.right_setid = waitq->waitq_set_id;
4166 wql_mkvalid(link);
3e170ce0 4167
39037602 4168 waitq->waitq_set_id = link->wql_setid.id;
3e170ce0
A
4169
4170 return KERN_SUCCESS;
4171}
4172
4173/**
4174 * link 'waitq' to 'wqset'
4175 *
4176 * Conditions:
4177 * if 'lock_state' contains WAITQ_SHOULD_LOCK, 'waitq' must be unlocked.
4178 * Otherwise, 'waitq' must be locked.
4179 *
4180 * may (rarely) block on link table allocation if the table has to grow,
4181 * and no 'reserved_link' object is passed.
4182 *
d9a64523
A
4183 * may block and acquire wqset lock if the wqset passed has no link.
4184 *
3e170ce0
A
4185 * Notes:
4186 * The caller can guarantee that this function will never block by
d9a64523 4187 * - pre-allocating a link table object and passing its ID in 'reserved_link'
0a7de745
A
4188 * - and pre-allocating the waitq set link calling waitq_set_lazy_init_link.
4189 * It is not possible to provide a reserved_link without having also linked
d9a64523 4190 * the wqset.
3e170ce0 4191 */
0a7de745
A
4192kern_return_t
4193waitq_link(struct waitq *waitq, struct waitq_set *wqset,
4194 waitq_lock_state_t lock_state, uint64_t *reserved_link)
3e170ce0
A
4195{
4196 kern_return_t kr;
39037602 4197 struct waitq_link *link;
3e170ce0 4198 int should_lock = (lock_state == WAITQ_SHOULD_LOCK);
3e170ce0 4199
0a7de745 4200 if (!waitq_valid(waitq) || waitq_irq_safe(waitq)) {
3e170ce0 4201 panic("Invalid waitq: %p", waitq);
0a7de745 4202 }
3e170ce0 4203
0a7de745 4204 if (!waitqs_is_set(wqset)) {
3e170ce0 4205 return KERN_INVALID_ARGUMENT;
0a7de745 4206 }
3e170ce0 4207
d9a64523
A
4208 if (!reserved_link || *reserved_link == 0) {
4209 if (!waitqs_is_linked(wqset)) {
4210 waitq_set_lazy_init_link(wqset);
4211 }
4212 }
4213
3e170ce0 4214 wqdbg_v("Link waitq %p to wqset 0x%llx",
0a7de745 4215 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id);
3e170ce0 4216
3e170ce0
A
4217 /*
4218 * We _might_ need a new link object here, so we'll grab outside
4219 * the lock because the alloc call _might_ block.
4220 *
39037602 4221 * If the caller reserved a link beforehand, then wql_get_link
3e170ce0
A
4222 * is guaranteed not to block because the caller holds an extra
4223 * reference to the link which, in turn, hold a reference to the
4224 * link table.
4225 */
4226 if (reserved_link && *reserved_link != 0) {
39037602 4227 link = wql_get_reserved(*reserved_link, WQL_LINK);
3e170ce0
A
4228 /* always consume the caller's reference */
4229 *reserved_link = 0;
4230 } else {
39037602 4231 link = wql_alloc_link(WQL_LINK);
3e170ce0 4232 }
0a7de745 4233 if (!link) {
3e170ce0 4234 return KERN_NO_SPACE;
0a7de745 4235 }
3e170ce0
A
4236
4237 if (should_lock) {
3e170ce0
A
4238 waitq_lock(waitq);
4239 }
4240
4241 kr = waitq_link_internal(waitq, wqset->wqset_id, link);
4242
4243 if (should_lock) {
4244 waitq_unlock(waitq);
3e170ce0
A
4245 }
4246
39037602 4247 wql_put_link(link);
3e170ce0
A
4248
4249 return kr;
4250}
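
/*
 * Non-blocking usage sketch (illustrative only): to guarantee that
 * waitq_link() cannot block, pre-link the set and pre-reserve a link
 * object as described in the Notes above:
 *
 *	uint64_t rlink;
 *	kern_return_t kr;
 *
 *	if (!waitqs_is_linked(wqset))
 *		waitq_set_lazy_init_link(wqset);
 *	rlink = waitq_link_reserve(waitq);
 *	kr = waitq_link(waitq, wqset, WAITQ_SHOULD_LOCK, &rlink);
 *	waitq_link_release(rlink);    harmless if the reservation was consumed
 */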
4251
4252/**
4253 * helper: unlink 'waitq' from waitq set identified by 'setid'
4254 * this function also prunes invalid objects from the tree
4255 *
4256 * Conditions:
39037602 4257 * MUST be called from walk_waitq_links link table walk
3e170ce0
A
4258 * 'waitq' is locked
4259 *
4260 * Notes:
4261 * This is a helper function which compresses the link table by culling
4262 * unused or unnecessary links. See comments below for different
4263 * scenarios.
4264 */
0a7de745
A
4265static inline int
4266waitq_maybe_remove_link(struct waitq *waitq,
4267 uint64_t setid,
4268 struct waitq_link *parent,
4269 struct waitq_link *left,
4270 struct waitq_link *right)
3e170ce0
A
4271{
4272 uint64_t *wq_setid = &waitq->waitq_set_id;
4273
4274 /*
4275 * There are two scenarios:
4276 *
4277 * Scenario 1:
4278 * --------------------------------------------------------------------
4279 * waitq->waitq_set_id == parent
4280 *
4281 * parent(LINK)
4282 * / \
4283 * / \
4284 * / \
4285 * L(LINK/WQS_l) R(LINK/WQS_r)
4286 *
4287 * In this scenario, we assert that the original waitq points to the
4288 * parent link we were passed in. If WQS_l (or WQS_r) is the waitq
4289 * set we're looking for, we can set the corresponding parent
4290 * link id (left or right) to 0. To compress the tree, we can reset the
4291 * waitq_set_id of the original waitq to point to the side of the
4292 * parent that is still valid. We then discard the parent link object.
4293 */
39037602 4294 if (*wq_setid == parent->wql_setid.id) {
3e170ce0
A
4295 if (!left && !right) {
4296 /* completely invalid children */
39037602 4297 wql_invalidate(parent);
3e170ce0
A
4298 wqdbg_v("S1, L+R");
4299 *wq_setid = 0;
4300 return WQ_ITERATE_INVALID;
39037602 4301 } else if (!left || left->wql_setid.id == setid) {
3e170ce0
A
4302 /*
4303 * left side matches we know it points either to the
4304 * WQS we're unlinking, or to an invalid object:
4305 * no need to invalidate it
4306 */
39037602
A
4307 *wq_setid = right ? right->wql_setid.id : 0;
4308 wql_invalidate(parent);
3e170ce0
A
4309 wqdbg_v("S1, L");
4310 return left ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
39037602 4311 } else if (!right || right->wql_setid.id == setid) {
3e170ce0
A
4312 /*
 4313 * if the right side matches, we know it points either to the
4314 * WQS we're unlinking, or to an invalid object:
4315 * no need to invalidate it
4316 */
39037602
A
4317 *wq_setid = left ? left->wql_setid.id : 0;
4318 wql_invalidate(parent);
3e170ce0
A
4319 wqdbg_v("S1, R");
4320 return right ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4321 }
4322 }
4323
4324 /*
4325 * the tree walk starts at the top-of-tree and moves down,
4326 * so these are safe asserts.
4327 */
4328 assert(left || right); /* one of them has to be valid at this point */
4329
4330 /*
4331 * Scenario 2:
4332 * --------------------------------------------------------------------
4333 * waitq->waitq_set_id == ... (OR parent)
4334 *
4335 * ...
4336 * |
4337 * parent
4338 * / \
4339 * / \
4340 * L(LINK) R(LINK)
4341 * /\ /\
4342 * / \ / \
4343 * / \ Rl(*) Rr(*)
4344 * Ll(WQS) Lr(WQS)
4345 *
4346 * In this scenario, a leaf node of either the left or right side
4347 * could be the wait queue set we're looking to unlink. We also handle
4348 * the case where one of these links is invalid. If a leaf node is
4349 * invalid or it's the set we're looking for, we can safely remove the
4350 * middle link (left or right) and point the parent link directly to
4351 * the remaining leaf node.
4352 */
39037602 4353 if (left && wql_type(left) == WQL_LINK) {
3e170ce0 4354 uint64_t Ll, Lr;
39037602
A
4355 struct waitq_link *linkLl, *linkLr;
4356 assert(left->wql_setid.id != setid);
4357 Ll = left->wql_link.left_setid;
4358 Lr = left->wql_link.right_setid;
4359 linkLl = wql_get_link(Ll);
4360 linkLr = wql_get_link(Lr);
3e170ce0
A
4361 if (!linkLl && !linkLr) {
4362 /*
4363 * The left object points to two invalid objects!
4364 * We can invalidate the left w/o touching the parent.
4365 */
39037602 4366 wql_invalidate(left);
3e170ce0
A
4367 wqdbg_v("S2, Ll+Lr");
4368 return WQ_ITERATE_INVALID;
4369 } else if (!linkLl || Ll == setid) {
4370 /* Ll is invalid and/or the wait queue set we're looking for */
39037602
A
4371 parent->wql_link.left_setid = Lr;
4372 wql_invalidate(left);
4373 wql_put_link(linkLl);
4374 wql_put_link(linkLr);
3e170ce0
A
4375 wqdbg_v("S2, Ll");
4376 return linkLl ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4377 } else if (!linkLr || Lr == setid) {
4378 /* Lr is invalid and/or the wait queue set we're looking for */
39037602
A
4379 parent->wql_link.left_setid = Ll;
4380 wql_invalidate(left);
4381 wql_put_link(linkLr);
4382 wql_put_link(linkLl);
3e170ce0
A
4383 wqdbg_v("S2, Lr");
4384 return linkLr ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4385 }
39037602
A
4386 wql_put_link(linkLl);
4387 wql_put_link(linkLr);
3e170ce0
A
4388 }
4389
39037602 4390 if (right && wql_type(right) == WQL_LINK) {
3e170ce0 4391 uint64_t Rl, Rr;
39037602
A
4392 struct waitq_link *linkRl, *linkRr;
4393 assert(right->wql_setid.id != setid);
4394 Rl = right->wql_link.left_setid;
4395 Rr = right->wql_link.right_setid;
4396 linkRl = wql_get_link(Rl);
4397 linkRr = wql_get_link(Rr);
3e170ce0
A
4398 if (!linkRl && !linkRr) {
4399 /*
4400 * The right object points to two invalid objects!
4401 * We can invalidate the right w/o touching the parent.
4402 */
39037602 4403 wql_invalidate(right);
3e170ce0
A
4404 wqdbg_v("S2, Rl+Rr");
4405 return WQ_ITERATE_INVALID;
4406 } else if (!linkRl || Rl == setid) {
4407 /* Rl is invalid and/or the wait queue set we're looking for */
39037602
A
4408 parent->wql_link.right_setid = Rr;
4409 wql_invalidate(right);
4410 wql_put_link(linkRl);
4411 wql_put_link(linkRr);
3e170ce0
A
4412 wqdbg_v("S2, Rl");
4413 return linkRl ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4414 } else if (!linkRr || Rr == setid) {
4415 /* Rr is invalid and/or the wait queue set we're looking for */
39037602
A
4416 parent->wql_link.right_setid = Rl;
4417 wql_invalidate(right);
4418 wql_put_link(linkRl);
4419 wql_put_link(linkRr);
3e170ce0
A
4420 wqdbg_v("S2, Rr");
4421 return linkRr ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4422 }
39037602
A
4423 wql_put_link(linkRl);
4424 wql_put_link(linkRr);
3e170ce0
A
4425 }
4426
4427 return WQ_ITERATE_CONTINUE;
4428}
4429
4430/**
4431 * link table walk callback that unlinks 'waitq' from 'ctx->setid'
4432 *
4433 * Conditions:
39037602 4434 * called from walk_waitq_links
3e170ce0
A
4435 * 'waitq' is locked
4436 *
4437 * Notes:
4438 * uses waitq_maybe_remove_link() to compress the linktable and
4439 * perform the actual unlinking
4440 */
0a7de745
A
4441static int
4442waitq_unlink_cb(struct waitq *waitq, void *ctx,
4443 struct waitq_link *link)
3e170ce0
A
4444{
4445 uint64_t setid = *((uint64_t *)ctx);
39037602 4446 struct waitq_link *right, *left;
3e170ce0
A
4447 int ret = 0;
4448
0a7de745 4449 if (wql_type(link) != WQL_LINK) {
3e170ce0 4450 return WQ_ITERATE_CONTINUE;
0a7de745 4451 }
3e170ce0 4452
0a7de745 4453 do {
39037602
A
4454 left = wql_get_link(link->wql_link.left_setid);
4455 right = wql_get_link(link->wql_link.right_setid);
3e170ce0
A
4456
4457 ret = waitq_maybe_remove_link(waitq, setid, link, left, right);
4458
39037602
A
4459 wql_put_link(left);
4460 wql_put_link(right);
3e170ce0 4461
0a7de745 4462 if (!wql_is_valid(link)) {
3e170ce0 4463 return WQ_ITERATE_INVALID;
0a7de745 4464 }
3e170ce0
A
 4465 /* A ret value of UNLINKED will break us out of the table walk */
4466 } while (ret == WQ_ITERATE_INVALID);
4467
4468 return ret;
4469}
4470
4471
4472/**
4473 * undo/remove a prepost from 'ctx' (waitq) to 'wqset'
4474 *
4475 * Conditions:
4476 * Called from wq_prepost_foreach_locked OR wq_prepost_iterate
4477 * 'wqset' may be NULL
4478 * (ctx)->unlink_wqset is locked
4479 */
0a7de745
A
4480static int
4481waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx,
4482 struct wq_prepost *wqp, struct waitq *waitq)
3e170ce0
A
4483{
4484 struct wq_unlink_ctx *ulctx = (struct wq_unlink_ctx *)ctx;
4485
0a7de745 4486 if (waitq != ulctx->unlink_wq) {
3e170ce0 4487 return WQ_ITERATE_CONTINUE;
0a7de745 4488 }
3e170ce0
A
4489
4490 if (wqp_type(wqp) == WQP_WQ &&
4491 wqp->wqp_prepostid.id == ulctx->unlink_wqset->wqset_prepost_id) {
4492 /* this is the only prepost on this wait queue set */
4493 wqdbg_v("unlink wqp (WQ) 0x%llx", wqp->wqp_prepostid.id);
4494 ulctx->unlink_wqset->wqset_prepost_id = 0;
4495 return WQ_ITERATE_BREAK;
4496 }
4497
4498 assert(wqp_type(wqp) == WQP_POST);
4499
4500 /*
4501 * The prepost object 'wqp' points to a waitq which should no longer
4502 * be preposted to 'ulctx->unlink_wqset'. We can remove the prepost
4503 * object from the list and break out of the iteration. Using the
4504 * context object in this way allows this same callback function to be
4505 * used from both wq_prepost_foreach_locked and wq_prepost_iterate.
4506 */
4507 wq_prepost_remove(ulctx->unlink_wqset, wqp);
4508 return WQ_ITERATE_BREAK;
4509}
4510
4511/**
4512 * unlink 'waitq' from 'wqset'
4513 *
4514 * Conditions:
4515 * 'waitq' is locked
4516 * 'wqset' is _not_ locked
4517 * may (rarely) spin in prepost clear and drop/re-acquire 'waitq' lock
4518 * (see waitq_clear_prepost_locked)
4519 */
0a7de745
A
4520static kern_return_t
4521waitq_unlink_locked(struct waitq *waitq,
4522 struct waitq_set *wqset)
3e170ce0
A
4523{
4524 uint64_t setid;
4525 kern_return_t kr;
4526
39037602
A
4527 assert(!waitq_irq_safe(waitq));
4528
3e170ce0
A
4529 if (waitq->waitq_set_id == 0) {
4530 /*
4531 * TODO:
4532 * it doesn't belong to anyone, and it has a prepost object?
4533 * This is an artifact of not cleaning up after kqueues when
4534 * they prepost into select sets...
4535 */
0a7de745 4536 if (waitq->waitq_prepost_id != 0) {
39037602 4537 (void)waitq_clear_prepost_locked(waitq);
0a7de745 4538 }
3e170ce0
A
4539 return KERN_NOT_IN_SET;
4540 }
4541
d9a64523
A
4542 if (!waitqs_is_linked(wqset)) {
4543 /*
4544 * No link has been allocated for the wqset,
4545 * so no waitq could have been linked to it.
4546 */
4547 return KERN_NOT_IN_SET;
4548 }
4549
4550 setid = wqset->wqset_id;
4551
3e170ce0
A
4552 if (waitq->waitq_set_id == setid) {
4553 waitq->waitq_set_id = 0;
4554 /*
4555 * This was the only set to which the waitq belonged: we can
4556 * safely release the waitq's prepost object. It doesn't
4557 * matter if this function drops and re-acquires the lock
4558 * because we're not manipulating waitq state any more.
4559 */
39037602 4560 (void)waitq_clear_prepost_locked(waitq);
3e170ce0
A
4561 return KERN_SUCCESS;
4562 }
4563
4564 /*
 4565 * The waitq was a member of more than 1 set, so we need to
4566 * handle potentially compressing the link table, and
4567 * adjusting the waitq->waitq_set_id value.
4568 *
4569 * Note: we can't free the waitq's associated prepost object (if any)
4570 * because it may be in use by the one or more _other_ sets to
4571 * which this queue belongs.
4572 *
4573 * Note: This function only handles a single level of the queue linkage.
4574 * Removing a waitq from a set to which it does not directly
4575 * belong is undefined. For example, if a waitq belonged to set
4576 * A, and set A belonged to set B. You can't remove the waitq
4577 * from set B.
4578 */
39037602 4579 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
0a7de745 4580 WQL_LINK, (void *)&setid, waitq_unlink_cb);
3e170ce0
A
4581
4582 if (kr == WQ_ITERATE_UNLINKED) {
4583 struct wq_unlink_ctx ulctx;
3e170ce0
A
4584
4585 kr = KERN_SUCCESS; /* found it and dis-associated it */
4586
39037602 4587 /* don't look for preposts if it's not prepost-enabled */
0a7de745 4588 if (!wqset->wqset_q.waitq_prepost) {
39037602 4589 goto out;
0a7de745 4590 }
39037602
A
4591
4592 assert(!waitq_irq_safe(&wqset->wqset_q));
4593
3e170ce0
A
4594 waitq_set_lock(wqset);
4595 /*
4596 * clear out any prepost from waitq into wqset
4597 * TODO: this could be more efficient than a linear search of
4598 * the waitq set's prepost list.
4599 */
4600 ulctx.unlink_wq = waitq;
4601 ulctx.unlink_wqset = wqset;
4602 (void)wq_prepost_iterate(wqset->wqset_prepost_id, (void *)&ulctx,
0a7de745 4603 waitq_unlink_prepost_cb);
3e170ce0 4604 waitq_set_unlock(wqset);
3e170ce0
A
4605 } else {
4606 kr = KERN_NOT_IN_SET; /* waitq is _not_ associated with wqset */
4607 }
4608
39037602 4609out:
3e170ce0
A
4610 return kr;
4611}
4612
4613/**
4614 * unlink 'waitq' from 'wqset'
4615 *
4616 * Conditions:
4617 * neither 'waitq' nor 'wqset' is locked
4618 * may disable and re-enable interrupts
4619 * may (rarely) spin in prepost clear
4620 * (see waitq_clear_prepost_locked)
4621 */
0a7de745
A
4622kern_return_t
4623waitq_unlink(struct waitq *waitq, struct waitq_set *wqset)
3e170ce0
A
4624{
4625 kern_return_t kr = KERN_SUCCESS;
3e170ce0
A
4626
4627 assert(waitqs_is_set(wqset));
4628
4629 /*
4630 * we allow the waitq to be invalid because the caller may be trying
4631 * to clear out old/dirty state
4632 */
0a7de745 4633 if (!waitq_valid(waitq)) {
3e170ce0 4634 return KERN_INVALID_ARGUMENT;
0a7de745 4635 }
3e170ce0
A
4636
4637 wqdbg_v("unlink waitq %p from set 0x%llx",
0a7de745 4638 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id);
3e170ce0 4639
39037602
A
4640 assert(!waitq_irq_safe(waitq));
4641
3e170ce0
A
4642 waitq_lock(waitq);
4643
39037602 4644 kr = waitq_unlink_locked(waitq, wqset);
3e170ce0
A
4645
4646 waitq_unlock(waitq);
3e170ce0
A
4647 return kr;
4648}
4649
4650/**
4651 * unlink a waitq from a waitq set, but reference the waitq by its prepost ID
4652 *
4653 * Conditions:
4654 * 'wqset' is unlocked
4655 * wqp_id may be valid or invalid
4656 */
0a7de745
A
4657void
4658waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset)
3e170ce0
A
4659{
4660 struct wq_prepost *wqp;
4661
4662 disable_preemption();
4663 wqp = wq_prepost_get(wqp_id);
4664 if (wqp) {
4665 struct waitq *wq;
3e170ce0
A
4666
4667 wq = wqp->wqp_wq.wqp_wq_ptr;
4668
4669 /*
4670 * lock the waitq, then release our prepost ID reference, then
4671 * unlink the waitq from the wqset: this ensures that we don't
4672 * hold a prepost ID reference during the unlink, but we also
4673 * complete the unlink operation atomically to avoid a race
4674 * with waitq_unlink[_all].
4675 */
39037602
A
4676 assert(!waitq_irq_safe(wq));
4677
3e170ce0
A
4678 waitq_lock(wq);
4679 wq_prepost_put(wqp);
4680
4681 if (!waitq_valid(wq)) {
4682 /* someone already tore down this waitq! */
4683 waitq_unlock(wq);
3e170ce0
A
4684 enable_preemption();
4685 return;
4686 }
4687
4688 /* this _may_ drop the wq lock, but that's OK */
39037602 4689 waitq_unlink_locked(wq, wqset);
3e170ce0
A
4690
4691 waitq_unlock(wq);
3e170ce0
A
4692 }
4693 enable_preemption();
4694 return;
4695}
4696
4697
5ba3f43e
A
4698/**
4699 * reference and lock a waitq by its prepost ID
4700 *
4701 * Conditions:
4702 * wqp_id may be valid or invalid
4703 *
4704 * Returns:
4705 * a locked waitq if wqp_id was valid
4706 * NULL on failure
4707 */
0a7de745
A
4708struct waitq *
4709waitq_lock_by_prepost_id(uint64_t wqp_id)
5ba3f43e
A
4710{
4711 struct waitq *wq = NULL;
4712 struct wq_prepost *wqp;
4713
4714 disable_preemption();
4715 wqp = wq_prepost_get(wqp_id);
4716 if (wqp) {
4717 wq = wqp->wqp_wq.wqp_wq_ptr;
4718
4719 assert(!waitq_irq_safe(wq));
4720
4721 waitq_lock(wq);
4722 wq_prepost_put(wqp);
4723
4724 if (!waitq_valid(wq)) {
4725 /* someone already tore down this waitq! */
4726 waitq_unlock(wq);
4727 enable_preemption();
4728 return NULL;
4729 }
4730 }
4731 enable_preemption();
4732 return wq;
4733}
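
/*
 * Usage sketch (illustrative only): the returned waitq, when non-NULL, is
 * valid and locked, and must be unlocked by the caller:
 *
 *	struct waitq *wq = waitq_lock_by_prepost_id(wqp_id);
 *	if (wq != NULL) {
 *		... wq is locked and valid here ...
 *		waitq_unlock(wq);
 *	}
 */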
4734
4735
3e170ce0
A
4736/**
4737 * unlink 'waitq' from all sets to which it belongs
4738 *
4739 * Conditions:
39037602
A
4740 * 'waitq' is locked on entry
4741 * returns with waitq lock dropped
3e170ce0
A
4742 *
4743 * Notes:
3e170ce0
A
4744 * may (rarely) spin (see waitq_clear_prepost_locked)
4745 */
0a7de745
A
4746kern_return_t
4747waitq_unlink_all_unlock(struct waitq *waitq)
3e170ce0 4748{
39037602 4749 uint64_t old_set_id = 0;
3e170ce0 4750 wqdbg_v("unlink waitq %p from all sets",
0a7de745 4751 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq));
39037602 4752 assert(!waitq_irq_safe(waitq));
3e170ce0
A
4753
4754 /* it's not a member of any sets */
39037602
A
4755 if (waitq->waitq_set_id == 0) {
4756 waitq_unlock(waitq);
3e170ce0 4757 return KERN_SUCCESS;
39037602 4758 }
3e170ce0 4759
39037602 4760 old_set_id = waitq->waitq_set_id;
3e170ce0
A
4761 waitq->waitq_set_id = 0;
4762
4763 /*
4764 * invalidate the prepost entry for this waitq.
4765 * This may drop and re-acquire the waitq lock, but that's OK because
4766 * if it was added to another set and preposted to that set in the
4767 * time we drop the lock, the state will remain consistent.
4768 */
39037602
A
4769 (void)waitq_clear_prepost_locked(waitq);
4770
4771 waitq_unlock(waitq);
4772
4773 if (old_set_id) {
4774 /*
4775 * Walk the link table and invalidate each LINK object that
4776 * used to connect this waitq to one or more sets: this works
4777 * because WQL_LINK objects are private to each wait queue
4778 */
4779 (void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, old_set_id,
0a7de745 4780 WQL_LINK, NULL, waitq_unlink_all_cb);
39037602 4781 }
3e170ce0
A
4782
4783 return KERN_SUCCESS;
4784}
4785
4786/**
4787 * unlink 'waitq' from all sets to which it belongs
4788 *
4789 * Conditions:
4790 * 'waitq' is not locked
4791 * may disable and re-enable interrupts
4792 * may (rarely) spin
4793 * (see waitq_unlink_all_locked, waitq_clear_prepost_locked)
4794 */
0a7de745
A
4795kern_return_t
4796waitq_unlink_all(struct waitq *waitq)
3e170ce0
A
4797{
4798 kern_return_t kr = KERN_SUCCESS;
3e170ce0 4799
0a7de745 4800 if (!waitq_valid(waitq)) {
3e170ce0 4801 panic("Invalid waitq: %p", waitq);
0a7de745 4802 }
3e170ce0 4803
39037602 4804 assert(!waitq_irq_safe(waitq));
3e170ce0 4805 waitq_lock(waitq);
39037602
A
4806 if (!waitq_valid(waitq)) {
4807 waitq_unlock(waitq);
4808 return KERN_SUCCESS;
3e170ce0
A
4809 }
4810
39037602
A
4811 kr = waitq_unlink_all_unlock(waitq);
4812 /* waitq unlocked and set links deallocated */
4813
3e170ce0
A
4814 return kr;
4815}
4816
4817
4818/**
4819 * unlink all waitqs from 'wqset'
4820 *
4821 * Conditions:
39037602
A
4822 * 'wqset' is locked on entry
4823 * 'wqset' is unlocked on exit and spl is restored
4824 *
4825 * Note:
3e170ce0
A
4826 * may (rarely) spin/block (see waitq_clear_prepost_locked)
4827 */
0a7de745
A
4828kern_return_t
4829waitq_set_unlink_all_unlock(struct waitq_set *wqset)
3e170ce0 4830{
39037602
A
4831 struct waitq_link *link;
4832 uint64_t prepost_id;
3e170ce0
A
4833
4834 wqdbg_v("unlink all queues from set 0x%llx", wqset->wqset_id);
4835
4836 /*
4837 * This operation does not require interaction with any of the set's
4838 * constituent wait queues. All we have to do is invalidate the SetID
4839 */
3e170ce0 4840
0a7de745 4841 if (waitqs_is_linked(wqset)) {
d9a64523
A
4842 /* invalidate and re-alloc the link object first */
4843 link = wql_get_link(wqset->wqset_id);
4844
4845 /* we may have raced with a waitq_set_deinit: handle this */
4846 if (!link) {
4847 waitq_set_unlock(wqset);
4848 return KERN_SUCCESS;
4849 }
3e170ce0 4850
d9a64523 4851 wql_invalidate(link);
3e170ce0 4852
d9a64523
A
4853 /* re-alloc the object to get a new generation ID */
4854 wql_realloc_link(link, WQL_WQS);
4855 link->wql_wqs.wql_set = wqset;
3e170ce0 4856
d9a64523
A
4857 wqset->wqset_id = link->wql_setid.id;
4858 wql_mkvalid(link);
4859 wql_put_link(link);
4860 }
3e170ce0
A
4861
4862 /* clear any preposts attached to this set */
39037602 4863 prepost_id = 0;
0a7de745 4864 if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id) {
39037602 4865 prepost_id = wqset->wqset_prepost_id;
0a7de745 4866 }
39037602 4867 /* else { TODO: notify kqueue subsystem? } */
3e170ce0
A
4868 wqset->wqset_prepost_id = 0;
4869
4870 /*
4871 * clear set linkage and prepost object associated with this set:
4872 * waitq sets may prepost to other sets if, for example, they are
4873 * associated with a kqueue which is in a select set.
4874 *
39037602 4875 * This releases all the set link objects
3e170ce0
A
4876 * (links to other sets to which this set was previously added)
4877 */
39037602
A
4878 waitq_unlink_all_unlock(&wqset->wqset_q);
4879 /* wqset->wqset_q unlocked */
3e170ce0
A
4880
4881 /* drop / unlink all the prepost table objects */
0a7de745 4882 if (prepost_id) {
3e170ce0 4883 (void)wq_prepost_iterate(prepost_id, NULL,
0a7de745
A
4884 wqset_clear_prepost_chain_cb);
4885 }
3e170ce0
A
4886
4887 return KERN_SUCCESS;
4888}
4889
39037602
A
4890/**
4891 * unlink all waitqs from 'wqset'
4892 *
4893 * Conditions:
4894 * 'wqset' is not locked
4895 * may (rarely) spin/block (see waitq_clear_prepost_locked)
4896 */
0a7de745
A
4897kern_return_t
4898waitq_set_unlink_all(struct waitq_set *wqset)
39037602
A
4899{
4900 assert(waitqs_is_set(wqset));
4901 assert(!waitq_irq_safe(&wqset->wqset_q));
4902
4903 waitq_set_lock(wqset);
4904 return waitq_set_unlink_all_unlock(wqset);
4905 /* wqset unlocked and set links and preposts deallocated */
4906}
3e170ce0 4907
0a7de745
A
4908static int
4909waitq_prepost_reserve_cb(struct waitq *waitq, void *ctx,
4910 struct waitq_link *link)
3e170ce0
A
4911{
4912 uint32_t *num = (uint32_t *)ctx;
4913 (void)waitq;
4914
4915 /*
4916 * In the worst case, we'll have to allocate 2 prepost objects
4917 * per waitq set (if the set was already preposted by another
4918 * waitq).
4919 */
39037602 4920 if (wql_type(link) == WQL_WQS) {
3e170ce0
A
4921 /*
4922 * check to see if the associated waitq actually supports
4923 * preposting
4924 */
0a7de745 4925 if (waitq_set_can_prepost(link->wql_wqs.wql_set)) {
3e170ce0 4926 *num += 2;
0a7de745 4927 }
3e170ce0
A
4928 }
4929 return WQ_ITERATE_CONTINUE;
4930}
4931
0a7de745
A
4932static int
4933waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq,
4934 int *did_unlock, struct wq_prepost **wqp)
3e170ce0
A
4935{
4936 struct wq_prepost *tmp;
4937 struct wqp_cache *cache;
4938
4939 *did_unlock = 0;
4940
4941 /*
4942 * Before we unlock the waitq, check the per-processor prepost object
4943 * cache to see if there's enough there for us. If so, do the
4944 * allocation, keep the lock and save an entire iteration over the set
4945 * linkage!
4946 */
4947 if (waitq) {
4948 disable_preemption();
4949 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
0a7de745 4950 if (nalloc <= (int)cache->avail) {
3e170ce0 4951 goto do_alloc;
0a7de745 4952 }
3e170ce0
A
4953 enable_preemption();
4954
4955 /* unlock the waitq to perform the allocation */
4956 *did_unlock = 1;
4957 waitq_unlock(waitq);
3e170ce0
A
4958 }
4959
4960do_alloc:
39037602 4961 tmp = wq_prepost_alloc(LT_RESERVED, nalloc);
0a7de745 4962 if (!tmp) {
3e170ce0 4963 panic("Couldn't reserve %d preposts for waitq @%p (wqp@%p)",
0a7de745
A
4964 nalloc, waitq, *wqp);
4965 }
3e170ce0
A
4966 if (*wqp) {
4967 /* link the two lists */
4968 int __assert_only rc;
4969 rc = wq_prepost_rlink(tmp, *wqp);
4970 assert(rc == nalloc);
4971 }
4972 *wqp = tmp;
4973
4974 /*
4975 * If the caller can block, then enforce a minimum-free table element
4976 * policy here. This helps ensure that we will have enough prepost
4977 * objects for callers such as selwakeup() that can be called with
4978 * spin locks held.
4979 */
0a7de745 4980 if (get_preemption_level() == 0) {
3e170ce0 4981 wq_prepost_ensure_free_space();
0a7de745 4982 }
3e170ce0
A
4983
4984 if (waitq) {
4985 if (*did_unlock == 0) {
4986 /* decrement the preemption count if alloc from cache */
4987 enable_preemption();
4988 } else {
4989 /* otherwise: re-lock the waitq */
3e170ce0
A
4990 waitq_lock(waitq);
4991 }
4992 }
4993
4994 return nalloc;
4995}
4996
0a7de745
A
4997static int
4998waitq_count_prepost_reservation(struct waitq *waitq, int extra, int keep_locked)
3e170ce0
A
4999{
5000 int npreposts = 0;
5001
5002 /*
5003 * If the waitq is not currently part of a set, and we're not asked to
 5004 * keep the waitq locked, then we'll want to have 3 in reserve
 5005 * just in case it becomes part of a set while we unlock and reserve.
5006 * We may need up to 1 object for the waitq, and 2 for the set.
5007 */
5008 if (waitq->waitq_set_id == 0) {
5009 npreposts = 3;
5010 } else {
5011 /* this queue has never been preposted before */
0a7de745 5012 if (waitq->waitq_prepost_id == 0) {
3e170ce0 5013 npreposts = 3;
0a7de745 5014 }
3e170ce0
A
5015
5016 /*
5017 * Walk the set of table linkages associated with this waitq
5018 * and count the worst-case number of prepost objects that
5019 * may be needed during a wakeup_all. We can walk this without
5020 * locking each set along the way because the table-based IDs
5021 * disconnect us from the set pointers themselves, and the
5022 * table walking is careful to read the setid values only once.
5023 * Locking each set up the chain also doesn't guarantee that
5024 * their membership won't change between the time we unlock
5025 * that set and when we actually go to prepost, so our
5026 * situation is no worse than before and we've alleviated lock
5027 * contention on any sets to which this waitq belongs.
5028 */
39037602 5029 (void)walk_waitq_links(LINK_WALK_FULL_DAG_UNLOCKED,
0a7de745
A
5030 waitq, waitq->waitq_set_id,
5031 WQL_WQS, (void *)&npreposts,
5032 waitq_prepost_reserve_cb);
3e170ce0
A
5033 }
5034
0a7de745 5035 if (extra > 0) {
3e170ce0 5036 npreposts += extra;
0a7de745 5037 }
3e170ce0
A
5038
5039 if (npreposts == 0 && !keep_locked) {
5040 /*
5041 * If we get here, we were asked to reserve some prepost
5042 * objects for a waitq that's previously preposted, and is not
5043 * currently a member of any sets. We have also been
5044 * instructed to unlock the waitq when we're done. In this
 5045 * case, we pre-allocate enough reserved objects to handle
5046 * the case where the waitq gets added to a single set when
5047 * the lock is released.
5048 */
5049 npreposts = 3;
5050 }
5051
5052 return npreposts;
5053}
5054
5055
5056/**
5057 * pre-allocate prepost objects for 'waitq'
5058 *
5059 * Conditions:
5060 * 'waitq' is not locked
5061 *
5062 * Returns:
5063 * panic on error
5064 *
5065 * 0 on success, '*reserved' is set to the head of a singly-linked
5066 * list of pre-allocated prepost objects.
5067 *
5068 * Notes:
5069 * If 'lock_state' is WAITQ_KEEP_LOCKED, this function performs the pre-allocation
39037602 5070 * atomically and returns 'waitq' locked.
3e170ce0
A
5071 *
5072 * This function attempts to pre-allocate precisely enough prepost
5073 * objects based on the current set membership of 'waitq'. If the
5074 * operation is performed atomically, then the caller
5075 * is guaranteed to have enough pre-allocated prepost object to avoid
5076 * any (rare) blocking in the wakeup path.
5077 */
0a7de745
A
5078uint64_t
5079waitq_prepost_reserve(struct waitq *waitq, int extra,
5080 waitq_lock_state_t lock_state)
3e170ce0
A
5081{
5082 uint64_t reserved = 0;
5083 uint64_t prev_setid = 0, prev_prepostid = 0;
5084 struct wq_prepost *wqp = NULL;
5085 int nalloc = 0, npreposts = 0;
5086 int keep_locked = (lock_state == WAITQ_KEEP_LOCKED);
5087 int unlocked = 0;
5088
3e170ce0 5089 wqdbg_v("Attempting to reserve prepost linkages for waitq %p (extra:%d)",
0a7de745 5090 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), extra);
3e170ce0
A
5091
5092 if (waitq == NULL && extra > 0) {
5093 /*
5094 * Simple prepost object allocation:
5095 * we'll add 2 more because the waitq might need an object,
5096 * and the set itself may need a new POST object in addition
5097 * to the number of preposts requested by the caller
5098 */
39037602 5099 nalloc = waitq_alloc_prepost_reservation(extra + 2, NULL,
0a7de745 5100 &unlocked, &wqp);
3e170ce0
A
5101 assert(nalloc == extra + 2);
5102 return wqp->wqp_prepostid.id;
5103 }
5104
5105 assert(lock_state == WAITQ_KEEP_LOCKED || lock_state == WAITQ_UNLOCK);
5106
39037602 5107 assert(!waitq_irq_safe(waitq));
3e170ce0 5108
39037602 5109 waitq_lock(waitq);
3e170ce0
A
5110
5111 /* remember the set ID that we started with */
5112 prev_setid = waitq->waitq_set_id;
5113 prev_prepostid = waitq->waitq_prepost_id;
5114
5115 /*
5116 * If the waitq is not part of a set, and we're asked to
5117 * keep the set locked, then we don't have to reserve
5118 * anything!
5119 */
0a7de745 5120 if (prev_setid == 0 && keep_locked) {
3e170ce0 5121 goto out;
0a7de745 5122 }
3e170ce0
A
5123
5124 npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked);
5125
5126 /* nothing for us to do! */
5127 if (npreposts == 0) {
0a7de745 5128 if (keep_locked) {
3e170ce0 5129 goto out;
0a7de745 5130 }
3e170ce0
A
5131 goto out_unlock;
5132 }
5133
5134try_alloc:
5135 /* this _may_ unlock and relock the waitq! */
39037602 5136 nalloc = waitq_alloc_prepost_reservation(npreposts, waitq,
0a7de745 5137 &unlocked, &wqp);
3e170ce0
A
5138
5139 if (!unlocked) {
 5140 /* allocation held the waitq lock: we're done! */
0a7de745 5141 if (keep_locked) {
3e170ce0 5142 goto out;
0a7de745 5143 }
3e170ce0
A
5144 goto out_unlock;
5145 }
5146
5147 /*
5148 * Before we return, if the allocation had to unlock the waitq, we
5149 * must check one more time to see if we have enough. If not, we'll
5150 * try to allocate the difference. If the caller requests it, we'll
5151 * also leave the waitq locked so that the use of the pre-allocated
5152 * prepost objects can be guaranteed to be enough if a wakeup_all is
5153 * performed before unlocking the waitq.
5154 */
5155
5156 /*
5157 * If the waitq is no longer associated with a set, or if the waitq's
5158 * set/prepostid has not changed since we first walked its linkage,
5159 * we're done.
5160 */
5161 if ((waitq->waitq_set_id == 0) ||
5162 (waitq->waitq_set_id == prev_setid &&
0a7de745
A
5163 waitq->waitq_prepost_id == prev_prepostid)) {
5164 if (keep_locked) {
3e170ce0 5165 goto out;
0a7de745 5166 }
3e170ce0
A
5167 goto out_unlock;
5168 }
5169
5170 npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked);
5171
5172 if (npreposts > nalloc) {
5173 prev_setid = waitq->waitq_set_id;
5174 prev_prepostid = waitq->waitq_prepost_id;
5175 npreposts = npreposts - nalloc; /* only allocate the diff */
5176 goto try_alloc;
5177 }
5178
0a7de745 5179 if (keep_locked) {
3e170ce0 5180 goto out;
0a7de745 5181 }
3e170ce0
A
5182
5183out_unlock:
5184 waitq_unlock(waitq);
3e170ce0 5185out:
0a7de745 5186 if (wqp) {
3e170ce0 5187 reserved = wqp->wqp_prepostid.id;
0a7de745 5188 }
3e170ce0
A
5189
5190 return reserved;
5191}
5192
5193/**
5194 * release a linked list of prepost objects allocated via _prepost_reserve
5195 *
5196 * Conditions:
5197 * may (rarely) spin waiting for prepost table growth memcpy
5198 */
0a7de745
A
5199void
5200waitq_prepost_release_reserve(uint64_t id)
3e170ce0
A
5201{
5202 struct wq_prepost *wqp;
5203
5204 wqdbg_v("releasing reserved preposts starting at: 0x%llx", id);
5205
5206 wqp = wq_prepost_rfirst(id);
0a7de745 5207 if (!wqp) {
3e170ce0 5208 return;
0a7de745 5209 }
3e170ce0
A
5210
5211 wq_prepost_release_rlist(wqp);
5212}
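
/*
 * Usage sketch (illustrative only): a wakeup path that must not block can
 * reserve prepost objects up front and release whatever remains unused:
 *
 *	uint64_t reserved = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
 *	... 'waitq' is returned locked; perform the wakeup / prepost ...
 *	waitq_unlock(waitq);
 *	waitq_prepost_release_reserve(reserved);
 */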
5213
5214
5215/**
5216 * clear all preposts from 'wqset'
5217 *
5218 * Conditions:
5219 * 'wqset' is not locked
5220 */
0a7de745
A
5221void
5222waitq_set_clear_preposts(struct waitq_set *wqset)
3e170ce0
A
5223{
5224 uint64_t prepost_id;
5225 spl_t spl;
5226
5227 assert(waitqs_is_set(wqset));
5228
0a7de745 5229 if (!wqset->wqset_q.waitq_prepost || !wqset->wqset_prepost_id) {
39037602 5230 return;
0a7de745 5231 }
39037602 5232
3e170ce0 5233 wqdbg_v("Clearing all preposted queues on waitq_set: 0x%llx",
0a7de745 5234 wqset->wqset_id);
3e170ce0 5235
0a7de745 5236 if (waitq_irq_safe(&wqset->wqset_q)) {
3e170ce0 5237 spl = splsched();
0a7de745 5238 }
3e170ce0
A
5239 waitq_set_lock(wqset);
5240 prepost_id = wqset->wqset_prepost_id;
5241 wqset->wqset_prepost_id = 0;
5242 waitq_set_unlock(wqset);
0a7de745 5243 if (waitq_irq_safe(&wqset->wqset_q)) {
3e170ce0 5244 splx(spl);
0a7de745 5245 }
3e170ce0
A
5246
5247 /* drop / unlink all the prepost table objects */
0a7de745 5248 if (prepost_id) {
3e170ce0 5249 (void)wq_prepost_iterate(prepost_id, NULL,
0a7de745
A
5250 wqset_clear_prepost_chain_cb);
5251 }
3e170ce0
A
5252}
5253
5254
5255/* ----------------------------------------------------------------------
5256 *
5257 * Iteration: waitq -> sets / waitq_set -> preposts
5258 *
5259 * ---------------------------------------------------------------------- */
5260
5261struct wq_it_ctx {
5262 void *input;
5263 void *ctx;
5264 waitq_iterator_t it;
3e170ce0
A
5265};
5266
0a7de745
A
5267static int
5268waitq_iterate_sets_cb(struct waitq *waitq, void *ctx,
5269 struct waitq_link *link)
3e170ce0
A
5270{
5271 struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx);
5272 struct waitq_set *wqset;
5273 int ret;
3e170ce0
A
5274
5275 (void)waitq;
39037602
A
5276 assert(!waitq_irq_safe(waitq));
5277 assert(wql_type(link) == WQL_WQS);
3e170ce0
A
5278
5279 /*
5280 * the waitq is locked, so we can just take the set lock
5281 * and call the iterator function
5282 */
39037602 5283 wqset = link->wql_wqs.wql_set;
3e170ce0 5284 assert(wqset != NULL);
39037602 5285 assert(!waitq_irq_safe(&wqset->wqset_q));
3e170ce0
A
5286 waitq_set_lock(wqset);
5287
5288 ret = wctx->it(wctx->ctx, (struct waitq *)wctx->input, wqset);
5289
5290 waitq_set_unlock(wqset);
3e170ce0
A
5291 return ret;
5292}
5293
5294/**
5295 * call external iterator function for each prepost object in wqset
5296 *
5297 * Conditions:
5298 * Called from wq_prepost_foreach_locked
5299 * (wqset locked, waitq _not_ locked)
5300 */
0a7de745
A
5301static int
5302wqset_iterate_prepost_cb(struct waitq_set *wqset, void *ctx,
5303 struct wq_prepost *wqp, struct waitq *waitq)
3e170ce0
A
5304{
5305 struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx);
5306 uint64_t wqp_id;
5307 int ret;
3e170ce0
A
5308
5309 (void)wqp;
5310
5311 /*
5312 * This is a bit tricky. The 'wqset' is locked, but the 'waitq' is not.
5313 * Taking the 'waitq' lock is a lock order violation, so we need to be
5314 * careful. We also must realize that we may have taken a reference to
5315 * the 'wqp' just as the associated waitq was being torn down (or
5316 * clearing all its preposts) - see waitq_clear_prepost_locked(). If
5317 * the 'wqp' is valid and we can get the waitq lock, then we are good
5318 * to go. If not, we need to back off, check that the 'wqp' hasn't
5319 * been invalidated, and try to re-take the locks.
5320 */
39037602
A
5321 assert(!waitq_irq_safe(waitq));
5322
0a7de745 5323 if (waitq_lock_try(waitq)) {
3e170ce0 5324 goto call_iterator;
0a7de745 5325 }
3e170ce0 5326
0a7de745 5327 if (!wqp_is_valid(wqp)) {
3e170ce0 5328 return WQ_ITERATE_RESTART;
0a7de745 5329 }
3e170ce0
A
5330
5331 /* We are passed a prepost object with a reference on it. If neither
5332 * the waitq set nor the waitq require interrupts disabled, then we
5333 * may block on the delay(1) call below. We can't hold a prepost
5334 * object reference while blocking, so we have to give that up as well
5335 * and re-acquire it when we come back.
5336 */
5337 wqp_id = wqp->wqp_prepostid.id;
5338 wq_prepost_put(wqp);
5339 waitq_set_unlock(wqset);
5340 wqdbg_v("dropped set:%p lock waiting for wqp:%p (0x%llx -> wq:%p)",
0a7de745 5341 wqset, wqp, wqp->wqp_prepostid.id, waitq);
3e170ce0
A
5342 delay(1);
5343 waitq_set_lock(wqset);
5344 wqp = wq_prepost_get(wqp_id);
0a7de745 5345 if (!wqp) {
3e170ce0
A
5346 /* someone cleared preposts while we slept! */
5347 return WQ_ITERATE_DROPPED;
0a7de745 5348 }
3e170ce0
A
5349
5350 /*
5351 * TODO:
5352 * This differs slightly from the logic in ipc_mqueue.c:
5353 * ipc_mqueue_receive_on_thread(). There, if the waitq lock
5354 * can't be obtained, the prepost link is moved to the back of
5355 * the chain before the iteration restarts, preserving forward
5356 * progress; here we simply restart from the beginning.
5357 */
5358 return WQ_ITERATE_RESTART;
5359
5360call_iterator:
5361 if (!wqp_is_valid(wqp)) {
5362 ret = WQ_ITERATE_RESTART;
5363 goto out_unlock;
5364 }
5365
5366 /* call the external callback */
5367 ret = wctx->it(wctx->ctx, waitq, wqset);
5368
5369 if (ret == WQ_ITERATE_BREAK_KEEP_LOCKED) {
5370 ret = WQ_ITERATE_BREAK;
3e170ce0
A
5371 goto out;
5372 }
5373
5374out_unlock:
5375 waitq_unlock(waitq);
3e170ce0
A
5376out:
5377 return ret;
5378}
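/*
 * [Editor's sketch -- not part of waitq.c] The try-lock / back-off /
 * revalidate dance above is the usual answer to a lock-ordering problem:
 * the wqset lock is held, the waitq lock is needed, and taking it
 * unconditionally would invert the documented lock order. A minimal
 * user-space analogy of the same flow, with hypothetical names
 * (struct demo, demo_lock_out_of_order):
 */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct demo {
        pthread_mutex_t set_lock;   /* held by the caller, like the wqset lock */
        pthread_mutex_t q_lock;     /* wanted here, like the waitq lock */
        bool            obj_valid;  /* like wqp_is_valid(): cleared on teardown */
};

enum { DEMO_LOCKED, DEMO_RESTART, DEMO_DROPPED };

/* Called with d->set_lock held; mirrors wqset_iterate_prepost_cb's flow. */
static int
demo_lock_out_of_order(struct demo *d)
{
        if (pthread_mutex_trylock(&d->q_lock) == 0) {
                return DEMO_LOCKED;   /* got it without violating the lock order */
        }
        if (!d->obj_valid) {
                return DEMO_RESTART;  /* the object is already going away */
        }
        /* Drop what we hold, wait briefly, re-take the outer lock, revalidate. */
        pthread_mutex_unlock(&d->set_lock);
        usleep(1);
        pthread_mutex_lock(&d->set_lock);
        return d->obj_valid ? DEMO_RESTART : DEMO_DROPPED;
}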
5379
5380/**
5381 * iterator over all sets to which the given waitq has been linked
5382 *
5383 * Conditions:
0a7de745 5384 * 'waitq' is locked
3e170ce0 5385 */
0a7de745
A
5386int
5387waitq_iterate_sets(struct waitq *waitq, void *ctx, waitq_iterator_t it)
3e170ce0
A
5388{
5389 int ret;
5390 struct wq_it_ctx wctx = {
5391 .input = (void *)waitq,
5392 .ctx = ctx,
5393 .it = it,
5394 };
0a7de745 5395 if (!it || !waitq) {
3e170ce0 5396 return KERN_INVALID_ARGUMENT;
0a7de745 5397 }
3e170ce0 5398
39037602 5399 ret = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
0a7de745
A
5400 WQL_WQS, (void *)&wctx, waitq_iterate_sets_cb);
5401 if (ret == WQ_ITERATE_CONTINUE) {
3e170ce0 5402 ret = WQ_ITERATE_SUCCESS;
0a7de745 5403 }
3e170ce0
A
5404 return ret;
5405}
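/*
 * [Editor's sketch -- not part of waitq.c] A hypothetical caller of
 * waitq_iterate_sets() that counts the sets a waitq is linked to. The
 * callback matches the waitq_iterator_t shape used above, and the waitq is
 * locked around the call per the stated conditions (a non-IRQ-safe waitq is
 * assumed; an IRQ-safe one would also need splsched()/splx()).
 */
static int
demo_count_sets_cb(void *ctx, struct waitq *waitq, struct waitq_set *wqset)
{
        (void)waitq;
        (void)wqset;
        (*(int *)ctx)++;              /* one WQL_WQS link visited */
        return WQ_ITERATE_CONTINUE;
}

static int
demo_count_linked_sets(struct waitq *waitq)
{
        int count = 0;

        waitq_lock(waitq);
        (void)waitq_iterate_sets(waitq, &count, demo_count_sets_cb);
        waitq_unlock(waitq);
        return count;
}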
5406
5407/**
5408 * iterator over all preposts in the given wqset
5409 *
5410 * Conditions:
0a7de745 5411 * 'wqset' is locked
3e170ce0 5412 */
0a7de745
A
5413int
5414waitq_set_iterate_preposts(struct waitq_set *wqset,
5415 void *ctx, waitq_iterator_t it)
3e170ce0
A
5416{
5417 struct wq_it_ctx wctx = {
5418 .input = (void *)wqset,
5419 .ctx = ctx,
5420 .it = it,
3e170ce0 5421 };
0a7de745 5422 if (!it || !wqset) {
3e170ce0 5423 return WQ_ITERATE_INVALID;
0a7de745 5424 }
3e170ce0
A
5425
5426 assert(waitq_held(&wqset->wqset_q));
5427
5428 return wq_prepost_foreach_locked(wqset, (void *)&wctx,
0a7de745 5429 wqset_iterate_prepost_cb);
3e170ce0
A
5430}
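/*
 * [Editor's sketch -- not part of waitq.c] The prepost direction works the
 * same way, except the caller holds the set lock and the callback is handed
 * each preposting waitq (locked by wqset_iterate_prepost_cb above). Names
 * with a demo_ prefix are hypothetical.
 */
static int
demo_count_preposts_cb(void *ctx, struct waitq *waitq, struct waitq_set *wqset)
{
        (void)waitq;
        (void)wqset;
        (*(int *)ctx)++;
        return WQ_ITERATE_CONTINUE;
}

static int
demo_count_preposts(struct waitq_set *wqset)
{
        int count = 0;

        waitq_set_lock(wqset);        /* 'wqset' must be locked, per above */
        (void)waitq_set_iterate_preposts(wqset, &count, demo_count_preposts_cb);
        waitq_set_unlock(wqset);
        return count;
}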
5431
5432
5433/* ----------------------------------------------------------------------
5434 *
5435 * Higher-level APIs
5436 *
5437 * ---------------------------------------------------------------------- */
5438
39037602 5439
3e170ce0
A
5440/**
5441 * declare a thread's intent to wait on 'waitq' for 'wait_event'
5442 *
5443 * Conditions:
5444 * 'waitq' is not locked
3e170ce0 5445 */
0a7de745
A
5446wait_result_t
5447waitq_assert_wait64(struct waitq *waitq,
5448 event64_t wait_event,
5449 wait_interrupt_t interruptible,
5450 uint64_t deadline)
3e170ce0 5451{
3e170ce0 5452 thread_t thread = current_thread();
39037602 5453 wait_result_t ret;
3e170ce0
A
5454 spl_t s;
5455
0a7de745 5456 if (!waitq_valid(waitq)) {
3e170ce0 5457 panic("Invalid waitq: %p", waitq);
0a7de745 5458 }
3e170ce0 5459
0a7de745 5460 if (waitq_irq_safe(waitq)) {
3e170ce0 5461 s = splsched();
0a7de745 5462 }
3e170ce0 5463
39037602 5464 waitq_lock(waitq);
3e170ce0 5465 ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
0a7de745
A
5466 TIMEOUT_URGENCY_SYS_NORMAL,
5467 deadline, TIMEOUT_NO_LEEWAY, thread);
3e170ce0
A
5468 waitq_unlock(waitq);
5469
0a7de745 5470 if (waitq_irq_safe(waitq)) {
39037602 5471 splx(s);
0a7de745 5472 }
3e170ce0
A
5473
5474 return ret;
5475}
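/*
 * [Editor's sketch -- not part of waitq.c] Typical use of
 * waitq_assert_wait64(): declare the wait, then actually block with
 * thread_block(). demo_wait_for_event() is hypothetical; a deadline of 0
 * means no timeout.
 */
static wait_result_t
demo_wait_for_event(struct waitq *wq, void *event_ptr)
{
        wait_result_t wr;

        wr = waitq_assert_wait64(wq, CAST_EVENT64_T(event_ptr),
            THREAD_UNINT, 0 /* no deadline */);
        if (wr == THREAD_WAITING) {
                wr = thread_block(THREAD_CONTINUE_NULL);
        }
        return wr;                    /* THREAD_AWAKENED once the event posts */
}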
5476
5477/**
5478 * declare a thread's intent to wait on 'waitq' for 'wait_event'
5479 *
5480 * Conditions:
5481 * 'waitq' is not locked
5482 * will disable and re-enable interrupts while locking current_thread()
5483 */
0a7de745
A
5484wait_result_t
5485waitq_assert_wait64_leeway(struct waitq *waitq,
5486 event64_t wait_event,
5487 wait_interrupt_t interruptible,
5488 wait_timeout_urgency_t urgency,
5489 uint64_t deadline,
5490 uint64_t leeway)
3e170ce0
A
5491{
5492 wait_result_t ret;
5493 thread_t thread = current_thread();
5494 spl_t s;
5495
0a7de745 5496 if (!waitq_valid(waitq)) {
3e170ce0 5497 panic("Invalid waitq: %p", waitq);
0a7de745 5498 }
3e170ce0 5499
0a7de745 5500 if (waitq_irq_safe(waitq)) {
3e170ce0 5501 s = splsched();
0a7de745 5502 }
3e170ce0 5503
39037602 5504 waitq_lock(waitq);
3e170ce0 5505 ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
0a7de745 5506 urgency, deadline, leeway, thread);
3e170ce0
A
5507 waitq_unlock(waitq);
5508
0a7de745 5509 if (waitq_irq_safe(waitq)) {
39037602 5510 splx(s);
0a7de745 5511 }
3e170ce0
A
5512
5513 return ret;
5514}
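/*
 * [Editor's sketch -- not part of waitq.c] The leeway variant takes an
 * absolute deadline plus a timer-coalescing leeway. demo_wait_with_timeout()
 * is hypothetical; it builds both values with the standard clock helpers.
 */
static wait_result_t
demo_wait_with_timeout(struct waitq *wq, void *event_ptr, uint32_t timeout_ms)
{
        uint64_t deadline, leeway;
        wait_result_t wr;

        clock_interval_to_deadline(timeout_ms, NSEC_PER_MSEC, &deadline);
        nanoseconds_to_absolutetime(500 * NSEC_PER_USEC, &leeway);  /* ~0.5ms slop */

        wr = waitq_assert_wait64_leeway(wq, CAST_EVENT64_T(event_ptr),
            THREAD_INTERRUPTIBLE, TIMEOUT_URGENCY_SYS_NORMAL,
            deadline, leeway);
        if (wr == THREAD_WAITING) {
                wr = thread_block(THREAD_CONTINUE_NULL);
        }
        return wr;                    /* THREAD_TIMED_OUT if the deadline fired */
}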
5515
5516/**
5517 * wake up a single thread from a waitq that's waiting for a given event
5518 *
5519 * Conditions:
5520 * 'waitq' is not locked
5521 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
5522 * may disable and re-enable interrupts
5523 *
5524 * Notes:
5525 * will _not_ block if waitq is global (or not a member of any set)
5526 */
0a7de745
A
5527kern_return_t
5528waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event,
5529 wait_result_t result, int priority)
3e170ce0
A
5530{
5531 kern_return_t kr;
5532 uint64_t reserved_preposts = 0;
5533 spl_t spl;
5534
0a7de745 5535 if (!waitq_valid(waitq)) {
3e170ce0 5536 panic("Invalid waitq: %p", waitq);
0a7de745 5537 }
3e170ce0 5538
39037602
A
5539 if (!waitq_irq_safe(waitq)) {
5540 /* reserve preposts in addition to locking the waitq */
5541 reserved_preposts = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
5542 } else {
5543 spl = splsched();
5544 waitq_lock(waitq);
5545 }
3e170ce0
A
5546
5547 /* waitq is locked upon return */
5548 kr = waitq_wakeup64_one_locked(waitq, wake_event, result,
0a7de745 5549 &reserved_preposts, priority, WAITQ_UNLOCK);
3e170ce0 5550
0a7de745 5551 if (waitq_irq_safe(waitq)) {
3e170ce0 5552 splx(spl);
0a7de745 5553 }
3e170ce0
A
5554
5555 /* release any left-over prepost object (won't block/lock anything) */
5556 waitq_prepost_release_reserve(reserved_preposts);
5557
5558 return kr;
5559}
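/*
 * [Editor's sketch -- not part of waitq.c] Posting an event to a single
 * waiter. demo_post_event() is hypothetical; WAITQ_ALL_PRIORITIES asks for no
 * priority filtering, and KERN_NOT_WAITING just means nobody was waiting.
 */
static void
demo_post_event(struct waitq *wq, void *event_ptr)
{
        kern_return_t kr;

        kr = waitq_wakeup64_one(wq, CAST_EVENT64_T(event_ptr),
            THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
        if (kr == KERN_NOT_WAITING) {
                /* no thread was blocked on this event; nothing to do */
        }
}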
5560
5561/**
5562 * wake up all threads from a waitq that are waiting for a given event
5563 *
5564 * Conditions:
5565 * 'waitq' is not locked
5566 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
5567 * may disable and re-enable interrupts
5568 *
5569 * Notes:
5570 * will _not_ block if waitq is global (or not a member of any set)
5571 */
0a7de745
A
5572kern_return_t
5573waitq_wakeup64_all(struct waitq *waitq,
5574 event64_t wake_event,
5575 wait_result_t result,
5576 int priority)
3e170ce0
A
5577{
5578 kern_return_t ret;
5579 uint64_t reserved_preposts = 0;
5580 spl_t s;
5581
0a7de745 5582 if (!waitq_valid(waitq)) {
3e170ce0 5583 panic("Invalid waitq: %p", waitq);
0a7de745 5584 }
3e170ce0 5585
39037602
A
5586 if (!waitq_irq_safe(waitq)) {
5587 /* reserve preposts in addition to locking waitq */
5588 reserved_preposts = waitq_prepost_reserve(waitq, 0,
0a7de745 5589 WAITQ_KEEP_LOCKED);
39037602
A
5590 } else {
5591 s = splsched();
5592 waitq_lock(waitq);
5593 }
3e170ce0
A
5594
5595 ret = waitq_wakeup64_all_locked(waitq, wake_event, result,
0a7de745
A
5596 &reserved_preposts, priority,
5597 WAITQ_UNLOCK);
3e170ce0 5598
0a7de745 5599 if (waitq_irq_safe(waitq)) {
3e170ce0 5600 splx(s);
0a7de745 5601 }
3e170ce0
A
5602
5603 waitq_prepost_release_reserve(reserved_preposts);
5604
5605 return ret;
3e170ce0
A
5606}
5607
5608/**
5609 * wake up a specific thread iff it's waiting on 'waitq' for 'wake_event'
5610 *
5611 * Conditions:
5612 * 'waitq' is not locked
5613 *
5614 * Notes:
5615 * May temporarily disable and re-enable interrupts
5616 */
0a7de745
A
5617kern_return_t
5618waitq_wakeup64_thread(struct waitq *waitq,
5619 event64_t wake_event,
5620 thread_t thread,
5621 wait_result_t result)
3e170ce0
A
5622{
5623 kern_return_t ret;
5624 spl_t s, th_spl;
5625
0a7de745 5626 if (!waitq_valid(waitq)) {
3e170ce0 5627 panic("Invalid waitq: %p", waitq);
0a7de745 5628 }
3e170ce0 5629
0a7de745 5630 if (waitq_irq_safe(waitq)) {
3e170ce0 5631 s = splsched();
0a7de745 5632 }
3e170ce0
A
5633 waitq_lock(waitq);
5634
5635 ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl);
5636 /* on success, returns 'thread' locked */
5637
5638 waitq_unlock(waitq);
5639
5640 if (ret == KERN_SUCCESS) {
5641 ret = thread_go(thread, result);
5642 assert(ret == KERN_SUCCESS);
5643 thread_unlock(thread);
5644 splx(th_spl);
5645 waitq_stats_count_wakeup(waitq);
5646 } else {
5647 ret = KERN_NOT_WAITING;
5648 waitq_stats_count_fail(waitq);
5649 }
5650
0a7de745 5651 if (waitq_irq_safe(waitq)) {
3e170ce0 5652 splx(s);
0a7de745 5653 }
3e170ce0
A
5654
5655 return ret;
5656}
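/*
 * [Editor's sketch -- not part of waitq.c] A directed wakeup only succeeds if
 * that exact thread is currently waiting on this waitq for this event;
 * otherwise KERN_NOT_WAITING comes back. demo_kick_thread() is hypothetical.
 */
static boolean_t
demo_kick_thread(struct waitq *wq, void *event_ptr, thread_t target)
{
        kern_return_t kr;

        kr = waitq_wakeup64_thread(wq, CAST_EVENT64_T(event_ptr),
            target, THREAD_AWAKENED);
        return kr == KERN_SUCCESS;
}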
39037602
A
5657
5658/**
5659 * wake up a single thread from a waitq that's waiting for a given event
5660 * and return a reference to that thread
5661 * returns THREAD_NULL if no thread was waiting
5662 *
5663 * Conditions:
5664 * 'waitq' is not locked
5665 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
5666 * may disable and re-enable interrupts
5667 *
5668 * Notes:
5669 * will _not_ block if waitq is global (or not a member of any set)
5670 */
5671thread_t
5672waitq_wakeup64_identify(struct waitq *waitq,
0a7de745
A
5673 event64_t wake_event,
5674 wait_result_t result,
5675 int priority)
39037602
A
5676{
5677 uint64_t reserved_preposts = 0;
5678 spl_t thread_spl = 0;
5679 thread_t thread;
5680 spl_t spl;
5681
0a7de745 5682 if (!waitq_valid(waitq)) {
39037602 5683 panic("Invalid waitq: %p", waitq);
0a7de745 5684 }
39037602
A
5685
5686 if (!waitq_irq_safe(waitq)) {
5687 /* reserve preposts in addition to locking waitq */
5688 reserved_preposts = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
5689 } else {
5690 spl = splsched();
5691 waitq_lock(waitq);
5692 }
5693
5694 thread = waitq_wakeup64_identify_locked(waitq, wake_event, result,
0a7de745
A
5695 &thread_spl, &reserved_preposts,
5696 priority, WAITQ_UNLOCK);
39037602
A
5697 /* waitq is unlocked, thread is locked */
5698
5699 if (thread != THREAD_NULL) {
5700 thread_reference(thread);
5701 thread_unlock(thread);
5702 splx(thread_spl);
5703 }
0a7de745
A
5704
5705 if (waitq_irq_safe(waitq)) {
5706 splx(spl);
5707 }
39037602
A
5708
5709 /* release any left-over prepost object (won't block/lock anything) */
5710 waitq_prepost_release_reserve(reserved_preposts);
5711
5712 /* returns +1 ref to running thread or THREAD_NULL */
5713 return thread;
5714}
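/*
 * [Editor's sketch -- not part of waitq.c] The _identify variant returns the
 * woken thread with a +1 reference, which the caller must drop with
 * thread_deallocate() when done. demo_wake_and_inspect() is hypothetical.
 */
static void
demo_wake_and_inspect(struct waitq *wq, void *event_ptr)
{
        thread_t thread;

        thread = waitq_wakeup64_identify(wq, CAST_EVENT64_T(event_ptr),
            THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
        if (thread != THREAD_NULL) {
                /* ... e.g. account the wakeup to 'thread' ... */
                thread_deallocate(thread);    /* drop the +1 ref from _identify */
        }
}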