3e170ce0 1/*
39037602 2 * Copyright (c) 2015-2016 Apple Inc. All rights reserved.
3e170ce0
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56#include <kern/ast.h>
39037602 57#include <kern/backtrace.h>
3e170ce0 58#include <kern/kern_types.h>
39037602 59#include <kern/ltable.h>
3e170ce0
A
60#include <kern/mach_param.h>
61#include <kern/queue.h>
62#include <kern/sched_prim.h>
63#include <kern/simple_lock.h>
64#include <kern/spl.h>
65#include <kern/waitq.h>
66#include <kern/zalloc.h>
39037602
A
67#include <kern/policy_internal.h>
68
3e170ce0
A
69#include <libkern/OSAtomic.h>
70#include <mach/sync_policy.h>
71#include <vm/vm_kern.h>
72
73#include <sys/kdebug.h>
74
39037602
A
75#if defined(CONFIG_WAITQ_LINK_STATS) || defined(CONFIG_WAITQ_PREPOST_STATS)
76# if !defined(CONFIG_LTABLE_STATS)
77# error "You must configure LTABLE_STATS to use WAITQ_[LINK|PREPOST]_STATS"
78# endif
79# if !defined(CONFIG_WAITQ_STATS)
80# error "You must configure WAITQ_STATS to use WAITQ_[LINK|PREPOST]_STATS"
81# endif
82#endif
83
3e170ce0
A
84#if CONFIG_WAITQ_DEBUG
85#define wqdbg(fmt,...) \
86 printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__)
87#else
88#define wqdbg(fmt,...) do { } while (0)
89#endif
90
91#ifdef WAITQ_VERBOSE_DEBUG
92#define wqdbg_v(fmt,...) \
93 printf("WQ[v:%s]: " fmt "\n", __func__, ## __VA_ARGS__)
94#else
95#define wqdbg_v(fmt,...) do { } while (0)
96#endif
97
98#define wqinfo(fmt,...) \
99 printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__)
100
39037602
A
101#define wqerr(fmt,...) \
102 printf("WQ[%s] ERROR: " fmt "\n", __func__, ## __VA_ARGS__)
103
3e170ce0
A
104
105/*
39037602
A
106 * un-comment the following lines to debug the link/prepost tables
107 * NOTE: this expands each element by ~40 bytes
3e170ce0 108 */
39037602
A
109//#define CONFIG_WAITQ_LINK_STATS
110//#define CONFIG_WAITQ_PREPOST_STATS
3e170ce0 111
39037602
A
112/*
113 * file-static functions / data
114 */
115static thread_t waitq_select_one_locked(struct waitq *waitq, event64_t event,
116 uint64_t *reserved_preposts,
117 int priority, spl_t *spl);
3e170ce0 118
39037602
A
119static kern_return_t waitq_select_thread_locked(struct waitq *waitq,
120 event64_t event,
121 thread_t thread, spl_t *spl);
3e170ce0 122
39037602
A
123#define WAITQ_SET_MAX (task_max * 3)
124static zone_t waitq_set_zone;
3e170ce0 125
3e170ce0 126
39037602
A
127#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
128#define ROUNDDOWN(x,y) (((x)/(y))*(y))
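/*
 * Worked example for the rounding macros above (illustrative only):
 * P2ROUNDUP rounds up to a power-of-two boundary using two's-complement
 * arithmetic, e.g. P2ROUNDUP(10, 8):
 *     -(uint32_t)10 = 0xfffffff6,  -(8) = 0xfffffff8
 *     0xfffffff6 & 0xfffffff8 = 0xfffffff0,  and -(0xfffffff0) = 16
 * It assumes 'align' is a power of two.  ROUNDDOWN uses plain integer
 * division and works for any non-zero 'y', e.g. ROUNDDOWN(10, 8) == 8.
 */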
3e170ce0 129
3e170ce0 130
39037602
A
131#if defined(CONFIG_LTABLE_STATS) || defined(CONFIG_WAITQ_STATS)
132static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip);
133#endif
3e170ce0 134
3e170ce0 135
39037602
A
136#define waitq_lock_to(wq,to) \
137 (hw_lock_to(&(wq)->waitq_interlock, to))
3e170ce0 138
39037602
A
139#define waitq_lock_unlock(wq) \
140 (hw_lock_unlock(&(wq)->waitq_interlock))
3e170ce0 141
39037602
A
142#define waitq_lock_init(wq) \
143 (hw_lock_init(&(wq)->waitq_interlock))
3e170ce0 144
3e170ce0 145
39037602
A
146/*
147 * Prepost callback function for specially marked waitq sets
148 * (prepost alternative)
149 */
150extern void waitq_set__CALLING_PREPOST_HOOK__(void *ctx, void *memberctx, int priority);
3e170ce0 151
39037602
A
152#define DEFAULT_MIN_FREE_TABLE_ELEM 100
153static uint32_t g_min_free_table_elem;
154static uint32_t g_min_free_cache;
3e170ce0
A
155
156
157/* ----------------------------------------------------------------------
158 *
159 * SetID Link Table Implementation
160 *
161 * ---------------------------------------------------------------------- */
39037602 162static struct link_table g_wqlinktable;
3e170ce0 163
39037602
A
164enum wq_link_type {
165 WQL_ALL = -1,
166 WQL_FREE = LT_FREE,
167 WQL_WQS = LT_ELEM,
168 WQL_LINK = LT_LINK,
3e170ce0
A
169};
170
39037602
A
171struct waitq_link {
172 struct lt_elem wqte;
3e170ce0
A
173
174 union {
39037602 175 /* wqt_type == WQL_WQS (LT_ELEM) */
3e170ce0 176 struct {
39037602 177 struct waitq_set *wql_set;
3e170ce0 178 /* uint64_t sl_prepost_id; */
39037602 179 } wql_wqs;
3e170ce0 180
39037602 181 /* wqt_type == WQL_LINK (LT_LINK) */
3e170ce0 182 struct {
39037602
A
183 uint64_t left_setid;
184 uint64_t right_setid;
185 } wql_link;
3e170ce0
A
186 };
187#ifdef CONFIG_WAITQ_LINK_STATS
188 thread_t sl_alloc_th;
189 task_t sl_alloc_task;
190 uintptr_t sl_alloc_bt[NWAITQ_BTFRAMES];
191 uint64_t sl_alloc_ts;
192 uintptr_t sl_invalidate_bt[NWAITQ_BTFRAMES];
193 uint64_t sl_invalidate_ts;
194 uintptr_t sl_mkvalid_bt[NWAITQ_BTFRAMES];
195 uint64_t sl_mkvalid_ts;
196 uint64_t sl_free_ts;
197#endif
198};
199#if !defined(CONFIG_WAITQ_LINK_STATS)
39037602
A
200static_assert((sizeof(struct waitq_link) & (sizeof(struct waitq_link) - 1)) == 0,
201 "waitq_link struct must be a power of two!");
3e170ce0
A
202#endif
203
39037602
A
204#define wql_refcnt(link) \
205 (lt_bits_refcnt((link)->wqte.lt_bits))
3e170ce0 206
39037602
A
207#define wql_type(link) \
208 (lt_bits_type((link)->wqte.lt_bits))
3e170ce0 209
39037602 210#define wql_mkvalid(link) \
3e170ce0 211 do { \
39037602
A
212 lt_elem_mkvalid(&(link)->wqte); \
213 wql_do_mkvalid_stats(&(link)->wqte); \
3e170ce0
A
214 } while (0)
215
39037602
A
216#define wql_is_valid(link) \
217 lt_bits_valid((link)->wqte.lt_bits)
3e170ce0 218
39037602 219#define wql_setid wqte.lt_id
3e170ce0 220
39037602
A
221#define WQL_WQS_POISON ((void *)(0xf00df00d))
222#define WQL_LINK_POISON (0x0bad0badffffffffull)
3e170ce0 223
39037602 224static void wql_poison(struct link_table *table, struct lt_elem *elem)
3e170ce0 225{
39037602 226 struct waitq_link *link = (struct waitq_link *)elem;
3e170ce0
A
227 (void)table;
228
39037602
A
229 switch (wql_type(link)) {
230 case WQL_WQS:
231 link->wql_wqs.wql_set = WQL_WQS_POISON;
3e170ce0 232 break;
39037602
A
233 case WQL_LINK:
234 link->wql_link.left_setid = WQL_LINK_POISON;
235 link->wql_link.right_setid = WQL_LINK_POISON;
3e170ce0
A
236 break;
237 default:
238 break;
239 }
240#ifdef CONFIG_WAITQ_LINK_STATS
39037602
A
241 memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
242 link->sl_alloc_ts = 0;
243 memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt));
244 link->sl_mkvalid_ts = 0;
3e170ce0 245
39037602 246 link->sl_alloc_th = THREAD_NULL;
3e170ce0
A
247 /* leave the sl_alloc_task in place for debugging */
248
39037602 249 link->sl_free_ts = mach_absolute_time();
3e170ce0
A
250#endif
251}
252
253#ifdef CONFIG_WAITQ_LINK_STATS
39037602 254static __inline__ void wql_do_alloc_stats(struct lt_elem *elem)
3e170ce0
A
255{
256 if (elem) {
39037602 257 struct waitq_link *link = (struct waitq_link *)elem;
3e170ce0
A
258 memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
259 waitq_grab_backtrace(link->sl_alloc_bt, 0);
260 link->sl_alloc_th = current_thread();
261 link->sl_alloc_task = current_task();
262
263 assert(link->sl_alloc_ts == 0);
264 link->sl_alloc_ts = mach_absolute_time();
265
266 memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
267 link->sl_invalidate_ts = 0;
268 }
269}
270
39037602 271static __inline__ void wql_do_invalidate_stats(struct lt_elem *elem)
3e170ce0 272{
39037602 273 struct waitq_link *link = (struct waitq_link *)elem;
3e170ce0
A
274
275 if (!elem)
276 return;
277
278 assert(link->sl_mkvalid_ts > 0);
279
280 memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
281 link->sl_invalidate_ts = mach_absolute_time();
282 waitq_grab_backtrace(link->sl_invalidate_bt, 0);
283}
284
39037602 285static __inline__ void wql_do_mkvalid_stats(struct lt_elem *elem)
3e170ce0 286{
39037602 287 struct waitq_link *link = (struct waitq_link *)elem;
3e170ce0
A
288
289 if (!elem)
290 return;
291
292 memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt));
293 link->sl_mkvalid_ts = mach_absolute_time();
294 waitq_grab_backtrace(link->sl_mkvalid_bt, 0);
295}
296#else
39037602
A
297#define wql_do_alloc_stats(e)
298#define wql_do_invalidate_stats(e)
299#define wql_do_mkvalid_stats(e)
3e170ce0
A
300#endif /* CONFIG_WAITQ_LINK_STATS */
301
39037602 302static void wql_init(void)
3e170ce0
A
303{
304 uint32_t tablesz = 0, max_links = 0;
305
306 if (PE_parse_boot_argn("wql_tsize", &tablesz, sizeof(tablesz)) != TRUE)
39037602 307 tablesz = (uint32_t)g_lt_max_tbl_size;
3e170ce0
A
308
309 tablesz = P2ROUNDUP(tablesz, PAGE_SIZE);
39037602 310 max_links = tablesz / sizeof(struct waitq_link);
3e170ce0
A
311 assert(max_links > 0 && tablesz > 0);
312
313 /* we have a restricted index range */
39037602
A
314 if (max_links > (LT_IDX_MAX + 1))
315 max_links = LT_IDX_MAX + 1;
3e170ce0
A
316
317 wqinfo("init linktable with max:%d elements (%d bytes)",
318 max_links, tablesz);
39037602
A
319 ltable_init(&g_wqlinktable, "wqslab.wql", max_links,
320 sizeof(struct waitq_link), wql_poison);
3e170ce0
A
321}
322
39037602 323static void wql_ensure_free_space(void)
3e170ce0 324{
39037602 325 if (g_wqlinktable.nelem - g_wqlinktable.used_elem < g_min_free_table_elem) {
3e170ce0
A
326 /*
327 * we don't hold locks on these values, so check for underflow
328 */
39037602 329 if (g_wqlinktable.used_elem <= g_wqlinktable.nelem) {
3e170ce0 330 wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d",
39037602 331 g_wqlinktable.nelem, g_wqlinktable.used_elem,
3e170ce0 332 g_min_free_table_elem);
39037602 333 ltable_grow(&g_wqlinktable, g_min_free_table_elem);
3e170ce0
A
334 }
335 }
336}
337
39037602 338static struct waitq_link *wql_alloc_link(int type)
3e170ce0 339{
39037602 340 struct lt_elem *elem;
3e170ce0 341
39037602
A
342 elem = ltable_alloc_elem(&g_wqlinktable, type, 1, 0);
343 wql_do_alloc_stats(elem);
344 return (struct waitq_link *)elem;
3e170ce0
A
345}
346
39037602 347static void wql_realloc_link(struct waitq_link *link, int type)
3e170ce0 348{
39037602 349 ltable_realloc_elem(&g_wqlinktable, &link->wqte, type);
3e170ce0
A
350#ifdef CONFIG_WAITQ_LINK_STATS
351 memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
352 link->sl_alloc_ts = 0;
39037602 353 wql_do_alloc_stats(&link->wqte);
3e170ce0
A
354
355 memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
356 link->sl_invalidate_ts = 0;
357#endif
358}
359
39037602 360static void wql_invalidate(struct waitq_link *link)
3e170ce0 361{
39037602
A
362 lt_elem_invalidate(&link->wqte);
363 wql_do_invalidate_stats(&link->wqte);
3e170ce0
A
364}
365
39037602 366static struct waitq_link *wql_get_link(uint64_t setid)
3e170ce0 367{
39037602 368 struct lt_elem *elem;
3e170ce0 369
39037602
A
370 elem = ltable_get_elem(&g_wqlinktable, setid);
371 return (struct waitq_link *)elem;
3e170ce0
A
372}
373
39037602 374static void wql_put_link(struct waitq_link *link)
3e170ce0
A
375{
376 if (!link)
377 return;
39037602 378 ltable_put_elem(&g_wqlinktable, (struct lt_elem *)link);
3e170ce0
A
379}
380
39037602 381static struct waitq_link *wql_get_reserved(uint64_t setid, int type)
3e170ce0 382{
39037602 383 struct lt_elem *elem;
3e170ce0 384
39037602 385 elem = lt_elem_list_first(&g_wqlinktable, setid);
3e170ce0
A
386 if (!elem)
387 return NULL;
39037602
A
388 ltable_realloc_elem(&g_wqlinktable, elem, type);
389 return (struct waitq_link *)elem;
3e170ce0
A
390}
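/*
 * Illustrative sketch only (not original source): the accessors above are
 * typically used in this reference-counted lifecycle, mirroring how the
 * set-linkage code later in this file handles link elements:
 *
 *	struct waitq_link *link = wql_alloc_link(WQL_WQS);
 *	link->wql_wqs.wql_set = wqset;	// associate the element with a set
 *	wql_mkvalid(link);		// publish: lookups may now find it
 *	...
 *	link = wql_get_link(setid);	// lookup takes a reference (or NULL)
 *	if (link) {
 *		// ... use link ...
 *		wql_put_link(link);	// drop the reference
 *	}
 *	...
 *	wql_invalidate(link);		// mark dead; storage is recycled once
 *					// the last reference is dropped
 */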
391
392
393static inline int waitq_maybe_remove_link(struct waitq *waitq,
394 uint64_t setid,
39037602
A
395 struct waitq_link *parent,
396 struct waitq_link *left,
397 struct waitq_link *right);
3e170ce0
A
398
399enum {
400 LINK_WALK_ONE_LEVEL = 0,
401 LINK_WALK_FULL_DAG = 1,
402 LINK_WALK_FULL_DAG_UNLOCKED = 2,
403};
404
39037602
A
405typedef int (*wql_callback_func)(struct waitq *waitq, void *ctx,
406 struct waitq_link *link);
3e170ce0
A
407
408/**
39037602 409 * walk_waitq_links: walk all table elements (of type 'link_type') pointed to by 'setid'
3e170ce0
A
410 *
411 * Conditions:
412 * waitq is locked (or NULL)
413 * 'setid' is managed by 'waitq'
414 * this could be direct (waitq->waitq_set_id == setid)
415 * OR indirect (setid is the left/right ID in a LINK chain,
416 * whose root is waitq->waitq_set_id)
417 *
418 * Notes:
419 * This function uses recursion to walk the set of table elements
420 * pointed to by 'setid'. For each element encountered, 'cb' will be
421 * called. If non-zero, the return value of this callback function can
422 * early-out of the table walk.
423 *
424 * For each link element encountered, the function takes a reference to
425 * it. The reference is dropped only after the callback and any recursion
426 * has completed.
427 *
428 * The assumed table/link/tree structure:
429 *          'setid'
430 *          /    \
431 *         /      \
432 *     L(LINK)     R(LINK)
433 *      /\           /\
434 *     /  \         /  \
435 *    /    \      Rl(*) Rr(*)
436 *  Ll(*) Lr(*)    /\    /\
437 *   /\    /\    ...  ...  ...  ...
438 *  ...  ...  ...  ...
439 *       \
440 *        WQS(wqset_q.waitq_setid == Sx)
441 *        [waitq set is a member of setid 'Sx']
442 *
443 *          'Sx'
444 *          /    \
445 *         /      \
446 *     L(LINK)     R(LINK)
447 *      /\           /\
448 *    ... ...      ... ...
449 *
450 * The basic algorithm is as follows:
451 * *) take a reference to the table object pointed to by 'setid'
452 * *) if appropriate, call 'cb' (potentially early-out on non-zero return)
453 * *) if the link object points to a waitq set, and the walk type
454 * is 'FULL_DAG' (full directed-acyclic-graph), then try to lock
455 * the associated waitq set object and recursively walk all sets to
456 * which that set belongs. This is a DFS of the tree structure.
457 * *) recurse down the left side of the tree (following the
39037602 458 *    'left_setid' pointer in the link object)
3e170ce0 459 * *) recurse down the right side of the tree (following the
39037602 460 *    'right_setid' pointer in the link object)
3e170ce0
A
461 */
462static __attribute__((noinline))
39037602 463int walk_waitq_links(int walk_type, struct waitq *waitq,
3e170ce0 464 uint64_t setid, int link_type,
39037602 465 void *ctx, wql_callback_func cb)
3e170ce0 466{
39037602 467 struct waitq_link *link;
3e170ce0 468 uint64_t nextid;
39037602 469 int wqltype;
3e170ce0 470
39037602 471 link = wql_get_link(setid);
3e170ce0
A
472
473 /* invalid link */
474 if (!link)
475 return WQ_ITERATE_CONTINUE;
476
477 setid = nextid = 0;
39037602
A
478 wqltype = wql_type(link);
479 if (wqltype == WQL_LINK) {
480 setid = link->wql_link.left_setid;
481 nextid = link->wql_link.right_setid;
3e170ce0
A
482 }
483
484 /*
485 * Make the callback only on specified link_type (or all links)
486 * Note that after the callback, the link object may be
487 * invalid. The only valid thing we can do is put our
488 * reference to it (which may put it back on the free list)
489 */
39037602 490 if (link_type == WQL_ALL || link_type == wqltype) {
3e170ce0
A
491 /* allow the callback to early-out */
492 int ret = cb(waitq, ctx, link);
493 if (ret != WQ_ITERATE_CONTINUE) {
39037602 494 wql_put_link(link);
3e170ce0
A
495 return ret;
496 }
497 }
498
39037602 499 if (wqltype == WQL_WQS &&
3e170ce0
A
500 (walk_type == LINK_WALK_FULL_DAG ||
501 walk_type == LINK_WALK_FULL_DAG_UNLOCKED)) {
502 /*
503 * Recurse down any sets to which this wait queue set was
504 * added. We do this just before we put our reference to
505 * the link object (which may free it).
506 */
39037602 507 struct waitq_set *wqset = link->wql_wqs.wql_set;
3e170ce0 508 int ret = WQ_ITERATE_CONTINUE;
3e170ce0
A
509 int should_unlock = 0;
510 uint64_t wqset_setid = 0;
3e170ce0
A
511
512 if (waitq_set_is_valid(wqset) && walk_type == LINK_WALK_FULL_DAG) {
39037602 513 assert(!waitq_irq_safe(&wqset->wqset_q));
3e170ce0
A
514 waitq_set_lock(wqset);
515 should_unlock = 1;
516 }
517
518 /*
519 * verify the linked waitq set as it could have been
520 * invalidated before we grabbed the lock!
521 */
39037602 522 if (wqset->wqset_id != link->wql_setid.id) {
3e170ce0
A
523 /* This is the bottom of the tree: just get out */
524 if (should_unlock) {
525 waitq_set_unlock(wqset);
3e170ce0 526 }
39037602 527 wql_put_link(link);
3e170ce0
A
528 return WQ_ITERATE_CONTINUE;
529 }
530
531 wqset_setid = wqset->wqset_q.waitq_set_id;
532
533 if (wqset_setid > 0)
39037602 534 ret = walk_waitq_links(walk_type, &wqset->wqset_q,
3e170ce0
A
535 wqset_setid, link_type, ctx, cb);
536 if (should_unlock) {
537 waitq_set_unlock(wqset);
3e170ce0
A
538 }
539 if (ret != WQ_ITERATE_CONTINUE) {
39037602 540 wql_put_link(link);
3e170ce0
A
541 return ret;
542 }
543 }
544
39037602 545 wql_put_link(link);
3e170ce0
A
546
547 /* recurse down left side of the tree */
548 if (setid) {
39037602 549 int ret = walk_waitq_links(walk_type, waitq, setid, link_type, ctx, cb);
3e170ce0
A
550 if (ret != WQ_ITERATE_CONTINUE)
551 return ret;
552 }
553
554 /* recurse down right side of the tree */
555 if (nextid)
39037602 556 return walk_waitq_links(walk_type, waitq, nextid, link_type, ctx, cb);
3e170ce0
A
557
558 return WQ_ITERATE_CONTINUE;
559}
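/*
 * Usage sketch (illustrative only; 'my_wqs_cb' is a made-up name): a caller
 * that wants to visit every wait queue set a waitq belongs to supplies a
 * wql_callback_func and walks from the waitq's root set ID:
 *
 *	static int my_wqs_cb(struct waitq *waitq, void *ctx,
 *			     struct waitq_link *link)
 *	{
 *		struct waitq_set *wqset = link->wql_wqs.wql_set;
 *		// ... inspect wqset ...
 *		return WQ_ITERATE_CONTINUE;	// non-CONTINUE early-outs the walk
 *	}
 *
 *	(void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq,
 *			       waitq->waitq_set_id, WQL_WQS,
 *			       ctx, my_wqs_cb);
 *
 * waitq_select_walk_cb() further down follows exactly this pattern.
 */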
560
561/* ----------------------------------------------------------------------
562 *
563 * Prepost Link Table Implementation
564 *
565 * ---------------------------------------------------------------------- */
39037602 566static struct link_table g_prepost_table;
3e170ce0
A
567
568enum wq_prepost_type {
39037602
A
569 WQP_FREE = LT_FREE,
570 WQP_WQ = LT_ELEM,
571 WQP_POST = LT_LINK,
3e170ce0
A
572};
573
574struct wq_prepost {
39037602 575 struct lt_elem wqte;
3e170ce0
A
576
577 union {
39037602 578 /* wqt_type == WQP_WQ (LT_ELEM) */
3e170ce0
A
579 struct {
580 struct waitq *wqp_wq_ptr;
581 } wqp_wq;
39037602 582 /* wqt_type == WQP_POST (LT_LINK) */
3e170ce0
A
583 struct {
584 uint64_t wqp_next_id;
585 uint64_t wqp_wq_id;
586 } wqp_post;
587 };
588#ifdef CONFIG_WAITQ_PREPOST_STATS
589 thread_t wqp_alloc_th;
590 task_t wqp_alloc_task;
591 uintptr_t wqp_alloc_bt[NWAITQ_BTFRAMES];
592#endif
593};
594#if !defined(CONFIG_WAITQ_PREPOST_STATS)
39037602 595static_assert((sizeof(struct wq_prepost) & (sizeof(struct wq_prepost) - 1)) == 0,
3e170ce0
A
596 "wq_prepost struct must be a power of two!");
597#endif
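/*
 * Shape of a waitq set's prepost list (summary of what
 * wq_prepost_do_post_locked() below constructs):
 *
 *  - If exactly one waitq has preposted to the set, wqset_prepost_id
 *    points directly at that waitq's WQP_WQ element.
 *  - Otherwise it points at a circular list of WQP_POST elements chained
 *    through wqp_next_id -- one per preposting waitq -- each naming a
 *    WQP_WQ through wqp_wq_id.
 *  - A WQP_WQ is tied to its waitq and shared by every set that waitq
 *    preposts to; the reverse links (wq_prepost_rlink()/_get_rnext())
 *    give O(1) access to the list tail.
 */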
598
599#define wqp_refcnt(wqp) \
39037602 600 (lt_bits_refcnt((wqp)->wqte.lt_bits))
3e170ce0
A
601
602#define wqp_type(wqp) \
39037602 603 (lt_bits_type((wqp)->wqte.lt_bits))
3e170ce0
A
604
605#define wqp_set_valid(wqp) \
39037602 606 lt_elem_mkvalid(&(wqp)->wqte)
3e170ce0
A
607
608#define wqp_is_valid(wqp) \
39037602 609 lt_bits_valid((wqp)->wqte.lt_bits)
3e170ce0 610
39037602 611#define wqp_prepostid wqte.lt_id
3e170ce0
A
612
613#define WQP_WQ_POISON (0x0bad0badffffffffull)
614#define WQP_POST_POISON (0xf00df00df00df00d)
615
39037602 616static void wqp_poison(struct link_table *table, struct lt_elem *elem)
3e170ce0
A
617{
618 struct wq_prepost *wqp = (struct wq_prepost *)elem;
619 (void)table;
620
621 switch (wqp_type(wqp)) {
622 case WQP_WQ:
623 break;
624 case WQP_POST:
625 wqp->wqp_post.wqp_next_id = WQP_POST_POISON;
626 wqp->wqp_post.wqp_wq_id = WQP_POST_POISON;
627 break;
628 default:
629 break;
630 }
631}
632
633#ifdef CONFIG_WAITQ_PREPOST_STATS
39037602 634static __inline__ void wqp_do_alloc_stats(struct lt_elem *elem)
3e170ce0 635{
39037602
A
636 if (!elem)
637 return;
3e170ce0 638
39037602
A
639 struct wq_prepost *wqp = (struct wq_prepost *)elem;
640 uintptr_t alloc_bt[sizeof(wqp->wqp_alloc_bt)];
3e170ce0 641
39037602 642 waitq_grab_backtrace(alloc_bt, NWAITQ_BTFRAMES);
3e170ce0 643
39037602
A
644 /* be sure to take stats for _all_ allocated objects */
645 for (;;) {
646 memcpy(wqp->wqp_alloc_bt, alloc_bt, sizeof(alloc_bt));
647 wqp->wqp_alloc_th = current_thread();
648 wqp->wqp_alloc_task = current_task();
649 wqp = (struct wq_prepost *)lt_elem_list_next(&g_prepost_table, &wqp->wqte);
650 if (!wqp)
651 break;
3e170ce0
A
652 }
653}
654#else
655#define wqp_do_alloc_stats(e)
656#endif /* CONFIG_WAITQ_PREPOST_STATS */
657
658static void wqp_init(void)
659{
660 uint32_t tablesz = 0, max_wqp = 0;
661
662 if (PE_parse_boot_argn("wqp_tsize", &tablesz, sizeof(tablesz)) != TRUE)
39037602 663 tablesz = (uint32_t)g_lt_max_tbl_size;
3e170ce0
A
664
665 tablesz = P2ROUNDUP(tablesz, PAGE_SIZE);
666 max_wqp = tablesz / sizeof(struct wq_prepost);
667 assert(max_wqp > 0 && tablesz > 0);
668
669 /* we have a restricted index range */
39037602
A
670 if (max_wqp > (LT_IDX_MAX + 1))
671 max_wqp = LT_IDX_MAX + 1;
3e170ce0
A
672
673 wqinfo("init prepost table with max:%d elements (%d bytes)",
674 max_wqp, tablesz);
39037602
A
675 ltable_init(&g_prepost_table, "wqslab.prepost", max_wqp,
676 sizeof(struct wq_prepost), wqp_poison);
3e170ce0
A
677}
678
679/*
680 * Refill the per-CPU cache.
681 */
682static void wq_prepost_refill_cpu_cache(uint32_t nalloc)
683{
39037602 684 struct lt_elem *new_head, *old_head;
3e170ce0
A
685 struct wqp_cache *cache;
686
687 /* require preemption enabled to allocate elements */
688 if (get_preemption_level() != 0)
689 return;
690
39037602
A
691 new_head = ltable_alloc_elem(&g_prepost_table,
692 LT_RESERVED, nalloc, 1);
3e170ce0
A
693 if (new_head == NULL)
694 return;
695
696 disable_preemption();
697 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
39037602
A
698
699 /* check once more before putting these elements on the list */
700 if (cache->avail >= WQP_CACHE_MAX) {
701 lt_elem_list_release(&g_prepost_table, new_head, LT_RESERVED);
702 enable_preemption();
703 return;
704 }
705
3e170ce0 706 cache->avail += nalloc;
39037602
A
707 if (cache->head == 0 || cache->head == LT_IDX_MAX) {
708 cache->head = new_head->lt_id.id;
3e170ce0
A
709 goto out;
710 }
711
39037602
A
712 old_head = lt_elem_list_first(&g_prepost_table, cache->head);
713 (void)lt_elem_list_link(&g_prepost_table, new_head, old_head);
714 cache->head = new_head->lt_id.id;
3e170ce0
A
715
716out:
717 enable_preemption();
718 return;
719}
720
721static void wq_prepost_ensure_free_space(void)
722{
723 uint32_t free_elem;
724 uint32_t min_free;
725 struct wqp_cache *cache;
726
727 if (g_min_free_cache == 0)
728 g_min_free_cache = (WQP_CACHE_MAX * ml_get_max_cpus());
729
730 /*
731 * Ensure that we always have a pool of per-CPU prepost elements
732 */
733 disable_preemption();
734 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
735 free_elem = cache->avail;
736 enable_preemption();
737
738 if (free_elem < (WQP_CACHE_MAX / 3))
739 wq_prepost_refill_cpu_cache(WQP_CACHE_MAX - free_elem);
740
741 /*
742 * Now ensure that we have a sufficient amount of free table space
743 */
744 free_elem = g_prepost_table.nelem - g_prepost_table.used_elem;
745 min_free = g_min_free_table_elem + g_min_free_cache;
746 if (free_elem < min_free) {
747 /*
748 * we don't hold locks on these values, so check for underflow
749 */
750 if (g_prepost_table.used_elem <= g_prepost_table.nelem) {
751 wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d+%d",
752 g_prepost_table.nelem, g_prepost_table.used_elem,
753 g_min_free_table_elem, g_min_free_cache);
39037602 754 ltable_grow(&g_prepost_table, min_free);
3e170ce0
A
755 }
756 }
757}
758
759static struct wq_prepost *wq_prepost_alloc(int type, int nelem)
760{
39037602 761 struct lt_elem *elem;
3e170ce0
A
762 struct wq_prepost *wqp;
763 struct wqp_cache *cache;
764
39037602 765 if (type != LT_RESERVED)
3e170ce0
A
766 goto do_alloc;
767 if (nelem == 0)
768 return NULL;
769
770 /*
771 * First try to grab the elements from the per-CPU cache if we are
772 * allocating RESERVED elements
773 */
774 disable_preemption();
775 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
776 if (nelem <= (int)cache->avail) {
39037602 777 struct lt_elem *first, *next = NULL;
3e170ce0
A
778 int nalloc = nelem;
779
780 cache->avail -= nelem;
781
782 /* grab the first element */
39037602 783 first = lt_elem_list_first(&g_prepost_table, cache->head);
3e170ce0
A
784
785 /* find the last element and re-adjust the cache head */
786 for (elem = first; elem != NULL && nalloc > 0; elem = next) {
39037602 787 next = lt_elem_list_next(&g_prepost_table, elem);
3e170ce0
A
788 if (--nalloc == 0) {
789 /* terminate the allocated list */
39037602 790 elem->lt_next_idx = LT_IDX_MAX;
3e170ce0
A
791 break;
792 }
793 }
794 assert(nalloc == 0);
795 if (!next)
39037602 796 cache->head = LT_IDX_MAX;
3e170ce0 797 else
39037602 798 cache->head = next->lt_id.id;
3e170ce0 799 /* assert that we don't have mismatched bookkeeping */
39037602 800 assert(!(cache->head == LT_IDX_MAX && cache->avail > 0));
3e170ce0
A
801 enable_preemption();
802 elem = first;
803 goto out;
804 }
805 enable_preemption();
806
807do_alloc:
808 /* fall-back to standard table allocation */
39037602 809 elem = ltable_alloc_elem(&g_prepost_table, type, nelem, 0);
3e170ce0
A
810 if (!elem)
811 return NULL;
812
813out:
814 wqp = (struct wq_prepost *)elem;
815 wqp_do_alloc_stats(elem);
816 return wqp;
817}
818
3e170ce0
A
819static void wq_prepost_invalidate(struct wq_prepost *wqp)
820{
39037602 821 lt_elem_invalidate(&wqp->wqte);
3e170ce0
A
822}
823
824static struct wq_prepost *wq_prepost_get(uint64_t wqp_id)
825{
39037602 826 struct lt_elem *elem;
3e170ce0 827
39037602 828 elem = ltable_get_elem(&g_prepost_table, wqp_id);
3e170ce0
A
829 return (struct wq_prepost *)elem;
830}
831
832static void wq_prepost_put(struct wq_prepost *wqp)
833{
39037602 834 ltable_put_elem(&g_prepost_table, (struct lt_elem *)wqp);
3e170ce0
A
835}
836
837static int wq_prepost_rlink(struct wq_prepost *parent, struct wq_prepost *child)
838{
39037602 839 return lt_elem_list_link(&g_prepost_table, &parent->wqte, &child->wqte);
3e170ce0
A
840}
841
842static struct wq_prepost *wq_prepost_get_rnext(struct wq_prepost *head)
843{
39037602 844 struct lt_elem *elem;
3e170ce0
A
845 struct wq_prepost *wqp;
846 uint64_t id;
847
39037602 848 elem = lt_elem_list_next(&g_prepost_table, &head->wqte);
3e170ce0
A
849 if (!elem)
850 return NULL;
39037602
A
851 id = elem->lt_id.id;
852 elem = ltable_get_elem(&g_prepost_table, id);
3e170ce0
A
853
854 if (!elem)
855 return NULL;
856 wqp = (struct wq_prepost *)elem;
39037602 857 if (elem->lt_id.id != id ||
3e170ce0
A
858 wqp_type(wqp) != WQP_POST ||
859 wqp->wqp_post.wqp_next_id != head->wqp_prepostid.id) {
39037602 860 ltable_put_elem(&g_prepost_table, elem);
3e170ce0
A
861 return NULL;
862 }
863
864 return wqp;
865}
866
867static void wq_prepost_reset_rnext(struct wq_prepost *wqp)
868{
39037602 869 (void)lt_elem_list_break(&g_prepost_table, &wqp->wqte);
3e170ce0
A
870}
871
872
873/**
874 * remove 'wqp' from the prepost list on 'wqset'
875 *
876 * Conditions:
877 * wqset is locked
878 * caller holds a reference on wqp (and is responsible to release it)
879 *
880 * Result:
881 * wqp is invalidated, wqset is potentially updated with a new
882 * prepost ID, and the next element of the prepost list may be
883 * consumed as well (if the list contained only 2 objects)
884 */
885static int wq_prepost_remove(struct waitq_set *wqset,
886 struct wq_prepost *wqp)
887{
888 int more_posts = 1;
889 uint64_t next_id = wqp->wqp_post.wqp_next_id;
890 uint64_t wqp_id = wqp->wqp_prepostid.id;
891 struct wq_prepost *prev_wqp, *next_wqp;
892
893 assert(wqp_type(wqp) == WQP_POST);
39037602 894 assert(wqset->wqset_q.waitq_prepost == 1);
3e170ce0
A
895
896 if (next_id == wqp_id) {
897 /* the list is singular and becoming empty */
898 wqset->wqset_prepost_id = 0;
899 more_posts = 0;
900 goto out;
901 }
902
903 prev_wqp = wq_prepost_get_rnext(wqp);
904 assert(prev_wqp != NULL);
905 assert(prev_wqp->wqp_post.wqp_next_id == wqp_id);
906 assert(prev_wqp->wqp_prepostid.id != wqp_id);
907 assert(wqp_type(prev_wqp) == WQP_POST);
908
909 if (prev_wqp->wqp_prepostid.id == next_id) {
910 /*
911 * There are two items in the list, and we're removing one. We
912 * only need to keep the WQP_WQ pointer from 'prev_wqp'
913 */
914 wqset->wqset_prepost_id = prev_wqp->wqp_post.wqp_wq_id;
915 wq_prepost_invalidate(prev_wqp);
916 wq_prepost_put(prev_wqp);
917 more_posts = 0;
918 goto out;
919 }
920
921 /* prev->next = next */
922 prev_wqp->wqp_post.wqp_next_id = next_id;
923
924 /* next->prev = prev */
925 next_wqp = wq_prepost_get(next_id);
926 assert(next_wqp != NULL);
927 assert(next_wqp != wqp);
928 assert(next_wqp != prev_wqp);
929 assert(wqp_type(next_wqp) == WQP_POST);
930
931 wq_prepost_reset_rnext(next_wqp);
932 wq_prepost_rlink(next_wqp, prev_wqp);
933
934 /* If we remove the head of the list, update the wqset */
935 if (wqp_id == wqset->wqset_prepost_id)
936 wqset->wqset_prepost_id = next_id;
937
938 wq_prepost_put(prev_wqp);
939 wq_prepost_put(next_wqp);
940
941out:
942 wq_prepost_reset_rnext(wqp);
943 wq_prepost_invalidate(wqp);
944 return more_posts;
945}
946
947static struct wq_prepost *wq_prepost_rfirst(uint64_t id)
948{
39037602
A
949 struct lt_elem *elem;
950 elem = lt_elem_list_first(&g_prepost_table, id);
3e170ce0
A
951 wqp_do_alloc_stats(elem);
952 return (struct wq_prepost *)(void *)elem;
953}
954
955static struct wq_prepost *wq_prepost_rpop(uint64_t *id, int type)
956{
39037602
A
957 struct lt_elem *elem;
958 elem = lt_elem_list_pop(&g_prepost_table, id, type);
3e170ce0
A
959 wqp_do_alloc_stats(elem);
960 return (struct wq_prepost *)(void *)elem;
961}
962
963static void wq_prepost_release_rlist(struct wq_prepost *wqp)
964{
965 int nelem = 0;
966 struct wqp_cache *cache;
39037602 967 struct lt_elem *elem;
3e170ce0
A
968
969 if (!wqp)
970 return;
971
972 elem = &wqp->wqte;
973
974 /*
975 * These are reserved elements: release them back to the per-cpu pool
976 * if our cache is running low.
977 */
978 disable_preemption();
979 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
980 if (cache->avail < WQP_CACHE_MAX) {
39037602
A
981 struct lt_elem *tmp = NULL;
982 if (cache->head != LT_IDX_MAX)
983 tmp = lt_elem_list_first(&g_prepost_table, cache->head);
984 nelem = lt_elem_list_link(&g_prepost_table, elem, tmp);
985 cache->head = elem->lt_id.id;
3e170ce0
A
986 cache->avail += nelem;
987 enable_preemption();
988 return;
989 }
990 enable_preemption();
991
992 /* release these elements back to the main table */
39037602 993 nelem = lt_elem_list_release(&g_prepost_table, elem, LT_RESERVED);
3e170ce0
A
994
995#if CONFIG_WAITQ_STATS
996 g_prepost_table.nreserved_releases += 1;
997 OSDecrementAtomic64(&g_prepost_table.nreservations);
998#endif
999}
1000
1001typedef int (*wqp_callback_func)(struct waitq_set *wqset,
1002 void *ctx,
1003 struct wq_prepost *wqp,
1004 struct waitq *waitq);
1005
1006/**
1007 * iterate over a chain of preposts associated with a waitq set.
1008 *
1009 * Conditions:
1010 * wqset is locked
1011 *
1012 * Notes:
1013 * This loop performs automatic prepost chain management / culling, and
1014 * may reset or adjust the waitq set's prepost ID pointer. If you don't
1015 * want this extra processing, you can use wq_prepost_iterate().
1016 */
1017static int wq_prepost_foreach_locked(struct waitq_set *wqset,
1018 void *ctx, wqp_callback_func cb)
1019{
39037602 1020 int ret = WQ_ITERATE_SUCCESS;
3e170ce0
A
1021 struct wq_prepost *wqp, *tmp_wqp;
1022
39037602
A
1023 assert(cb != NULL);
1024
1025 if (!wqset || !waitq_set_maybe_preposted(wqset))
3e170ce0
A
1026 return WQ_ITERATE_SUCCESS;
1027
1028restart:
1029 wqp = wq_prepost_get(wqset->wqset_prepost_id);
1030 if (!wqp) {
1031 /*
1032 * The prepost object is no longer valid, reset the waitq
1033 * set's prepost id.
1034 */
1035 wqset->wqset_prepost_id = 0;
1036 return WQ_ITERATE_SUCCESS;
1037 }
1038
1039 if (wqp_type(wqp) == WQP_WQ) {
1040 uint64_t __assert_only wqp_id = wqp->wqp_prepostid.id;
39037602
A
1041
1042 ret = cb(wqset, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr);
3e170ce0
A
1043
1044 switch (ret) {
1045 case WQ_ITERATE_INVALIDATE_CONTINUE:
1046 /* the caller wants to remove the only prepost here */
1047 assert(wqp_id == wqset->wqset_prepost_id);
1048 wqset->wqset_prepost_id = 0;
1049 /* fall through */
1050 case WQ_ITERATE_CONTINUE:
1051 wq_prepost_put(wqp);
1052 ret = WQ_ITERATE_SUCCESS;
1053 break;
1054 case WQ_ITERATE_RESTART:
1055 wq_prepost_put(wqp);
1056 /* fall through */
1057 case WQ_ITERATE_DROPPED:
1058 goto restart;
1059 default:
1060 wq_prepost_put(wqp);
1061 break;
1062 }
1063 return ret;
1064 }
1065
1066 assert(wqp->wqp_prepostid.id == wqset->wqset_prepost_id);
1067 assert(wqp_type(wqp) == WQP_POST);
1068
1069 /*
1070 * At this point we know we have a list of POST objects.
1071 * Grab a handle to the last element in the list and start
1072 * the iteration.
1073 */
1074 tmp_wqp = wq_prepost_get_rnext(wqp);
1075 assert(tmp_wqp != NULL && wqp_type(tmp_wqp) == WQP_POST);
1076
1077 uint64_t last_id = tmp_wqp->wqp_prepostid.id;
1078 wq_prepost_put(tmp_wqp);
1079
1080 ret = WQ_ITERATE_SUCCESS;
1081 for (;;) {
1082 uint64_t wqp_id, first_id, next_id;
1083
1084 wqp_id = wqp->wqp_prepostid.id;
1085 first_id = wqset->wqset_prepost_id;
1086 next_id = wqp->wqp_post.wqp_next_id;
1087
1088 /* grab the WQP_WQ object this _POST points to */
1089 tmp_wqp = wq_prepost_get(wqp->wqp_post.wqp_wq_id);
1090 if (!tmp_wqp) {
1091 /*
1092 * This WQP_POST object points to an invalid
1093 * WQP_WQ object - remove the POST object from
1094 * the list.
1095 */
1096 if (wq_prepost_remove(wqset, wqp) == 0) {
1097 wq_prepost_put(wqp);
1098 goto restart;
1099 }
1100 goto next_prepost;
1101 }
1102 assert(wqp_type(tmp_wqp) == WQP_WQ);
1103 /*
1104 * make the callback: note that this could remove 'wqp' or
1105 * drop the lock on our waitq set. We need to re-validate
1106 * our state when this function returns.
1107 */
39037602 1108 ret = cb(wqset, ctx, wqp, tmp_wqp->wqp_wq.wqp_wq_ptr);
3e170ce0
A
1109 wq_prepost_put(tmp_wqp);
1110
1111 switch (ret) {
1112 case WQ_ITERATE_CONTINUE:
1113 /* continue iteration */
1114 break;
1115 case WQ_ITERATE_INVALIDATE_CONTINUE:
1116 assert(next_id == wqp->wqp_post.wqp_next_id);
1117 if (wq_prepost_remove(wqset, wqp) == 0) {
1118 wq_prepost_put(wqp);
1119 goto restart;
1120 }
1121 goto next_prepost;
1122 case WQ_ITERATE_RESTART:
1123 wq_prepost_put(wqp);
1124 /* fall-through */
1125 case WQ_ITERATE_DROPPED:
1126 /* the callback dropped the ref to wqp: just restart */
1127 goto restart;
1128 default:
1129 /* break out of the iteration for some other reason */
1130 goto finish_prepost_foreach;
1131 }
1132
1133 /*
1134 * the set lock may have been dropped during callback,
1135 * if something looks different, restart the prepost iteration
1136 */
1137 if (!wqp_is_valid(wqp) ||
1138 (wqp->wqp_post.wqp_next_id != next_id) ||
1139 wqset->wqset_prepost_id != first_id) {
1140 wq_prepost_put(wqp);
1141 goto restart;
1142 }
1143
1144next_prepost:
1145 /* this was the last object in the list */
1146 if (wqp_id == last_id)
1147 break;
1148
1149 /* get the next object */
1150 tmp_wqp = wq_prepost_get(next_id);
1151 if (!tmp_wqp) {
1152 /*
1153 * At this point we've already checked our state
1154 * after the callback (which may have dropped the set
1155 * lock). If we find an invalid member of the list
1156 * then something is wrong.
1157 */
1158 panic("Invalid WQP_POST member 0x%llx in waitq set "
1159 "0x%llx prepost list (first:%llx, "
1160 "wqp:%p)",
1161 next_id, wqset->wqset_id, first_id, wqp);
1162 }
1163 wq_prepost_put(wqp);
1164 wqp = tmp_wqp;
1165
1166 assert(wqp_type(wqp) == WQP_POST);
1167 }
1168
1169finish_prepost_foreach:
1170 wq_prepost_put(wqp);
1171 if (ret == WQ_ITERATE_CONTINUE)
1172 ret = WQ_ITERATE_SUCCESS;
1173
1174 return ret;
1175}
1176
1177/**
1178 * Perform a simple loop over a chain of prepost objects
1179 *
1180 * Conditions:
1181 * If 'prepost_id' is associated with a waitq (set) then that object must
1182 * be locked before calling this function.
1183 * Callback function, 'cb', must be able to handle a NULL wqset pointer
1184 * and a NULL waitq pointer!
1185 *
1186 * Notes:
1187 * This prepost chain iteration will _not_ automatically adjust any chain
1188 * element or linkage. This is the responsibility of the caller! If you
1189 * want automatic prepost chain management (at a cost of extra CPU time),
1190 * you can use: wq_prepost_foreach_locked().
1191 */
1192static int wq_prepost_iterate(uint64_t prepost_id,
1193 void *ctx, wqp_callback_func cb)
1194{
1195 int ret;
1196 struct wq_prepost *wqp;
1197
1198 if (!prepost_id)
1199 return WQ_ITERATE_SUCCESS;
1200
1201 wqp = wq_prepost_get(prepost_id);
1202 if (!wqp)
1203 return WQ_ITERATE_SUCCESS;
1204
1205 if (wqp_type(wqp) == WQP_WQ) {
1206 ret = WQ_ITERATE_SUCCESS;
1207 if (cb)
1208 ret = cb(NULL, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr);
1209
1210 if (ret != WQ_ITERATE_DROPPED)
1211 wq_prepost_put(wqp);
1212 return ret;
1213 }
1214
1215 assert(wqp->wqp_prepostid.id == prepost_id);
1216 assert(wqp_type(wqp) == WQP_POST);
1217
1218 /* at this point we know we have a list of POST objects */
1219 uint64_t next_id;
1220
1221 ret = WQ_ITERATE_CONTINUE;
1222 do {
1223 struct wq_prepost *tmp_wqp;
1224 struct waitq *wq = NULL;
1225
1226 next_id = wqp->wqp_post.wqp_next_id;
1227
1228 /* grab the WQP_WQ object this _POST points to */
1229 tmp_wqp = wq_prepost_get(wqp->wqp_post.wqp_wq_id);
1230 if (tmp_wqp) {
1231 assert(wqp_type(tmp_wqp) == WQP_WQ);
1232 wq = tmp_wqp->wqp_wq.wqp_wq_ptr;
1233 }
1234
1235 if (cb)
1236 ret = cb(NULL, ctx, wqp, wq);
1237 if (tmp_wqp)
1238 wq_prepost_put(tmp_wqp);
1239
1240 if (ret != WQ_ITERATE_CONTINUE)
1241 break;
1242
1243 tmp_wqp = wq_prepost_get(next_id);
1244 if (!tmp_wqp) {
1245 /*
1246 * the chain is broken: nothing we can do here besides
1247 * bail from the iteration.
1248 */
1249 ret = WQ_ITERATE_ABORTED;
1250 break;
1251 }
1252
1253 wq_prepost_put(wqp);
1254 wqp = tmp_wqp;
1255
1256 assert(wqp_type(wqp) == WQP_POST);
1257 } while (next_id != prepost_id);
1258
1259 if (ret != WQ_ITERATE_DROPPED)
1260 wq_prepost_put(wqp);
1261
1262 if (ret == WQ_ITERATE_CONTINUE)
1263 ret = WQ_ITERATE_SUCCESS;
1264 return ret;
1265}
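/*
 * Usage sketch (illustrative only; 'my_prepost_cb' is a made-up name):
 * prepost chains are consumed through a wqp_callback_func.  Use
 * wq_prepost_foreach_locked() when you hold the set lock and want stale
 * entries culled automatically; use wq_prepost_iterate() for a raw walk
 * of a prepost ID (its callback must tolerate NULL wqset/waitq):
 *
 *	static int my_prepost_cb(struct waitq_set *wqset, void *ctx,
 *				 struct wq_prepost *wqp, struct waitq *waitq)
 *	{
 *		// ... inspect 'waitq' (may be NULL for wq_prepost_iterate) ...
 *		return WQ_ITERATE_CONTINUE;
 *	}
 *
 *	ret = wq_prepost_foreach_locked(wqset, ctx, my_prepost_cb);
 *
 * wq_is_preposted_on_set_cb() below is a real instance of this pattern.
 */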
1266
1267
1268struct _is_posted_ctx {
1269 struct waitq *posting_wq;
1270 int did_prepost;
1271};
1272
1273static int wq_is_preposted_on_set_cb(struct waitq_set *wqset, void *ctx,
1274 struct wq_prepost *wqp, struct waitq *waitq)
1275{
1276 struct _is_posted_ctx *pctx = (struct _is_posted_ctx *)ctx;
1277
1278 (void)wqset;
1279 (void)wqp;
1280
1281 /*
1282 * Don't early-out, run through the _entire_ list:
1283 * This ensures that we retain a minimum number of invalid elements.
1284 */
1285 if (pctx->posting_wq == waitq)
1286 pctx->did_prepost = 1;
1287
1288 return WQ_ITERATE_CONTINUE;
1289}
1290
1291
1292/**
1293 * checks if 'waitq' has already preposted on 'wqset'
1294 *
1295 * Parameters:
1296 * waitq The waitq that's preposting
1297 * wqset The set onto which waitq may be preposted
1298 *
1299 * Conditions:
1300 * both waitq and wqset are locked
1301 *
1302 * Returns non-zero if 'waitq' has already preposted to 'wqset'
1303 */
1304static int wq_is_preposted_on_set(struct waitq *waitq, struct waitq_set *wqset)
1305{
1306 int ret;
1307 struct _is_posted_ctx pctx;
1308
1309 /*
1310 * If the set's only prepost matches the waitq's prepost ID,
1311 * then it obviously already preposted to the set.
1312 */
1313 if (waitq->waitq_prepost_id != 0 &&
1314 wqset->wqset_prepost_id == waitq->waitq_prepost_id)
1315 return 1;
1316
1317 /* use full prepost iteration: always trim the list */
1318 pctx.posting_wq = waitq;
1319 pctx.did_prepost = 0;
1320 ret = wq_prepost_foreach_locked(wqset, (void *)&pctx,
1321 wq_is_preposted_on_set_cb);
1322 return pctx.did_prepost;
1323}
1324
1325static struct wq_prepost *wq_get_prepost_obj(uint64_t *reserved, int type)
1326{
1327 struct wq_prepost *wqp = NULL;
1328 /*
1329 * don't fail just because the caller doesn't have enough
1330 * reservations, we've kept a low-water mark on the prepost table,
1331 * so there should be some available for us.
1332 */
1333 if (reserved && *reserved) {
1334 wqp = wq_prepost_rpop(reserved, type);
39037602 1335 assert(wqp->wqte.lt_id.idx < g_prepost_table.nelem);
3e170ce0
A
1336 } else {
1337 /*
1338 * TODO: if in interrupt context, grab from a special
1339 * region / reserved list!
1340 */
1341 wqp = wq_prepost_alloc(type, 1);
1342 }
1343
1344 if (wqp == NULL)
1345 panic("Couldn't allocate prepost object!");
1346 return wqp;
1347}
1348
1349
1350/**
1351 * prepost a waitq onto a waitq set
1352 *
1353 * Parameters:
1354 * wqset The set onto which waitq will be preposted
1355 * waitq The waitq that's preposting
39037602 1356 * reserved List (lt_elem_list_ style) of pre-allocated prepost elements
3e170ce0
A
1357 * Could be NULL
1358 *
1359 * Conditions:
1360 * both wqset and waitq are locked
1361 *
1362 * Notes:
1363 * If reserved is NULL, this may block on prepost table growth.
1364 */
1365static void wq_prepost_do_post_locked(struct waitq_set *wqset,
1366 struct waitq *waitq,
1367 uint64_t *reserved)
1368{
1369 struct wq_prepost *wqp_post, *wqp_head, *wqp_tail;
1370
1371 assert(waitq_held(waitq) && waitq_held(&wqset->wqset_q));
1372
1373 /*
1374 * nothing to do if it's already preposted:
1375 * note that this also culls any invalid prepost objects
1376 */
1377 if (wq_is_preposted_on_set(waitq, wqset))
1378 return;
1379
1380 /*
1381 * This function is called because an event is being posted to 'waitq'.
1382 * We need a prepost object associated with this queue. Allocate one
1383 * now if the waitq isn't already associated with one.
1384 */
1385 if (waitq->waitq_prepost_id == 0) {
1386 struct wq_prepost *wqp;
1387 wqp = wq_get_prepost_obj(reserved, WQP_WQ);
1388 wqp->wqp_wq.wqp_wq_ptr = waitq;
1389 wqp_set_valid(wqp);
1390 waitq->waitq_prepost_id = wqp->wqp_prepostid.id;
1391 wq_prepost_put(wqp);
1392 }
1393
39037602 1394#if CONFIG_LTABLE_STATS
3e170ce0
A
1395 g_prepost_table.npreposts += 1;
1396#endif
1397
1398 wqdbg_v("preposting waitq %p (0x%llx) to set 0x%llx",
1399 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq),
1400 waitq->waitq_prepost_id, wqset->wqset_id);
1401
1402 if (wqset->wqset_prepost_id == 0) {
1403 /* the set has no previous preposts */
1404 wqset->wqset_prepost_id = waitq->waitq_prepost_id;
1405 return;
1406 }
1407
1408 wqp_head = wq_prepost_get(wqset->wqset_prepost_id);
1409 if (!wqp_head) {
1410 /* the previous prepost has become invalid */
1411 wqset->wqset_prepost_id = waitq->waitq_prepost_id;
1412 return;
1413 }
1414
1415 assert(wqp_head->wqp_prepostid.id == wqset->wqset_prepost_id);
1416
1417 /*
1418 * If we get here, we're going to need at least one new wq_prepost
1419 * object. If the previous wqset_prepost_id points to a WQP_WQ, we
1420 * actually need to allocate 2 wq_prepost objects because the WQP_WQ
1421 * is tied to the waitq and shared across all sets.
1422 */
1423 wqp_post = wq_get_prepost_obj(reserved, WQP_POST);
1424
1425 wqp_post->wqp_post.wqp_wq_id = waitq->waitq_prepost_id;
1426 wqdbg_v("POST 0x%llx :: WQ 0x%llx", wqp_post->wqp_prepostid.id,
1427 waitq->waitq_prepost_id);
1428
1429 if (wqp_type(wqp_head) == WQP_WQ) {
1430 /*
1431 * We must replace the wqset_prepost_id with a pointer
1432 * to two new WQP_POST objects
1433 */
1434 uint64_t wqp_id = wqp_head->wqp_prepostid.id;
1435 wqdbg_v("set 0x%llx previous had 1 WQ prepost (0x%llx): "
1436 "replacing with two POST preposts",
1437 wqset->wqset_id, wqp_id);
1438
1439 /* drop the old reference */
1440 wq_prepost_put(wqp_head);
1441
1442 /* grab another new object (the 2nd of two) */
1443 wqp_head = wq_get_prepost_obj(reserved, WQP_POST);
1444
1445 /* point this one to the original WQP_WQ object */
1446 wqp_head->wqp_post.wqp_wq_id = wqp_id;
1447 wqdbg_v("POST 0x%llx :: WQ 0x%llx",
1448 wqp_head->wqp_prepostid.id, wqp_id);
1449
1450 /* link it to the new wqp_post object allocated earlier */
1451 wqp_head->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id;
1452 /* make the list doubly-linked and circular */
1453 wq_prepost_rlink(wqp_head, wqp_post);
1454
1455 /*
1456 * Finish setting up the new prepost: point it back to the
1457 * POST object we allocated to replace the original wqset
1458 * WQ prepost object
1459 */
1460 wqp_post->wqp_post.wqp_next_id = wqp_head->wqp_prepostid.id;
1461 wq_prepost_rlink(wqp_post, wqp_head);
1462
1463 /* mark objects valid, and reset the wqset prepost list head */
1464 wqp_set_valid(wqp_head);
1465 wqp_set_valid(wqp_post);
1466 wqset->wqset_prepost_id = wqp_head->wqp_prepostid.id;
1467
1468 /* release both references */
1469 wq_prepost_put(wqp_head);
1470 wq_prepost_put(wqp_post);
1471
1472 wqdbg_v("set 0x%llx: 0x%llx/0x%llx -> 0x%llx/0x%llx -> 0x%llx",
1473 wqset->wqset_id, wqset->wqset_prepost_id,
1474 wqp_head->wqp_prepostid.id, wqp_head->wqp_post.wqp_next_id,
1475 wqp_post->wqp_prepostid.id,
1476 wqp_post->wqp_post.wqp_next_id);
1477 return;
1478 }
1479
1480 assert(wqp_type(wqp_head) == WQP_POST);
1481
1482 /*
1483 * Add the new prepost to the end of the prepost list
1484 */
1485 wqp_tail = wq_prepost_get_rnext(wqp_head);
1486 assert(wqp_tail != NULL);
1487 assert(wqp_tail->wqp_post.wqp_next_id == wqset->wqset_prepost_id);
1488
1489 /*
1490 * link the head to the new tail
1491 * NOTE: this needs to happen first in case wqp_tail == wqp_head
1492 */
1493 wq_prepost_reset_rnext(wqp_head);
1494 wq_prepost_rlink(wqp_head, wqp_post);
1495
1496 /* point the new object to the list head, and list tail */
1497 wqp_post->wqp_post.wqp_next_id = wqp_head->wqp_prepostid.id;
1498 wq_prepost_rlink(wqp_post, wqp_tail);
1499
1500 /* point the last item in the waitq set's list to the new object */
1501 wqp_tail->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id;
1502
1503 wqp_set_valid(wqp_post);
1504
1505 wq_prepost_put(wqp_head);
1506 wq_prepost_put(wqp_tail);
1507 wq_prepost_put(wqp_post);
1508
1509 wqdbg_v("set 0x%llx (wqp:0x%llx) last_prepost:0x%llx, "
1510 "new_prepost:0x%llx->0x%llx", wqset->wqset_id,
1511 wqset->wqset_prepost_id, wqp_head->wqp_prepostid.id,
1512 wqp_post->wqp_prepostid.id, wqp_post->wqp_post.wqp_next_id);
1513
1514 return;
1515}
1516
1517
1518/* ----------------------------------------------------------------------
1519 *
1520 * Stats collection / reporting
1521 *
1522 * ---------------------------------------------------------------------- */
39037602
A
1523#if defined(CONFIG_LTABLE_STATS) && defined(CONFIG_WAITQ_STATS)
1524static void wq_table_stats(struct link_table *table, struct wq_table_stats *stats)
3e170ce0
A
1525{
1526 stats->version = WAITQ_STATS_VERSION;
1527 stats->table_elements = table->nelem;
1528 stats->table_used_elems = table->used_elem;
1529 stats->table_elem_sz = table->elem_sz;
1530 stats->table_slabs = table->nslabs;
1531 stats->table_slab_sz = table->slab_sz;
1532
1533 stats->table_num_allocs = table->nallocs;
1534 stats->table_num_preposts = table->npreposts;
1535 stats->table_num_reservations = table->nreservations;
1536
1537 stats->table_max_used = table->max_used;
1538 stats->table_avg_used = table->avg_used;
1539 stats->table_max_reservations = table->max_reservations;
1540 stats->table_avg_reservations = table->avg_reservations;
1541}
1542
1543void waitq_link_stats(struct wq_table_stats *stats)
1544{
1545 if (!stats)
1546 return;
39037602 1547 wq_table_stats(&g_wqlinktable, stats);
3e170ce0
A
1548}
1549
1550void waitq_prepost_stats(struct wq_table_stats *stats)
1551{
1552 wq_table_stats(&g_prepost_table, stats);
1553}
1554#endif
1555
1556
1557/* ----------------------------------------------------------------------
1558 *
1559 * Global Wait Queues
1560 *
1561 * ---------------------------------------------------------------------- */
1562
1563static struct waitq g_boot_waitq;
1564static struct waitq *global_waitqs = &g_boot_waitq;
1565static uint32_t g_num_waitqs = 1;
1566
1567/*
1568 * Zero out the used MSBs of the event.
1569 */
1570#define _CAST_TO_EVENT_MASK(event) ((uintptr_t)(event) & ((1ul << _EVENT_MASK_BITS) - 1ul))
1571
3e170ce0
A
1572static __inline__ uint32_t waitq_hash(char *key, size_t length)
1573{
39037602 1574 uint32_t hash = jenkins_hash(key, length);
3e170ce0
A
1575
1576 hash &= (g_num_waitqs - 1);
1577 return hash;
1578}
1579
1580/* return a global waitq pointer corresponding to the given event */
1581struct waitq *_global_eventq(char *event, size_t event_length)
1582{
1583 return &global_waitqs[waitq_hash(event, event_length)];
1584}
1585
1586/* return an indexed global waitq pointer */
1587struct waitq *global_waitq(int index)
1588{
1589 return &global_waitqs[index % g_num_waitqs];
1590}
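/*
 * Illustrative note (a sketch, not a definition from this file): callers
 * waiting on a bare kernel address typically pick their global waitq by
 * hashing that address, e.g.
 *
 *	struct waitq *wq = _global_eventq((char *)&event, sizeof(event));
 *
 * so every waiter and waker for the same event lands on the same bucket,
 * while distinct events spread across the g_num_waitqs buckets.
 */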
1591
1592
39037602 1593#if defined(CONFIG_LTABLE_STATS) || defined(CONFIG_WAITQ_STATS)
3e170ce0
A
1594/* this global is for lldb */
1595const uint32_t g_nwaitq_btframes = NWAITQ_BTFRAMES;
3e170ce0
A
1596
1597static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip)
1598{
1599 uintptr_t buf[NWAITQ_BTFRAMES + skip];
1600 if (skip < 0)
1601 skip = 0;
1602 memset(buf, 0, (NWAITQ_BTFRAMES + skip) * sizeof(uintptr_t));
39037602 1603 backtrace(buf, g_nwaitq_btframes + skip);
3e170ce0
A
1604 memcpy(&bt[0], &buf[skip], NWAITQ_BTFRAMES * sizeof(uintptr_t));
1605}
39037602
A
1606#else /* no stats */
1607#define waitq_grab_backtrace(...)
1608#endif
1609
1610#if CONFIG_WAITQ_STATS
1611
1612struct wq_stats g_boot_stats;
1613struct wq_stats *g_waitq_stats = &g_boot_stats;
3e170ce0
A
1614
1615static __inline__ struct wq_stats *waitq_global_stats(struct waitq *waitq) {
1616 struct wq_stats *wqs;
1617 uint32_t idx;
1618
1619 if (!waitq_is_global(waitq))
1620 return NULL;
1621
1622 idx = (uint32_t)(((uintptr_t)waitq - (uintptr_t)global_waitqs) / sizeof(*waitq));
1623 assert(idx < g_num_waitqs);
1624 wqs = &g_waitq_stats[idx];
1625 return wqs;
1626}
1627
1628static __inline__ void waitq_stats_count_wait(struct waitq *waitq)
1629{
1630 struct wq_stats *wqs = waitq_global_stats(waitq);
1631 if (wqs != NULL) {
1632 wqs->waits++;
1633 waitq_grab_backtrace(wqs->last_wait, 2);
1634 }
1635}
1636
1637static __inline__ void waitq_stats_count_wakeup(struct waitq *waitq)
1638{
1639 struct wq_stats *wqs = waitq_global_stats(waitq);
1640 if (wqs != NULL) {
1641 wqs->wakeups++;
1642 waitq_grab_backtrace(wqs->last_wakeup, 2);
1643 }
1644}
1645
1646static __inline__ void waitq_stats_count_clear_wakeup(struct waitq *waitq)
1647{
1648 struct wq_stats *wqs = waitq_global_stats(waitq);
1649 if (wqs != NULL) {
1650 wqs->wakeups++;
1651 wqs->clears++;
1652 waitq_grab_backtrace(wqs->last_wakeup, 2);
1653 }
1654}
1655
1656static __inline__ void waitq_stats_count_fail(struct waitq *waitq)
1657{
1658 struct wq_stats *wqs = waitq_global_stats(waitq);
1659 if (wqs != NULL) {
1660 wqs->failed_wakeups++;
1661 waitq_grab_backtrace(wqs->last_failed_wakeup, 2);
1662 }
1663}
39037602 1664#else /* !CONFIG_WAITQ_STATS */
3e170ce0
A
1665#define waitq_stats_count_wait(q) do { } while (0)
1666#define waitq_stats_count_wakeup(q) do { } while (0)
1667#define waitq_stats_count_clear_wakeup(q) do { } while (0)
1668#define waitq_stats_count_fail(q) do { } while (0)
1669#endif
1670
1671int waitq_is_valid(struct waitq *waitq)
1672{
39037602 1673 return (waitq != NULL) && waitq->waitq_isvalid && ((waitq->waitq_type & ~1) == WQT_QUEUE);
3e170ce0
A
1674}
1675
1676int waitq_set_is_valid(struct waitq_set *wqset)
1677{
39037602 1678 return (wqset != NULL) && wqset->wqset_q.waitq_isvalid && waitqs_is_set(wqset);
3e170ce0
A
1679}
1680
1681int waitq_is_global(struct waitq *waitq)
1682{
1683 if (waitq >= global_waitqs && waitq < global_waitqs + g_num_waitqs)
1684 return 1;
1685 return 0;
1686}
1687
1688int waitq_irq_safe(struct waitq *waitq)
1689{
1690 /* global wait queues have this bit set on initialization */
1691 return waitq->waitq_irq;
1692}
1693
1694static uint32_t waitq_hash_size(void)
1695{
1696 uint32_t hsize, queues;
1697
1698 if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize)))
1699 return (hsize);
1700
39037602 1701 queues = thread_max / 5;
3e170ce0
A
1702 hsize = P2ROUNDUP(queues * sizeof(struct waitq), PAGE_SIZE);
1703
1704 return hsize;
1705}
1706
1707void waitq_bootstrap(void)
1708{
1709 kern_return_t kret;
39037602 1710 uint32_t whsize, qsz, tmp32;
3e170ce0 1711
39037602
A
1712 g_min_free_table_elem = DEFAULT_MIN_FREE_TABLE_ELEM;
1713 if (PE_parse_boot_argn("wqt_min_free", &tmp32, sizeof(tmp32)) == TRUE)
1714 g_min_free_table_elem = tmp32;
1715 wqdbg("Minimum free table elements: %d", g_min_free_table_elem);
3e170ce0
A
1716
1717 /*
1718 * Determine the amount of memory we're willing to reserve for
1719 * the waitqueue hash table
1720 */
1721 whsize = waitq_hash_size();
1722
1723 /* Determine the number of waitqueues we can fit. */
1724 qsz = sizeof(struct waitq);
1725 whsize = ROUNDDOWN(whsize, qsz);
1726 g_num_waitqs = whsize / qsz;
1727
1728 /*
1729 * The hash algorithm requires that this be a power of 2, so we
1730 * just mask off all the low-order bits.
1731 */
1732 for (uint32_t i = 0; i < 31; i++) {
1733 uint32_t bit = (1 << i);
1734 if ((g_num_waitqs & bit) == g_num_waitqs)
1735 break;
1736 g_num_waitqs &= ~bit;
1737 }
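/*
 * (Illustrative: the loop above clears low-order bits until only the
 * highest set bit remains, i.e. it rounds g_num_waitqs down to a power
 * of two -- e.g. 6400 queues become 4096.)
 */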
1738 assert(g_num_waitqs > 0);
1739
1740 /* Now determine how much memory we really need. */
1741 whsize = P2ROUNDUP(g_num_waitqs * qsz, PAGE_SIZE);
1742
1743 wqdbg("allocating %d global queues (%d bytes)", g_num_waitqs, whsize);
1744 kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&global_waitqs,
1745 whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ);
1746 if (kret != KERN_SUCCESS || global_waitqs == NULL)
1747 panic("kernel_memory_allocate() failed to alloc global_waitqs"
1748 ", error: %d, whsize: 0x%x", kret, whsize);
1749
1750#if CONFIG_WAITQ_STATS
1751 whsize = P2ROUNDUP(g_num_waitqs * sizeof(struct wq_stats), PAGE_SIZE);
1752 kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&g_waitq_stats,
1753 whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ);
1754 if (kret != KERN_SUCCESS || g_waitq_stats == NULL)
1755 panic("kernel_memory_allocate() failed to alloc g_waitq_stats"
1756 ", error: %d, whsize: 0x%x", kret, whsize);
1757 memset(g_waitq_stats, 0, whsize);
1758#endif
1759
1760 for (uint32_t i = 0; i < g_num_waitqs; i++) {
1761 waitq_init(&global_waitqs[i], SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ);
1762 }
1763
3e170ce0
A
1764 waitq_set_zone = zinit(sizeof(struct waitq_set),
1765 WAITQ_SET_MAX * sizeof(struct waitq_set),
1766 sizeof(struct waitq_set),
1767 "waitq sets");
1768 zone_change(waitq_set_zone, Z_NOENCRYPT, TRUE);
39037602
A
1769
1770 /* initialize the global waitq link table */
1771 wql_init();
1772
1773 /* initialize the global waitq prepost table */
1774 wqp_init();
3e170ce0
A
1775}
1776
1777
1778/* ----------------------------------------------------------------------
1779 *
1780 * Wait Queue Implementation
1781 *
1782 * ---------------------------------------------------------------------- */
1783
1784/*
1785 * Double the standard lock timeout, because wait queues tend
1786 * to iterate over a number of threads - locking each. If there is
1787 * a problem with a thread lock, it normally times out at the wait
1788 * queue level first, hiding the real problem.
1789 */
1790/* For x86, the hardware timeout is in TSC units. */
1791#if defined(__i386__) || defined(__x86_64__)
1792#define hwLockTimeOut LockTimeOutTSC
1793#else
1794#define hwLockTimeOut LockTimeOut
1795#endif
1796
1797void waitq_lock(struct waitq *wq)
1798{
39037602 1799 if (__improbable(waitq_lock_to(wq,
3e170ce0
A
1800 hwLockTimeOut * 2) == 0)) {
1801 boolean_t wql_acquired = FALSE;
1802
1803 while (machine_timeout_suspended()) {
3e170ce0 1804 mp_enable_preemption();
39037602 1805 wql_acquired = waitq_lock_to(wq,
3e170ce0
A
1806 hwLockTimeOut * 2);
1807 if (wql_acquired)
1808 break;
1809 }
1810 if (wql_acquired == FALSE)
1811 panic("waitq deadlock - waitq=%p, cpu=%d\n",
1812 wq, cpu_number());
1813 }
39037602
A
1814#if defined(__x86_64__)
1815 pltrace(FALSE);
1816#endif
3e170ce0
A
1817 assert(waitq_held(wq));
1818}
1819
1820void waitq_unlock(struct waitq *wq)
1821{
1822 assert(waitq_held(wq));
39037602
A
1823#if defined(__x86_64__)
1824 pltrace(TRUE);
1825#endif
1826 waitq_lock_unlock(wq);
3e170ce0
A
1827}
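/*
 * A minimal sketch of the locking discipline expected by waitq_lock() /
 * waitq_unlock(), assuming 'wq' was set up with waitq_init(); the function
 * name is hypothetical. IRQ-safe queues must be locked with interrupts
 * disabled, mirroring the pattern used by waitq_deinit() later in this file.
 */
static void example_with_waitq_locked(struct waitq *wq)
{
	spl_t s = 0;

	if (waitq_irq_safe(wq))
		s = splsched();		/* disable interrupts first */
	waitq_lock(wq);

	/* ... examine or modify wait queue state here ... */

	waitq_unlock(wq);
	if (waitq_irq_safe(wq))
		splx(s);
}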
1828
1829
1830/**
1831 * clear the thread-related waitq state
1832 *
1833 * Conditions:
1834 * 'thread' is locked
1835 */
1836static inline void thread_clear_waitq_state(thread_t thread)
1837{
1838 thread->waitq = NULL;
1839 thread->wait_event = NO_EVENT64;
1840 thread->at_safe_point = FALSE;
1841}
1842
1843
1844typedef thread_t (*waitq_select_cb)(void *ctx, struct waitq *waitq,
1845 int is_global, thread_t thread);
1846
1847struct waitq_select_args {
1848 /* input parameters */
1849 struct waitq *posted_waitq;
1850 struct waitq *waitq;
1851 event64_t event;
1852 waitq_select_cb select_cb;
1853 void *select_ctx;
1854
1855 uint64_t *reserved_preposts;
1856
1857 /* output parameters */
1858 queue_t threadq;
1859 int max_threads;
1860 int *nthreads;
1861 spl_t *spl;
1862};
1863
1864static void do_waitq_select_n_locked(struct waitq_select_args *args);
1865
1866/**
1867 * callback invoked once for every waitq set to which a waitq belongs
1868 *
1869 * Conditions:
1870 * ctx->posted_waitq is locked
1871 * 'link' points to a valid waitq set
1872 *
1873 * Notes:
1874 * Takes the waitq set lock on the set pointed to by 'link'
1875 * Calls do_waitq_select_n_locked() which could recurse back into
1876 * this function if the waitq set is a member of other sets.
1877 * If no threads were selected, it preposts the input waitq
1878 * onto the waitq set pointed to by 'link'.
1879 */
1880static int waitq_select_walk_cb(struct waitq *waitq, void *ctx,
39037602 1881 struct waitq_link *link)
3e170ce0
A
1882{
1883 int ret = WQ_ITERATE_CONTINUE;
1884 struct waitq_select_args args = *((struct waitq_select_args *)ctx);
1885 struct waitq_set *wqset;
3e170ce0
A
1886
1887 (void)waitq;
39037602 1888 assert(wql_type(link) == WQL_WQS);
3e170ce0 1889
39037602 1890 wqset = link->wql_wqs.wql_set;
3e170ce0
A
1891 args.waitq = &wqset->wqset_q;
1892
39037602
A
1893 assert(!waitq_irq_safe(waitq));
1894 assert(!waitq_irq_safe(&wqset->wqset_q));
1895
3e170ce0
A
1896 waitq_set_lock(wqset);
1897 /*
1898 * verify that the link wasn't invalidated just before
1899 * we were able to take the lock.
1900 */
39037602 1901 if (wqset->wqset_id != link->wql_setid.id)
3e170ce0
A
1902 goto out_unlock;
1903
1904 /*
1905 * Find any threads waiting on this wait queue set,
1906 * and recurse into any waitq set to which this set belongs.
1907 */
1908 do_waitq_select_n_locked(&args);
1909
1910 if (*(args.nthreads) > 0 ||
1911 (args.threadq && !queue_empty(args.threadq))) {
1912 /* at least 1 thread was selected and returned: don't prepost */
1913 if (args.max_threads > 0 &&
1914 *(args.nthreads) >= args.max_threads) {
1915 /* break out of the setid walk */
1916 ret = WQ_ITERATE_FOUND;
1917 }
1918 goto out_unlock;
1919 } else {
1920 /*
1921 * No thread selected: prepost 'waitq' to 'wqset'
1922 * if wqset can handle preposts and the event is set to 0.
1923 * We also make sure to not post waitq sets to other sets.
1924 *
39037602
A
1925 * If the set doesn't support preposts, but does support
1926 * prepost callout/hook interaction, invoke the predefined
1927 * callout function and pass the set's 'prepost_hook.' This
1928 * could potentially release another thread to handle events.
3e170ce0 1929 */
39037602
A
1930 if (args.event == NO_EVENT64) {
1931 if (waitq_set_can_prepost(wqset)) {
1932 wq_prepost_do_post_locked(
1933 wqset, waitq, args.reserved_preposts);
1934 } else if (waitq_set_has_prepost_hook(wqset)) {
1935 waitq_set__CALLING_PREPOST_HOOK__(
1936 wqset->wqset_prepost_hook, waitq, 0);
1937 }
3e170ce0
A
1938 }
1939 }
1940
1941out_unlock:
1942 waitq_set_unlock(wqset);
3e170ce0
A
1943 return ret;
1944}
1945
1946/**
1947 * generic thread selection from a waitq (and sets to which the waitq belongs)
1948 *
1949 * Conditions:
1950 * args->waitq (and args->posted_waitq) is locked
1951 *
1952 * Notes:
1953 * Uses the optional select callback function to refine the selection
1954 * of one or more threads from a waitq and any set to which the waitq
1955 * belongs. The select callback is invoked once for every thread that
1956 * is found to be waiting on the input args->waitq.
1957 *
1958 * If one or more threads are selected, this may disable interrupts.
1959 * The previous interrupt state is returned in args->spl and should
1960 * be used in a call to splx() if threads are returned to the caller.
1961 */
1962static void do_waitq_select_n_locked(struct waitq_select_args *args)
1963{
1964 struct waitq *waitq = args->waitq;
1965 int max_threads = args->max_threads;
1966 thread_t thread = THREAD_NULL, first_thread = THREAD_NULL;
39037602
A
1967 struct waitq *safeq;
1968 uint32_t remaining_eventmask = 0;
1969 uint32_t eventmask;
3e170ce0 1970 int *nthreads = args->nthreads;
39037602 1971 spl_t spl = 0;
3e170ce0
A
1972
1973 assert(max_threads != 0);
1974
39037602
A
1975 if (!waitq_irq_safe(waitq)) {
1976 /* JMM - add flag to waitq to avoid global lookup if no waiters */
1977 eventmask = _CAST_TO_EVENT_MASK(waitq);
1978 safeq = global_eventq(waitq);
1979 if (*nthreads == 0)
1980 spl = splsched();
1981 waitq_lock(safeq);
1982 } else {
3e170ce0 1983 eventmask = _CAST_TO_EVENT_MASK(args->event);
39037602 1984 safeq = waitq;
3e170ce0
A
1985 }
1986
39037602
A
1987 /*
1988 * If the safeq doesn't have an eventmask (not global) or the event
1989 * we're looking for IS set in its eventmask, then scan the threads
1990 * in that queue for ones that match the original <waitq,event> pair.
1991 */
1992 if (!waitq_is_global(safeq) ||
1993 (safeq->waitq_eventmask & eventmask) == eventmask) {
1994
1995 /* look through each thread waiting directly on the safeq */
1996 qe_foreach_element_safe(thread, &safeq->waitq_queue, wait_links) {
1997 thread_t t = THREAD_NULL;
1998 assert_thread_magic(thread);
1999
2000 if (thread->waitq == waitq && thread->wait_event == args->event) {
2001 t = thread;
2002 if (first_thread == THREAD_NULL)
2003 first_thread = thread;
2004
 2005			/* allow the caller to further refine the selection */
2006 if (args->select_cb)
2007 t = args->select_cb(args->select_ctx, waitq,
2008 waitq_is_global(waitq), thread);
2009 if (t != THREAD_NULL) {
2010 *nthreads += 1;
2011 if (args->threadq) {
2012 if (*nthreads == 1)
2013 *(args->spl) = (safeq != waitq) ? spl : splsched();
2014 thread_lock(t);
2015 thread_clear_waitq_state(t);
2016 /* put locked thread on output queue */
2017 re_queue_tail(args->threadq, &t->wait_links);
2018 }
2019 /* only enqueue up to 'max' threads */
2020 if (*nthreads >= max_threads && max_threads > 0)
2021 break;
3e170ce0 2022 }
39037602
A
2023 }
 2024			/* thread wasn't selected so track its event */
2025 if (t == THREAD_NULL) {
2026 remaining_eventmask |= (thread->waitq != safeq) ?
2027 _CAST_TO_EVENT_MASK(thread->waitq):
2028 _CAST_TO_EVENT_MASK(thread->wait_event);
3e170ce0
A
2029 }
2030 }
3e170ce0 2031
39037602
A
2032 /*
2033 * Update the eventmask of global queues we just scanned:
2034 * - If we selected all the threads in the queue, we can clear its
2035 * eventmask.
2036 *
2037 * - If we didn't find enough threads to fill our needs, then we can
2038 * assume we looked at every thread in the queue and the mask we
2039 * computed is complete - so reset it.
2040 */
2041 if (waitq_is_global(safeq)) {
2042 if (queue_empty(&safeq->waitq_queue))
2043 safeq->waitq_eventmask = 0;
2044 else if (max_threads < 0 || *nthreads < max_threads)
2045 safeq->waitq_eventmask = remaining_eventmask;
2046 }
2047 }
3e170ce0
A
2048
2049 /*
2050 * Grab the first thread in the queue if no other thread was selected.
2051 * We can guarantee that no one has manipulated this thread because
2052 * it's waiting on the given waitq, and we have that waitq locked.
2053 */
2054 if (*nthreads == 0 && first_thread != THREAD_NULL && args->threadq) {
2055 /* we know this is the first (and only) thread */
2056 ++(*nthreads);
39037602 2057 *(args->spl) = (safeq != waitq) ? spl : splsched();
3e170ce0
A
2058 thread_lock(first_thread);
2059 thread_clear_waitq_state(first_thread);
39037602 2060 re_queue_tail(args->threadq, &first_thread->wait_links);
3e170ce0 2061
39037602
A
2062 /* update the eventmask on [now] empty global queues */
2063 if (waitq_is_global(safeq) && queue_empty(&safeq->waitq_queue))
2064 safeq->waitq_eventmask = 0;
3e170ce0
A
2065 }
2066
39037602
A
2067 /* unlock the safe queue if we locked one above */
2068 if (safeq != waitq) {
2069 waitq_unlock(safeq);
2070 if (*nthreads == 0)
2071 splx(spl);
2072 }
2073
3e170ce0
A
2074 if (max_threads > 0 && *nthreads >= max_threads)
2075 return;
2076
2077 /*
2078 * wait queues that are not in any sets
2079 * are the bottom of the recursion
2080 */
2081 if (!waitq->waitq_set_id)
2082 return;
2083
2084 /* check to see if the set ID for this wait queue is valid */
39037602 2085 struct waitq_link *link = wql_get_link(waitq->waitq_set_id);
3e170ce0
A
2086 if (!link) {
 2087		/* the waitq set to which this waitq belonged has been invalidated */
2088 waitq->waitq_set_id = 0;
2089 return;
2090 }
2091
39037602 2092 wql_put_link(link);
3e170ce0
A
2093
2094 /*
2095 * If this waitq is a member of any wait queue sets, we need to look
2096 * for waiting thread(s) in any of those sets, and prepost all sets that
2097 * don't have active waiters.
2098 *
2099 * Note that we do a local walk of this waitq's links - we manually
 2100	 * recurse down wait queue sets with a non-zero wqset_q.waitq_set_id
2101 */
39037602
A
2102 (void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
2103 WQL_WQS, (void *)args, waitq_select_walk_cb);
3e170ce0
A
2104}
2105
2106/**
2107 * main entry point for thread selection from a waitq
2108 *
2109 * Conditions:
2110 * waitq is locked
2111 *
2112 * Returns:
2113 * The number of threads waiting on 'waitq' for 'event' which have
2114 * been placed onto the input 'threadq'
2115 *
2116 * Notes:
2117 * The 'select_cb' function is invoked for every thread found waiting
2118 * on 'waitq' for 'event'. The thread is _not_ locked upon callback
2119 * invocation. This parameter may be NULL.
2120 *
2121 * If one or more threads are returned in 'threadq' then the caller is
2122 * responsible to call splx() using the returned 'spl' value. Each
2123 * returned thread is locked.
2124 */
2125static __inline__ int waitq_select_n_locked(struct waitq *waitq,
2126 event64_t event,
2127 waitq_select_cb select_cb,
2128 void *select_ctx,
2129 uint64_t *reserved_preposts,
2130 queue_t threadq,
2131 int max_threads, spl_t *spl)
2132{
2133 int nthreads = 0;
2134
2135 struct waitq_select_args args = {
2136 .posted_waitq = waitq,
2137 .waitq = waitq,
2138 .event = event,
2139 .select_cb = select_cb,
2140 .select_ctx = select_ctx,
2141 .reserved_preposts = reserved_preposts,
2142 .threadq = threadq,
2143 .max_threads = max_threads,
2144 .nthreads = &nthreads,
2145 .spl = spl,
2146 };
2147
2148 do_waitq_select_n_locked(&args);
2149 return nthreads;
2150}
2151
2152
2153/**
2154 * callback function that uses thread parameters to determine wakeup eligibility
2155 *
2156 * Conditions:
2157 * 'waitq' is locked
2158 * 'thread' is not locked
2159 */
2160static thread_t waitq_select_one_cb(void *ctx, struct waitq *waitq,
2161 int is_global, thread_t thread)
2162{
2163 int fifo_q, realtime;
2164 boolean_t thread_imp_donor = FALSE;
2165
2166 (void)ctx;
2167 (void)waitq;
2168 (void)is_global;
2169 realtime = 0;
2170
2171 fifo_q = 1; /* default to FIFO for all queues for now */
2172#if IMPORTANCE_INHERITANCE
2173 if (is_global)
2174 fifo_q = 0; /* 'thread_imp_donor' takes the place of FIFO checking */
2175#endif
2176
2177 if (thread->sched_pri >= BASEPRI_REALTIME)
2178 realtime = 1;
2179
2180#if IMPORTANCE_INHERITANCE
2181 /*
 2182	 * Checking the imp donor bit does not need the thread lock or
 2183	 * task lock, since we have the wait queue lock and the
 2184	 * thread cannot be removed from it without acquiring the
 2185	 * wait queue lock. The imp donor bit may change
 2186	 * once we read its value, but it is ok to wake
 2187	 * a thread while someone drops an importance assertion
 2188	 * on that thread.
2189 */
2190 thread_imp_donor = task_is_importance_donor(thread->task);
2191#endif /* IMPORTANCE_INHERITANCE */
2192
2193 if (fifo_q || thread_imp_donor == TRUE
2194 || realtime || (thread->options & TH_OPT_VMPRIV)) {
2195 /*
2196 * If this thread's task is an importance donor,
2197 * or it's a realtime thread, or it's a VM privileged
2198 * thread, OR the queue is marked as FIFO:
2199 * select the thread
2200 */
2201 return thread;
2202 }
2203
2204 /* by default, _don't_ select the thread */
2205 return THREAD_NULL;
2206}
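/*
 * Sketch of an alternate waitq_select_cb, illustrating the callback contract
 * used by waitq_select_n_locked(): the thread is not locked, returning the
 * thread selects it, and returning THREAD_NULL skips it. This particular
 * policy (a minimum-priority floor passed via 'ctx') is hypothetical.
 */
static thread_t example_select_min_pri_cb(void *ctx, struct waitq *waitq,
					  int is_global, thread_t thread)
{
	int pri_floor = *(int *)ctx;

	(void)waitq;
	(void)is_global;

	if (thread->sched_pri >= pri_floor)
		return thread;
	return THREAD_NULL;
}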
2207
39037602
A
2208
2209
3e170ce0 2210/**
39037602 2211 * select from a waitq a single thread waiting for a given event
3e170ce0
A
2212 *
2213 * Conditions:
2214 * 'waitq' is locked
2215 *
2216 * Returns:
2217 * A locked thread that's been removed from the waitq, but has not
2218 * yet been put on a run queue. Caller is responsible to call splx
2219 * with the '*spl' value.
2220 */
2221static thread_t waitq_select_one_locked(struct waitq *waitq, event64_t event,
2222 uint64_t *reserved_preposts,
2223 int priority, spl_t *spl)
2224{
39037602 2225 (void)priority;
3e170ce0
A
2226 int nthreads;
2227 queue_head_t threadq;
2228
3e170ce0
A
2229 queue_init(&threadq);
2230
2231 nthreads = waitq_select_n_locked(waitq, event, waitq_select_one_cb, NULL,
39037602 2232 reserved_preposts, &threadq, 1, spl);
3e170ce0
A
2233
2234 /* if we selected a thread, return it (still locked) */
2235 if (!queue_empty(&threadq)) {
2236 thread_t t;
2237 queue_entry_t qe = dequeue_head(&threadq);
39037602 2238 t = qe_element(qe, struct thread, wait_links);
3e170ce0
A
2239 assert(queue_empty(&threadq)); /* there should be 1 entry */
2240 /* t has been locked and removed from all queues */
2241 return t;
2242 }
2243
2244 return THREAD_NULL;
2245}
2246
39037602
A
2247struct find_max_pri_ctx {
2248 integer_t max_sched_pri;
2249 integer_t max_base_pri;
2250 thread_t highest_thread;
2251};
2252
2253/**
2254 * callback function that finds the max priority thread
2255 *
2256 * Conditions:
2257 * 'waitq' is locked
2258 * 'thread' is not locked
2259 */
2260static thread_t
2261waitq_find_max_pri_cb(void *ctx_in,
2262 __unused struct waitq *waitq,
2263 __unused int is_global,
2264 thread_t thread)
2265{
2266 struct find_max_pri_ctx *ctx = (struct find_max_pri_ctx *)ctx_in;
2267
2268 /*
2269 * thread is not locked, use pri as a hint only
2270 * wake up the highest base pri, and find the highest sched pri at that base pri
2271 */
2272 integer_t sched_pri = *(volatile int16_t *)&thread->sched_pri;
2273 integer_t base_pri = *(volatile int16_t *)&thread->base_pri;
2274
2275 if (ctx->highest_thread == THREAD_NULL ||
2276 (base_pri > ctx->max_base_pri) ||
2277 (base_pri == ctx->max_base_pri && sched_pri > ctx->max_sched_pri)) {
2278 /* don't select the thread, just update ctx */
2279
2280 ctx->max_sched_pri = sched_pri;
2281 ctx->max_base_pri = base_pri;
2282 ctx->highest_thread = thread;
2283 }
2284
2285 return THREAD_NULL;
2286}
2287
2288/**
2289 * select from a waitq the highest priority thread waiting for a given event
2290 *
2291 * Conditions:
2292 * 'waitq' is locked
2293 *
2294 * Returns:
2295 * A locked thread that's been removed from the waitq, but has not
2296 * yet been put on a run queue. Caller is responsible to call splx
2297 * with the '*spl' value.
2298 */
2299static thread_t
2300waitq_select_max_locked(struct waitq *waitq, event64_t event,
2301 uint64_t *reserved_preposts,
2302 spl_t *spl)
2303{
2304 __assert_only int nthreads;
2305 assert(!waitq->waitq_set_id); /* doesn't support recursive sets */
2306
2307 struct find_max_pri_ctx ctx = {
2308 .max_sched_pri = 0,
2309 .max_base_pri = 0,
2310 .highest_thread = THREAD_NULL,
2311 };
2312
2313 /*
2314 * Scan the waitq to find the highest priority thread.
2315 * This doesn't remove any thread from the queue
2316 */
2317 nthreads = waitq_select_n_locked(waitq, event, waitq_find_max_pri_cb, &ctx,
2318 reserved_preposts, NULL, 1, spl);
2319
2320 assert(nthreads == 0);
2321
2322 if (ctx.highest_thread != THREAD_NULL) {
2323 __assert_only kern_return_t ret;
2324
2325 /* Remove only the thread we just found */
2326 ret = waitq_select_thread_locked(waitq, event, ctx.highest_thread, spl);
2327
2328 assert(ret == KERN_SUCCESS);
2329 return ctx.highest_thread;
2330 }
2331
2332 return THREAD_NULL;
2333}
2334
3e170ce0
A
2335
2336struct select_thread_ctx {
2337 thread_t thread;
2338 event64_t event;
2339 spl_t *spl;
2340};
2341
2342/**
2343 * link walk callback invoked once for each set to which a waitq belongs
2344 *
2345 * Conditions:
2346 * initial waitq is locked
2347 * ctx->thread is unlocked
2348 *
2349 * Notes:
2350 * This may disable interrupts and early-out of the full DAG link walk by
2351 * returning KERN_ALREADY_IN_SET. In this case, the returned thread has
2352 * been removed from the waitq, it's waitq state has been reset, and the
2353 * caller is responsible to call splx() with the returned interrupt state
2354 * in ctx->spl.
2355 */
2356static int waitq_select_thread_cb(struct waitq *waitq, void *ctx,
39037602 2357 struct waitq_link *link)
3e170ce0
A
2358{
2359 struct select_thread_ctx *stctx = (struct select_thread_ctx *)ctx;
2360 struct waitq_set *wqset;
39037602
A
2361 struct waitq *wqsetq;
2362 struct waitq *safeq;
2363 spl_t s;
3e170ce0
A
2364
2365 (void)waitq;
39037602 2366
3e170ce0
A
2367 thread_t thread = stctx->thread;
2368 event64_t event = stctx->event;
2369
39037602 2370 if (wql_type(link) != WQL_WQS)
3e170ce0
A
2371 return WQ_ITERATE_CONTINUE;
2372
39037602
A
2373 wqset = link->wql_wqs.wql_set;
2374 wqsetq = &wqset->wqset_q;
3e170ce0 2375
39037602
A
2376 assert(!waitq_irq_safe(waitq));
2377 assert(!waitq_irq_safe(wqsetq));
2378
2379 waitq_set_lock(wqset);
2380
2381 s = splsched();
3e170ce0 2382
39037602
A
2383 /* find and lock the interrupt-safe waitq the thread is thought to be on */
2384 safeq = global_eventq(wqsetq);
2385 waitq_lock(safeq);
2386
2387 thread_lock(thread);
2388
2389 if ((thread->waitq == wqsetq) && (thread->wait_event == event)) {
2390 remqueue(&thread->wait_links);
2391 if (queue_empty(&safeq->waitq_queue)) {
2392 safeq->waitq_eventmask = 0;
2393 }
3e170ce0 2394 thread_clear_waitq_state(thread);
39037602
A
2395 waitq_unlock(safeq);
2396 waitq_set_unlock(wqset);
3e170ce0
A
2397 /*
2398 * thread still locked,
2399 * return non-zero to break out of WQS walk
2400 */
39037602 2401 *(stctx->spl) = s;
3e170ce0
A
2402 return WQ_ITERATE_FOUND;
2403 }
2404
2405 thread_unlock(thread);
2406 waitq_set_unlock(wqset);
39037602
A
2407 waitq_unlock(safeq);
2408 splx(s);
3e170ce0
A
2409
2410 return WQ_ITERATE_CONTINUE;
2411}
2412
2413/**
2414 * returns KERN_SUCCESS and locks 'thread' if-and-only-if 'thread' is waiting
2415 * on 'waitq' (or any set to which waitq belongs) for 'event'
2416 *
2417 * Conditions:
2418 * 'waitq' is locked
2419 * 'thread' is unlocked
2420 */
2421static kern_return_t waitq_select_thread_locked(struct waitq *waitq,
2422 event64_t event,
2423 thread_t thread, spl_t *spl)
2424{
39037602
A
2425 struct waitq *safeq;
2426 struct waitq_link *link;
3e170ce0
A
2427 struct select_thread_ctx ctx;
2428 kern_return_t kr;
39037602
A
2429 spl_t s;
2430
2431 s = splsched();
2432
 2433	/* Find and lock the interrupt-disabled queue the thread is actually on */
2434 if (!waitq_irq_safe(waitq)) {
2435 safeq = global_eventq(waitq);
2436 waitq_lock(safeq);
2437 } else {
2438 safeq = waitq;
2439 }
3e170ce0 2440
3e170ce0
A
2441 thread_lock(thread);
2442
2443 if ((thread->waitq == waitq) && (thread->wait_event == event)) {
39037602
A
2444 remqueue(&thread->wait_links);
2445 if (queue_empty(&safeq->waitq_queue)) {
2446 safeq->waitq_eventmask = 0;
2447 }
3e170ce0 2448 thread_clear_waitq_state(thread);
39037602 2449 *spl = s;
3e170ce0
A
2450 /* thread still locked */
2451 return KERN_SUCCESS;
2452 }
2453
2454 thread_unlock(thread);
39037602
A
2455
2456 if (safeq != waitq)
2457 waitq_unlock(safeq);
2458
2459 splx(s);
3e170ce0
A
2460
2461 if (!waitq->waitq_set_id)
2462 return KERN_NOT_WAITING;
2463
2464 /* check to see if the set ID for this wait queue is valid */
39037602 2465 link = wql_get_link(waitq->waitq_set_id);
3e170ce0
A
2466 if (!link) {
 2467		/* the waitq set to which this waitq belonged has been invalidated */
2468 waitq->waitq_set_id = 0;
2469 return KERN_NOT_WAITING;
2470 }
2471
2472 /*
2473 * The thread may be waiting on a wait queue set to which
2474 * the input 'waitq' belongs. Go look for the thread in
2475 * all wait queue sets. If it's there, we'll remove it
2476 * because it's equivalent to waiting directly on the input waitq.
2477 */
2478 ctx.thread = thread;
2479 ctx.event = event;
2480 ctx.spl = spl;
39037602
A
2481 kr = walk_waitq_links(LINK_WALK_FULL_DAG, waitq, waitq->waitq_set_id,
2482 WQL_WQS, (void *)&ctx, waitq_select_thread_cb);
3e170ce0 2483
39037602 2484 wql_put_link(link);
3e170ce0
A
2485
2486 /* we found a thread, return success */
2487 if (kr == WQ_ITERATE_FOUND)
2488 return KERN_SUCCESS;
2489
2490 return KERN_NOT_WAITING;
2491}
2492
2493static int prepost_exists_cb(struct waitq_set __unused *wqset,
2494 void __unused *ctx,
2495 struct wq_prepost __unused *wqp,
2496 struct waitq __unused *waitq)
2497{
2498 /* if we get here, then we know that there is a valid prepost object! */
2499 return WQ_ITERATE_FOUND;
2500}
2501
2502/**
2503 * declare a thread's intent to wait on 'waitq' for 'wait_event'
2504 *
2505 * Conditions:
2506 * 'waitq' is locked
3e170ce0
A
2507 */
2508wait_result_t waitq_assert_wait64_locked(struct waitq *waitq,
2509 event64_t wait_event,
2510 wait_interrupt_t interruptible,
2511 wait_timeout_urgency_t urgency,
2512 uint64_t deadline,
2513 uint64_t leeway,
2514 thread_t thread)
2515{
2516 wait_result_t wait_result;
2517 int realtime = 0;
39037602
A
2518 struct waitq *safeq;
2519 uintptr_t eventmask;
2520 spl_t s;
2521
3e170ce0
A
2522
2523 /*
2524 * Warning: Do _not_ place debugging print statements here.
39037602 2525 * The waitq is locked!
3e170ce0 2526 */
39037602 2527 assert(!thread->started || thread == current_thread());
3e170ce0
A
2528
2529 if (thread->waitq != NULL)
2530 panic("thread already waiting on %p", thread->waitq);
2531
2532 if (waitq_is_set(waitq)) {
2533 struct waitq_set *wqset = (struct waitq_set *)waitq;
2534 /*
2535 * early-out if the thread is waiting on a wait queue set
2536 * that has already been pre-posted.
2537 */
2538 if (wait_event == NO_EVENT64 && waitq_set_maybe_preposted(wqset)) {
2539 int ret;
2540 /*
2541 * Run through the list of potential preposts. Because
2542 * this is a hot path, we short-circuit the iteration
2543 * if we find just one prepost object.
2544 */
2545 ret = wq_prepost_foreach_locked(wqset, NULL,
2546 prepost_exists_cb);
2547 if (ret == WQ_ITERATE_FOUND) {
39037602
A
2548 s = splsched();
2549 thread_lock(thread);
3e170ce0 2550 thread->wait_result = THREAD_AWAKENED;
39037602
A
2551 thread_unlock(thread);
2552 splx(s);
3e170ce0
A
2553 return THREAD_AWAKENED;
2554 }
2555 }
2556 }
2557
39037602
A
2558 s = splsched();
2559
2560 /*
2561 * If already dealing with an irq safe wait queue, we are all set.
2562 * Otherwise, determine a global queue to use and lock it.
2563 */
2564 if (!waitq_irq_safe(waitq)) {
2565 safeq = global_eventq(waitq);
2566 eventmask = _CAST_TO_EVENT_MASK(waitq);
2567 waitq_lock(safeq);
2568 } else {
2569 safeq = waitq;
2570 eventmask = _CAST_TO_EVENT_MASK(wait_event);
2571 }
2572
2573 /* lock the thread now that we have the irq-safe waitq locked */
2574 thread_lock(thread);
2575
3e170ce0
A
2576 /*
2577 * Realtime threads get priority for wait queue placements.
2578 * This allows wait_queue_wakeup_one to prefer a waiting
2579 * realtime thread, similar in principle to performing
2580 * a wait_queue_wakeup_all and allowing scheduler prioritization
2581 * to run the realtime thread, but without causing the
2582 * lock contention of that scenario.
2583 */
2584 if (thread->sched_pri >= BASEPRI_REALTIME)
2585 realtime = 1;
2586
2587 /*
2588 * This is the extent to which we currently take scheduling attributes
 2589	 * into account. If the thread is VM privileged, we stick it at
2590 * the front of the queue. Later, these queues will honor the policy
2591 * value set at waitq_init time.
2592 */
2593 wait_result = thread_mark_wait_locked(thread, interruptible);
2594 /* thread->wait_result has been set */
2595 if (wait_result == THREAD_WAITING) {
39037602
A
2596
2597 if (!safeq->waitq_fifo
3e170ce0 2598 || (thread->options & TH_OPT_VMPRIV) || realtime)
39037602 2599 enqueue_head(&safeq->waitq_queue, &thread->wait_links);
3e170ce0 2600 else
39037602 2601 enqueue_tail(&safeq->waitq_queue, &thread->wait_links);
3e170ce0 2602
39037602 2603 /* mark the event and real waitq, even if enqueued on a global safeq */
3e170ce0
A
2604 thread->wait_event = wait_event;
2605 thread->waitq = waitq;
2606
2607 if (deadline != 0) {
2608 boolean_t act;
39037602 2609
3e170ce0
A
2610 act = timer_call_enter_with_leeway(&thread->wait_timer,
2611 NULL,
2612 deadline, leeway,
2613 urgency, FALSE);
2614 if (!act)
2615 thread->wait_timer_active++;
2616 thread->wait_timer_is_set = TRUE;
2617 }
2618
39037602
A
2619 if (waitq_is_global(safeq))
2620 safeq->waitq_eventmask |= eventmask;
3e170ce0
A
2621
2622 waitq_stats_count_wait(waitq);
2623 }
2624
39037602
A
2625 /* unlock the thread */
2626 thread_unlock(thread);
2627
2628 /* unlock the safeq if we locked it here */
2629 if (safeq != waitq) {
2630 waitq_unlock(safeq);
2631 }
2632
2633 splx(s);
2634
3e170ce0
A
2635 return wait_result;
2636}
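/*
 * Sketch of the canonical wait pattern built on the locked assert-wait
 * primitive above, assuming a non-IRQ-safe 'wq'. A deadline of 0 means no
 * timeout, in which case the urgency and leeway arguments are not consumed;
 * TIMEOUT_URGENCY_SYS_NORMAL is assumed to be available from the clock
 * headers. The function name is hypothetical.
 */
static wait_result_t example_wait_for_event(struct waitq *wq, event64_t event)
{
	wait_result_t wr;

	waitq_lock(wq);
	wr = waitq_assert_wait64_locked(wq, event, THREAD_UNINT,
					TIMEOUT_URGENCY_SYS_NORMAL,
					0 /* no deadline */, 0 /* no leeway */,
					current_thread());
	waitq_unlock(wq);

	if (wr == THREAD_WAITING)
		wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}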
2637
2638/**
2639 * remove 'thread' from its current blocking state on 'waitq'
2640 *
2641 * Conditions:
3e170ce0
A
2642 * 'thread' is locked
2643 *
2644 * Notes:
2645 * This function is primarily used by clear_wait_internal in
2646 * sched_prim.c from the thread timer wakeup path
2647 * (i.e. the thread was waiting on 'waitq' with a timeout that expired)
2648 */
39037602 2649int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread)
3e170ce0 2650{
39037602
A
2651 struct waitq *safeq;
2652
2653 assert_thread_magic(thread);
3e170ce0
A
2654 assert(thread->waitq == waitq);
2655
39037602
A
 2656	/* Find the interrupt-disabled queue the thread is waiting on */
2657 if (!waitq_irq_safe(waitq)) {
2658 safeq = global_eventq(waitq);
2659 } else {
2660 safeq = waitq;
2661 }
2662
2663 /* thread is already locked so have to try for the waitq lock */
2664 if (!waitq_lock_try(safeq))
2665 return 0;
2666
2667 remqueue(&thread->wait_links);
3e170ce0
A
2668 thread_clear_waitq_state(thread);
2669 waitq_stats_count_clear_wakeup(waitq);
2670
2671 /* clear the global event mask if this was the last thread there! */
39037602
A
2672 if (waitq_is_global(safeq) && queue_empty(&safeq->waitq_queue)) {
2673 safeq->waitq_eventmask = 0;
2674 /* JMM - also mark no-waiters on waitq (if not the same as the safeq) */
2675 }
2676
2677 waitq_unlock(safeq);
2678
2679 return 1;
3e170ce0
A
2680}
2681
2682
2683static __inline__
2684void maybe_adjust_thread_pri(thread_t thread, int priority) {
2685 if (thread->sched_pri < priority) {
2686 if (priority <= MAXPRI) {
2687 set_sched_pri(thread, priority);
2688
2689 thread->was_promoted_on_wakeup = 1;
2690 thread->sched_flags |= TH_SFLAG_PROMOTED;
2691 }
2692 return;
2693 }
2694
2695 /*
2696 * If the caller is requesting the waitq subsystem to promote the
2697 * priority of the awoken thread, then boost the thread's priority to
2698 * the default WAITQ_BOOST_PRIORITY (if it's not already equal or
2699 * higher priority). This boost must be removed via a call to
2700 * waitq_clear_promotion_locked.
2701 */
2702 if (priority == WAITQ_PROMOTE_PRIORITY &&
2703 (thread->sched_pri < WAITQ_BOOST_PRIORITY ||
2704 !(thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED))) {
2705
2706 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE) | DBG_FUNC_NONE,
2707 (uintptr_t)thread_tid(thread),
2708 thread->sched_pri, thread->base_pri,
2709 WAITQ_BOOST_PRIORITY, 0);
2710 thread->sched_flags |= TH_SFLAG_WAITQ_PROMOTED;
2711 if (thread->sched_pri < WAITQ_BOOST_PRIORITY)
2712 set_sched_pri(thread, WAITQ_BOOST_PRIORITY);
2713 }
2714}
2715
2716/**
2717 * Clear a thread's waitq priority promotion state and the waitq's boost flag
2718 *
2719 * This function will always clear the waitq's 'waitq_boost' flag. If the
 2720 * 'thread' parameter is non-null, this function will also check the
2721 * priority promotion (boost) state of that thread. If this thread was boosted
2722 * (by having been awoken from a boosting waitq), then this boost state is
2723 * cleared. This function is to be paired with waitq_enable_promote_locked.
2724 */
2725void waitq_clear_promotion_locked(struct waitq *waitq, thread_t thread)
2726{
2727 spl_t s;
2728
2729 assert(waitq_held(waitq));
2730 if (thread == THREAD_NULL)
2731 return;
2732
2733 if (!waitq_irq_safe(waitq))
2734 s = splsched();
2735 thread_lock(thread);
2736
2737 if (thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) {
2738 thread->sched_flags &= ~TH_SFLAG_WAITQ_PROMOTED;
2739
2740 if (thread->sched_flags & TH_SFLAG_PROMOTED_MASK) {
2741 /* it still has other promotions (mutex/rw_lock) */
2742 } else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
2743 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE) | DBG_FUNC_NONE,
2744 (uintptr_t)thread_tid(thread),
2745 thread->sched_pri,
2746 thread->base_pri,
2747 DEPRESSPRI, 0);
2748 set_sched_pri(thread, DEPRESSPRI);
2749 } else {
2750 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE) | DBG_FUNC_NONE,
2751 (uintptr_t)thread_tid(thread),
2752 thread->sched_pri,
2753 thread->base_pri,
2754 thread->base_pri, 0);
2755 thread_recompute_sched_pri(thread, FALSE);
2756 }
2757 }
2758
2759 thread_unlock(thread);
2760 if (!waitq_irq_safe(waitq))
2761 splx(s);
2762}
2763
2764/**
2765 * wakeup all threads waiting on 'waitq' for 'wake_event'
2766 *
2767 * Conditions:
2768 * 'waitq' is locked
2769 *
2770 * Notes:
2771 * May temporarily disable and re-enable interrupts
2772 * and re-adjust thread priority of each awoken thread.
2773 *
2774 * If the input 'lock_state' == WAITQ_UNLOCK then the waitq will have
2775 * been unlocked before calling thread_go() on any returned threads, and
2776 * is guaranteed to be unlocked upon function return.
2777 */
2778kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq,
2779 event64_t wake_event,
2780 wait_result_t result,
2781 uint64_t *reserved_preposts,
2782 int priority,
2783 waitq_lock_state_t lock_state)
2784{
2785 kern_return_t ret;
2786 thread_t thread;
2787 spl_t th_spl;
2788 int nthreads;
2789 queue_head_t wakeup_queue;
2790
2791 assert(waitq_held(waitq));
2792 queue_init(&wakeup_queue);
2793
2794 nthreads = waitq_select_n_locked(waitq, wake_event, NULL, NULL,
2795 reserved_preposts,
2796 &wakeup_queue, -1, &th_spl);
2797
2798 /* set each thread running */
2799 ret = KERN_NOT_WAITING;
2800
2801#if CONFIG_WAITQ_STATS
39037602 2802 qe_foreach_element(thread, &wakeup_queue, wait_links)
3e170ce0
A
2803 waitq_stats_count_wakeup(waitq);
2804#endif
2805 if (lock_state == WAITQ_UNLOCK)
2806 waitq_unlock(waitq);
2807
39037602
A
2808 qe_foreach_element_safe(thread, &wakeup_queue, wait_links) {
2809 assert_thread_magic(thread);
2810 remqueue(&thread->wait_links);
3e170ce0
A
2811 maybe_adjust_thread_pri(thread, priority);
2812 ret = thread_go(thread, result);
2813 assert(ret == KERN_SUCCESS);
2814 thread_unlock(thread);
2815 }
2816 if (nthreads > 0)
2817 splx(th_spl);
2818 else
2819 waitq_stats_count_fail(waitq);
2820
2821 return ret;
2822}
2823
2824/**
2825 * wakeup one thread waiting on 'waitq' for 'wake_event'
2826 *
2827 * Conditions:
2828 * 'waitq' is locked
2829 *
2830 * Notes:
2831 * May temporarily disable and re-enable interrupts.
2832 */
2833kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq,
2834 event64_t wake_event,
2835 wait_result_t result,
2836 uint64_t *reserved_preposts,
2837 int priority,
2838 waitq_lock_state_t lock_state)
2839{
2840 thread_t thread;
2841 spl_t th_spl;
2842
2843 assert(waitq_held(waitq));
2844
39037602
A
2845 if (priority == WAITQ_SELECT_MAX_PRI) {
2846 thread = waitq_select_max_locked(waitq, wake_event,
2847 reserved_preposts,
2848 &th_spl);
2849 } else {
2850 thread = waitq_select_one_locked(waitq, wake_event,
2851 reserved_preposts,
2852 priority, &th_spl);
2853 }
2854
3e170ce0
A
2855
2856 if (thread != THREAD_NULL)
2857 waitq_stats_count_wakeup(waitq);
2858 else
2859 waitq_stats_count_fail(waitq);
2860
2861 if (lock_state == WAITQ_UNLOCK)
2862 waitq_unlock(waitq);
2863
2864 if (thread != THREAD_NULL) {
2865 maybe_adjust_thread_pri(thread, priority);
2866 kern_return_t ret = thread_go(thread, result);
2867 assert(ret == KERN_SUCCESS);
2868 thread_unlock(thread);
2869 splx(th_spl);
2870 return ret;
2871 }
2872
2873 return KERN_NOT_WAITING;
2874}
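/*
 * Sketch of a typical wakeup path using the locked primitive above, assuming
 * a non-IRQ-safe 'wq', no reserved preposts, and the WAITQ_ALL_PRIORITIES
 * constant from waitq.h (no priority boost, no max-priority selection).
 * The function name is hypothetical.
 */
static kern_return_t example_wakeup_one(struct waitq *wq, event64_t event)
{
	waitq_lock(wq);

	/* returns KERN_NOT_WAITING if no thread matched <wq, event> */
	return waitq_wakeup64_one_locked(wq, event, THREAD_AWAKENED,
					 NULL /* reserved preposts */,
					 WAITQ_ALL_PRIORITIES,
					 WAITQ_UNLOCK /* drops the waitq lock */);
}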
2875
2876/**
2877 * wakeup one thread waiting on 'waitq' for 'wake_event'
2878 *
2879 * Conditions:
2880 * 'waitq' is locked
2881 *
2882 * Returns:
2883 * A locked, runnable thread.
2884 * If return value is non-NULL, interrupts have also
2885 * been disabled, and the caller is responsible to call
2886 * splx() with the returned '*spl' value.
2887 */
39037602
A
2888thread_t
2889waitq_wakeup64_identify_locked(struct waitq *waitq,
2890 event64_t wake_event,
2891 wait_result_t result,
2892 spl_t *spl,
2893 uint64_t *reserved_preposts,
2894 int priority,
2895 waitq_lock_state_t lock_state)
3e170ce0
A
2896{
2897 thread_t thread;
2898
2899 assert(waitq_held(waitq));
2900
39037602
A
2901 if (priority == WAITQ_SELECT_MAX_PRI) {
2902 thread = waitq_select_max_locked(waitq, wake_event,
2903 reserved_preposts,
2904 spl);
2905 } else {
2906 thread = waitq_select_one_locked(waitq, wake_event,
2907 reserved_preposts,
2908 priority, spl);
2909 }
3e170ce0
A
2910
2911 if (thread != THREAD_NULL)
2912 waitq_stats_count_wakeup(waitq);
2913 else
2914 waitq_stats_count_fail(waitq);
2915
2916 if (lock_state == WAITQ_UNLOCK)
2917 waitq_unlock(waitq);
2918
2919 if (thread != THREAD_NULL) {
2920 kern_return_t __assert_only ret;
2921 ret = thread_go(thread, result);
2922 assert(ret == KERN_SUCCESS);
2923 }
2924
2925 return thread; /* locked if not NULL (caller responsible for spl) */
2926}
2927
2928/**
2929 * wakeup a specific thread iff it's waiting on 'waitq' for 'wake_event'
2930 *
2931 * Conditions:
2932 * 'waitq' is locked
2933 * 'thread' is unlocked
2934 *
2935 * Notes:
2936 * May temporarily disable and re-enable interrupts
2937 *
2938 * If the input lock_state == WAITQ_UNLOCK then the waitq will have been
2939 * unlocked before calling thread_go() if 'thread' is to be awoken, and
2940 * is guaranteed to be unlocked upon function return.
2941 */
2942kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq,
2943 event64_t wake_event,
2944 thread_t thread,
2945 wait_result_t result,
2946 waitq_lock_state_t lock_state)
2947{
2948 kern_return_t ret;
2949 spl_t th_spl;
2950
2951 assert(waitq_held(waitq));
39037602 2952 assert_thread_magic(thread);
3e170ce0
A
2953
2954 /*
2955 * See if the thread was still waiting there. If so, it got
2956 * dequeued and returned locked.
2957 */
2958 ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl);
2959
2960 if (ret == KERN_SUCCESS)
2961 waitq_stats_count_wakeup(waitq);
2962 else
2963 waitq_stats_count_fail(waitq);
2964
2965 if (lock_state == WAITQ_UNLOCK)
2966 waitq_unlock(waitq);
2967
2968 if (ret != KERN_SUCCESS)
2969 return KERN_NOT_WAITING;
2970
2971 ret = thread_go(thread, result);
2972 assert(ret == KERN_SUCCESS);
2973 thread_unlock(thread);
2974 splx(th_spl);
2975
2976 return ret;
2977}
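/*
 * Sketch: waking one specific thread, for example to cancel its wait, while
 * letting the primitive drop the waitq lock on the way out. The function
 * name is hypothetical.
 */
static kern_return_t example_wakeup_thread(struct waitq *wq, event64_t event,
					   thread_t t)
{
	waitq_lock(wq);
	return waitq_wakeup64_thread_locked(wq, event, t, THREAD_INTERRUPTED,
					    WAITQ_UNLOCK);
}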
2978
2979
2980
2981/* ----------------------------------------------------------------------
2982 *
2983 * In-Kernel API
2984 *
2985 * ---------------------------------------------------------------------- */
2986
2987/**
2988 * initialize a waitq object
2989 */
2990kern_return_t waitq_init(struct waitq *waitq, int policy)
2991{
2992 assert(waitq != NULL);
2993
2994 /* only FIFO and LIFO for now */
2995 if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
2996 return KERN_INVALID_ARGUMENT;
2997
2998 waitq->waitq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
2999 waitq->waitq_irq = !!(policy & SYNC_POLICY_DISABLE_IRQ);
3000 waitq->waitq_prepost = 0;
3001 waitq->waitq_type = WQT_QUEUE;
3002 waitq->waitq_eventmask = 0;
3003
3004 waitq->waitq_set_id = 0;
3005 waitq->waitq_prepost_id = 0;
3006
39037602 3007 waitq_lock_init(waitq);
3e170ce0
A
3008 queue_init(&waitq->waitq_queue);
3009
39037602 3010 waitq->waitq_isvalid = 1;
3e170ce0
A
3011 return KERN_SUCCESS;
3012}
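/*
 * Sketch: initializing an embedded wait queue for FIFO wakeups with the
 * IRQ-safe locking discipline, mirroring the global queues created in
 * waitq_bootstrap(). The function name is hypothetical.
 */
static void example_init_embedded_waitq(struct waitq *wq)
{
	kern_return_t __assert_only kr;

	kr = waitq_init(wq, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
	assert(kr == KERN_SUCCESS);
}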
3013
3014struct wq_unlink_ctx {
3015 struct waitq *unlink_wq;
3016 struct waitq_set *unlink_wqset;
3017};
3018
3019static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx,
3020 struct wq_prepost *wqp, struct waitq *waitq);
3021
3022/**
39037602 3023 * walk_waitq_links callback to invalidate 'link' parameter
3e170ce0
A
3024 *
3025 * Conditions:
39037602 3026 * Called from walk_waitq_links.
3e170ce0
A
 3027 *	Note that unlike other callbacks, this one makes no assumptions about
 3028 *	the 'waitq' parameter; specifically, it does not have to be locked or
3029 * even valid.
3030 */
3031static int waitq_unlink_all_cb(struct waitq *waitq, void *ctx,
39037602 3032 struct waitq_link *link)
3e170ce0
A
3033{
3034 (void)waitq;
3035 (void)ctx;
39037602
A
3036 if (wql_type(link) == WQL_LINK && wql_is_valid(link))
3037 wql_invalidate(link);
3e170ce0 3038
39037602 3039 if (wql_type(link) == WQL_WQS) {
3e170ce0 3040 struct waitq_set *wqset;
3e170ce0
A
3041 struct wq_unlink_ctx ulctx;
3042
3043 /*
3044 * When destroying the waitq, take the time to clear out any
3045 * preposts it may have made. This could potentially save time
3046 * on the IPC send path which would otherwise have to iterate
3047 * over lots of dead port preposts.
3048 */
3049 if (waitq->waitq_prepost_id == 0)
3050 goto out;
3051
39037602 3052 wqset = link->wql_wqs.wql_set;
3e170ce0 3053 assert(wqset != NULL);
39037602 3054 assert(!waitq_irq_safe(&wqset->wqset_q));
3e170ce0 3055
3e170ce0
A
3056 waitq_set_lock(wqset);
3057
3058 if (!waitq_set_is_valid(wqset)) {
3059 /* someone raced us to teardown */
3060 goto out_unlock;
3061 }
3062 if (!waitq_set_maybe_preposted(wqset))
3063 goto out_unlock;
3064
3065 ulctx.unlink_wq = waitq;
3066 ulctx.unlink_wqset = wqset;
3067 (void)wq_prepost_iterate(wqset->wqset_prepost_id, &ulctx,
3068 waitq_unlink_prepost_cb);
3069out_unlock:
3070 waitq_set_unlock(wqset);
3e170ce0
A
3071 }
3072
3073out:
3074 return WQ_ITERATE_CONTINUE;
3075}
3076
3077
3078/**
3079 * cleanup any link/prepost table resources associated with a waitq
3080 */
3081void waitq_deinit(struct waitq *waitq)
3082{
3e170ce0
A
3083 spl_t s;
3084
39037602 3085 if (!waitq || !waitq_is_queue(waitq))
3e170ce0
A
3086 return;
3087
3088 if (waitq_irq_safe(waitq))
3089 s = splsched();
3090 waitq_lock(waitq);
39037602
A
3091 if (!waitq_valid(waitq)) {
3092 waitq_unlock(waitq);
3093 if (waitq_irq_safe(waitq))
3094 splx(s);
3095 return;
3096 }
3e170ce0 3097
3e170ce0 3098 waitq->waitq_type = WQT_INVALID;
39037602 3099 waitq->waitq_isvalid = 0;
3e170ce0 3100
39037602
A
3101 if (!waitq_irq_safe(waitq)) {
3102 waitq_unlink_all_unlock(waitq);
3103 /* waitq unlocked and set links deallocated */
3104 } else {
3105 waitq_unlock(waitq);
3e170ce0 3106 splx(s);
39037602 3107 }
3e170ce0 3108
39037602 3109 assert(queue_empty(&waitq->waitq_queue));
3e170ce0
A
3110}
3111
39037602
A
3112void waitq_invalidate_locked(struct waitq *waitq)
3113{
3114 assert(waitq_held(waitq));
3115 assert(waitq_is_valid(waitq));
3116 waitq->waitq_isvalid = 0;
3117}
3e170ce0
A
3118
3119/**
3120 * invalidate the given wq_prepost object
3121 *
3122 * Conditions:
3123 * Called from wq_prepost_iterate (_not_ from wq_prepost_foreach_locked!)
3124 */
3125static int wqset_clear_prepost_chain_cb(struct waitq_set __unused *wqset,
3126 void __unused *ctx,
3127 struct wq_prepost *wqp,
3128 struct waitq __unused *waitq)
3129{
3130 if (wqp_type(wqp) == WQP_POST)
3131 wq_prepost_invalidate(wqp);
3132 return WQ_ITERATE_CONTINUE;
3133}
3134
3135
3136/**
3137 * allocate and initialize a waitq set object
3138 *
3139 * Conditions:
3140 * may block
3141 *
3142 * Returns:
3143 * allocated / initialized waitq_set object
3144 * NULL on failure
3145 */
39037602 3146struct waitq_set *waitq_set_alloc(int policy, void *prepost_hook)
3e170ce0
A
3147{
3148 struct waitq_set *wqset;
3149
3150 wqset = (struct waitq_set *)zalloc(waitq_set_zone);
3151 if (!wqset)
3152 panic("Can't allocate a new waitq set from zone %p", waitq_set_zone);
3153
3154 kern_return_t ret;
39037602 3155 ret = waitq_set_init(wqset, policy, NULL, prepost_hook);
3e170ce0
A
3156 if (ret != KERN_SUCCESS) {
3157 zfree(waitq_set_zone, wqset);
3158 wqset = NULL;
3159 }
3160
3161 return wqset;
3162}
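/*
 * Sketch: allocating a wait queue set that accumulates preposts.
 * SYNC_POLICY_PREPOST excludes a prepost hook (waitq_set_init asserts the
 * hook is NULL in that case), so NULL is passed here. The function name is
 * hypothetical.
 */
static struct waitq_set *example_alloc_prepost_set(void)
{
	return waitq_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, NULL);
}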
3163
3164/**
3165 * initialize a waitq set object
3166 *
3167 * Conditions:
3168 * may (rarely) block if link table needs to grow, and
3169 * no 'reserved_link' object is passed.
3170 */
3171kern_return_t waitq_set_init(struct waitq_set *wqset,
39037602
A
3172 int policy, uint64_t *reserved_link,
3173 void *prepost_hook)
3e170ce0 3174{
39037602 3175 struct waitq_link *link;
3e170ce0
A
3176 kern_return_t ret;
3177
3178 memset(wqset, 0, sizeof(*wqset));
3179
3180 ret = waitq_init(&wqset->wqset_q, policy);
3181 if (ret != KERN_SUCCESS)
3182 return ret;
3183
3184 wqset->wqset_q.waitq_type = WQT_SET;
39037602 3185 if (policy & SYNC_POLICY_PREPOST) {
3e170ce0 3186 wqset->wqset_q.waitq_prepost = 1;
39037602
A
3187 wqset->wqset_prepost_id = 0;
3188 assert(prepost_hook == NULL);
3189 } else {
3e170ce0 3190 wqset->wqset_q.waitq_prepost = 0;
39037602
A
3191 wqset->wqset_prepost_hook = prepost_hook;
3192 }
3e170ce0
A
3193
3194 if (reserved_link && *reserved_link != 0) {
39037602 3195 link = wql_get_reserved(*reserved_link, WQL_WQS);
3e170ce0
A
3196 /* always consume the caller's reference */
3197 *reserved_link = 0;
3198 } else {
39037602 3199 link = wql_alloc_link(WQL_WQS);
3e170ce0
A
3200 }
3201 if (!link)
3202 panic("Can't allocate link object for waitq set: %p", wqset);
3203
39037602
A
3204 link->wql_wqs.wql_set = wqset;
3205 wql_mkvalid(link);
3e170ce0 3206
39037602
A
3207 wqset->wqset_id = link->wql_setid.id;
3208 wql_put_link(link);
3e170ce0
A
3209
3210 return KERN_SUCCESS;
3211}
3212
3213/**
3214 * clear out / release any resources associated with a waitq set
3215 *
3216 * Conditions:
3217 * may block
3218 * Note:
3219 * This will render the waitq set invalid, and it must
3220 * be re-initialized with waitq_set_init before it can be used again
3221 */
3222void waitq_set_deinit(struct waitq_set *wqset)
3223{
39037602
A
3224 struct waitq_link *link = NULL;
3225 uint64_t set_id, prepost_id;
3e170ce0
A
3226
3227 if (!waitqs_is_set(wqset))
3228 panic("trying to de-initialize an invalid wqset @%p", wqset);
3229
39037602 3230 assert(!waitq_irq_safe(&wqset->wqset_q));
3e170ce0
A
3231 waitq_set_lock(wqset);
3232
3233 set_id = wqset->wqset_id;
3234
3235 /* grab the set's link object */
39037602 3236 link = wql_get_link(set_id);
3e170ce0 3237 if (link)
39037602 3238 wql_invalidate(link);
3e170ce0
A
3239
3240 /* someone raced us to deinit */
39037602 3241 if (!link || wqset->wqset_id != set_id || set_id != link->wql_setid.id) {
3e170ce0 3242 if (link)
39037602 3243 wql_put_link(link);
3e170ce0 3244 waitq_set_unlock(wqset);
3e170ce0
A
3245 return;
3246 }
3247
3248 /* every wait queue set should have a valid link object */
39037602 3249 assert(link != NULL && wql_type(link) == WQL_WQS);
3e170ce0
A
3250
3251 wqset->wqset_id = 0;
3252
3e170ce0
A
3253 /*
3254 * This set may have a lot of preposts, or may have been a member of
3255 * many other sets. To minimize spinlock hold times, we clear out the
3256 * waitq set data structure under the lock-hold, but don't clear any
3257 * table objects. We keep handles to the prepost and set linkage
3258 * objects and free those outside the critical section.
3259 */
39037602
A
3260 prepost_id = 0;
3261 if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id)
3262 prepost_id = wqset->wqset_prepost_id;
3263 /* else { TODO: notify kqueue subsystem? } */
3e170ce0
A
3264 wqset->wqset_prepost_id = 0;
3265
39037602
A
3266 wqset->wqset_q.waitq_type = WQT_INVALID;
3267 wqset->wqset_q.waitq_fifo = 0;
3268 wqset->wqset_q.waitq_prepost = 0;
3269 wqset->wqset_q.waitq_isvalid = 0;
3e170ce0 3270
39037602
A
3271 /* don't clear the 'waitq_irq' bit: it's used in locking! */
3272 wqset->wqset_q.waitq_eventmask = 0;
3273
3274 waitq_unlink_all_unlock(&wqset->wqset_q);
3275 /* wqset->wqset_q unlocked and set links deallocated */
3e170ce0
A
3276
3277 /*
39037602
A
3278 * walk_waitq_links may race with us for access to the waitq set.
3279 * If walk_waitq_links has a reference to the set, then we should wait
3e170ce0
A
3280 * until the link's refcount goes to 1 (our reference) before we exit
3281 * this function. That way we ensure that the waitq set memory will
3282 * remain valid even though it's been cleared out.
3283 */
39037602 3284 while (wql_refcnt(link) > 1)
3e170ce0 3285 delay(1);
39037602 3286 wql_put_link(link);
3e170ce0
A
3287
3288 /* drop / unlink all the prepost table objects */
39037602
A
3289 /* JMM - can this happen before the delay? */
3290 if (prepost_id)
3291 (void)wq_prepost_iterate(prepost_id, NULL,
3292 wqset_clear_prepost_chain_cb);
3e170ce0
A
3293}
3294
3295/**
3296 * de-initialize and free an allocated waitq set object
3297 *
3298 * Conditions:
3299 * may block
3300 */
3301kern_return_t waitq_set_free(struct waitq_set *wqset)
3302{
3303 waitq_set_deinit(wqset);
3304
3305 memset(wqset, 0, sizeof(*wqset));
3306 zfree(waitq_set_zone, wqset);
3307
3308 return KERN_SUCCESS;
3309}
3310
 3311#if defined(DEVELOPMENT) || defined(DEBUG)
3312#if CONFIG_WAITQ_DEBUG
3313/**
3314 * return the set ID of 'wqset'
3315 */
3316uint64_t wqset_id(struct waitq_set *wqset)
3317{
3318 if (!wqset)
3319 return 0;
3320
3321 assert(waitqs_is_set(wqset));
3322 return wqset->wqset_id;
3323}
3324
3325/**
3326 * returns a pointer to the waitq object embedded in 'wqset'
3327 */
3328struct waitq *wqset_waitq(struct waitq_set *wqset)
3329{
3330 if (!wqset)
3331 return NULL;
3332
3333 assert(waitqs_is_set(wqset));
3334
3335 return &wqset->wqset_q;
3336}
3337#endif /* CONFIG_WAITQ_DEBUG */
3338#endif /* DEVELOPMENT || DEBUG */
3339
3340
3341/**
3342 * clear all preposts originating from 'waitq'
3343 *
3344 * Conditions:
3345 * 'waitq' locked
3346 * may (rarely) spin waiting for another on-core thread to
3347 * release the last reference to the waitq's prepost link object
3348 *
3349 * NOTE:
3350 * If this function needs to spin, it will drop the waitq lock!
3351 * The return value of the function indicates whether or not this
3352 * happened: 1 == lock was dropped, 0 == lock held
3353 */
39037602 3354int waitq_clear_prepost_locked(struct waitq *waitq)
3e170ce0
A
3355{
3356 struct wq_prepost *wqp;
3357 int dropped_lock = 0;
3358
39037602
A
3359 assert(!waitq_irq_safe(waitq));
3360
3e170ce0
A
3361 if (waitq->waitq_prepost_id == 0)
3362 return 0;
3363
3364 wqp = wq_prepost_get(waitq->waitq_prepost_id);
3365 waitq->waitq_prepost_id = 0;
3366 if (wqp) {
3367 uint64_t wqp_id = wqp->wqp_prepostid.id;
3368 wqdbg_v("invalidate prepost 0x%llx (refcnt:%d)",
3369 wqp->wqp_prepostid.id, wqp_refcnt(wqp));
3370 wq_prepost_invalidate(wqp);
3371 while (wqp_refcnt(wqp) > 1) {
3e170ce0
A
3372
3373 /*
3374 * Some other thread must have raced us to grab a link
3375 * object reference before we invalidated it. This
3376 * means that they are probably trying to access the
3377 * waitq to which the prepost object points. We need
3378 * to wait here until the other thread drops their
3379 * reference. We know that no one else can get a
3380 * reference (the object has been invalidated), and
3381 * that prepost references are short-lived (dropped on
3382 * a call to wq_prepost_put). We also know that no one
 3383		 * blocks while holding a reference; therefore, the
3384 * other reference holder must be on-core. We'll just
3385 * sit and wait for the other reference to be dropped.
3386 */
3387 disable_preemption();
3388
3389 waitq_unlock(waitq);
3e170ce0
A
3390 dropped_lock = 1;
3391 /*
3392 * don't yield here, just spin and assume the other
3393 * consumer is already on core...
3394 */
3395 delay(1);
39037602 3396
3e170ce0
A
3397 waitq_lock(waitq);
3398
3399 enable_preemption();
3400 }
3401 if (wqp_refcnt(wqp) > 0 && wqp->wqp_prepostid.id == wqp_id)
3402 wq_prepost_put(wqp);
3403 }
3404
3405 return dropped_lock;
3406}
3407
3408/**
3409 * clear all preposts originating from 'waitq'
3410 *
3411 * Conditions:
3412 * 'waitq' is not locked
3413 * may disable and re-enable interrupts
3414 */
3415void waitq_clear_prepost(struct waitq *waitq)
3416{
3e170ce0 3417 assert(waitq_valid(waitq));
39037602 3418 assert(!waitq_irq_safe(waitq));
3e170ce0 3419
3e170ce0
A
3420 waitq_lock(waitq);
3421 /* it doesn't matter to us if the lock is dropped here */
39037602 3422 (void)waitq_clear_prepost_locked(waitq);
3e170ce0 3423 waitq_unlock(waitq);
3e170ce0
A
3424}
3425
3426/**
 3427 * return the waitq's prepost object ID (allocate if necessary)
3428 *
3429 * Conditions:
3430 * 'waitq' is unlocked
3431 */
3432uint64_t waitq_get_prepost_id(struct waitq *waitq)
3433{
3434 struct wq_prepost *wqp;
3435 uint64_t wqp_id = 0;
3e170ce0
A
3436
3437 if (!waitq_valid(waitq))
3438 return 0;
39037602
A
3439
3440 assert(!waitq_irq_safe(waitq));
3e170ce0 3441
3e170ce0
A
3442 waitq_lock(waitq);
3443
3444 if (!waitq_valid(waitq))
3445 goto out_unlock;
3446
3447 if (waitq->waitq_prepost_id) {
3448 wqp_id = waitq->waitq_prepost_id;
3449 goto out_unlock;
3450 }
3451
3452 /* don't hold a spinlock while allocating a prepost object */
3453 waitq_unlock(waitq);
3e170ce0
A
3454
3455 wqp = wq_prepost_alloc(WQP_WQ, 1);
3456 if (!wqp)
3457 return 0;
3458
3459 /* re-acquire the waitq lock */
3e170ce0
A
3460 waitq_lock(waitq);
3461
3462 if (!waitq_valid(waitq)) {
3463 wq_prepost_put(wqp);
3464 wqp_id = 0;
3465 goto out_unlock;
3466 }
3467
3468 if (waitq->waitq_prepost_id) {
3469 /* we were beat by someone else */
3470 wq_prepost_put(wqp);
3471 wqp_id = waitq->waitq_prepost_id;
3472 goto out_unlock;
3473 }
3474
3475 wqp->wqp_wq.wqp_wq_ptr = waitq;
3476
3477 wqp_set_valid(wqp);
3478 wqp_id = wqp->wqp_prepostid.id;
3479 waitq->waitq_prepost_id = wqp_id;
3480
3481 wq_prepost_put(wqp);
3482
3483out_unlock:
3484 waitq_unlock(waitq);
3e170ce0
A
3485
3486 return wqp_id;
3487}
3488
3489
39037602 3490static int waitq_inset_cb(struct waitq *waitq, void *ctx, struct waitq_link *link)
3e170ce0
A
3491{
3492 uint64_t setid = *(uint64_t *)ctx;
39037602 3493 int wqltype = wql_type(link);
3e170ce0 3494 (void)waitq;
39037602 3495 if (wqltype == WQL_WQS && link->wql_setid.id == setid) {
3e170ce0
A
3496 wqdbg_v(" waitq already in set 0x%llx", setid);
3497 return WQ_ITERATE_FOUND;
39037602 3498 } else if (wqltype == WQL_LINK) {
3e170ce0
A
3499 /*
3500 * break out early if we see a link that points to the setid
3501 * in question. This saves us a step in the
3502 * iteration/recursion
3503 */
39037602
A
3504 wqdbg_v(" waitq already in set 0x%llx (WQL_LINK)", setid);
3505 if (link->wql_link.left_setid == setid ||
3506 link->wql_link.right_setid == setid)
3e170ce0
A
3507 return WQ_ITERATE_FOUND;
3508 }
3509
3510 return WQ_ITERATE_CONTINUE;
3511}
3512
3513/**
3514 * determine if 'waitq' is a member of 'wqset'
3515 *
3516 * Conditions:
 3517 *	neither 'waitq' nor 'wqset' is locked
3518 * may disable and re-enable interrupts while locking 'waitq'
3519 */
3520boolean_t waitq_member(struct waitq *waitq, struct waitq_set *wqset)
3521{
3522 kern_return_t kr = WQ_ITERATE_SUCCESS;
3523 uint64_t setid;
3e170ce0
A
3524
3525 if (!waitq_valid(waitq))
3526 panic("Invalid waitq: %p", waitq);
39037602 3527 assert(!waitq_irq_safe(waitq));
3e170ce0
A
3528
3529 if (!waitqs_is_set(wqset))
3530 return FALSE;
39037602 3531
3e170ce0
A
3532 waitq_lock(waitq);
3533
3534 setid = wqset->wqset_id;
3535 if (!setid)
3536 goto out_unlock;
3537
3538 /* fast path: most waitqs are members of only 1 set */
3539 if (waitq->waitq_set_id == setid) {
3540 waitq_unlock(waitq);
3e170ce0
A
3541 return TRUE;
3542 }
3543
3544 /* walk the link table and look for the Set ID of wqset */
39037602
A
3545 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
3546 WQL_ALL, (void *)&setid, waitq_inset_cb);
3e170ce0
A
3547
3548out_unlock:
3549 waitq_unlock(waitq);
39037602 3550 return (kr == WQ_ITERATE_FOUND);
3e170ce0
A
3551}
3552
3553/**
 3554 * Returns true if the given waitq is a member of at least 1 set
3555 */
3556boolean_t waitq_in_set(struct waitq *waitq)
3557{
39037602 3558 struct waitq_link *link;
3e170ce0 3559 boolean_t inset = FALSE;
3e170ce0
A
3560
3561 if (waitq_irq_safe(waitq))
39037602
A
3562 return FALSE;
3563
3e170ce0
A
3564 waitq_lock(waitq);
3565
3566 if (!waitq->waitq_set_id)
3567 goto out_unlock;
3568
39037602 3569 link = wql_get_link(waitq->waitq_set_id);
3e170ce0
A
3570 if (link) {
3571 /* if we get here, the waitq is in _at_least_one_ set */
3572 inset = TRUE;
39037602 3573 wql_put_link(link);
3e170ce0
A
3574 } else {
3575 /* we can just optimize this for next time */
3576 waitq->waitq_set_id = 0;
3577 }
3578
3579out_unlock:
3580 waitq_unlock(waitq);
3e170ce0
A
3581 return inset;
3582}
3583
3584
3585/**
3586 * pre-allocate a waitq link structure from the link table
3587 *
3588 * Conditions:
3589 * 'waitq' is not locked
3590 * may (rarely) block if link table needs to grow
3591 */
3592uint64_t waitq_link_reserve(struct waitq *waitq)
3593{
39037602 3594 struct waitq_link *link;
3e170ce0
A
3595 uint64_t reserved_id = 0;
3596
3597 assert(get_preemption_level() == 0 && waitq_wait_possible(current_thread()));
3598
3599 /*
3600 * We've asserted that the caller can block, so we enforce a
3601 * minimum-free table element policy here.
3602 */
39037602 3603 wql_ensure_free_space();
3e170ce0
A
3604
3605 (void)waitq;
39037602 3606 link = wql_alloc_link(LT_RESERVED);
3e170ce0
A
3607 if (!link)
3608 return 0;
3609
39037602 3610 reserved_id = link->wql_setid.id;
3e170ce0
A
3611
3612 return reserved_id;
3613}
3614
3615/**
3616 * release a pre-allocated waitq link structure
3617 */
3618void waitq_link_release(uint64_t id)
3619{
39037602 3620 struct waitq_link *link;
3e170ce0
A
3621
3622 if (id == 0)
3623 return;
3624
39037602 3625 link = wql_get_reserved(id, WQL_LINK);
3e170ce0
A
3626 if (!link)
3627 return;
3628
3629 /*
3630 * if we successfully got a link object, then we know
3631 * it's not been marked valid, and can be released with
39037602 3632 * a standard wql_put_link() which should free the element.
3e170ce0 3633 */
39037602
A
3634 wql_put_link(link);
3635#if CONFIG_LTABLE_STATS
3636 g_wqlinktable.nreserved_releases += 1;
3e170ce0
A
3637#endif
3638}
3639
3640/**
3641 * link 'waitq' to the set identified by 'setid' using the 'link' structure
3642 *
3643 * Conditions:
3644 * 'waitq' is locked
3645 * caller should have a reference to the 'link' object
3646 */
3647static kern_return_t waitq_link_internal(struct waitq *waitq,
39037602 3648 uint64_t setid, struct waitq_link *link)
3e170ce0 3649{
39037602 3650 struct waitq_link *qlink;
3e170ce0
A
3651 kern_return_t kr;
3652
3653 assert(waitq_held(waitq));
3654
3655 /*
3656 * If the waitq_set_id field is empty, then this waitq is not
3657 * a member of any other set. All we have to do is update the
3658 * field.
3659 */
3660 if (!waitq->waitq_set_id) {
3661 waitq->waitq_set_id = setid;
3662 return KERN_SUCCESS;
3663 }
3664
39037602 3665 qlink = wql_get_link(waitq->waitq_set_id);
3e170ce0
A
3666 if (!qlink) {
3667 /*
3668 * The set to which this wait queue belonged has been
3669 * destroyed / invalidated. We can re-use the waitq field.
3670 */
3671 waitq->waitq_set_id = setid;
3672 return KERN_SUCCESS;
3673 }
39037602 3674 wql_put_link(qlink);
3e170ce0
A
3675
3676 /*
3677 * Check to see if it's already a member of the set.
3678 *
3679 * TODO: check for cycles!
3680 */
39037602
A
3681 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
3682 WQL_ALL, (void *)&setid, waitq_inset_cb);
3e170ce0
A
3683 if (kr == WQ_ITERATE_FOUND)
3684 return kr;
3685
3686 /*
3687 * This wait queue is a member of at least one set already,
3688 * and _not_ a member of the given set. Use our previously
3689 * allocated link object, and hook it up to the wait queue.
3690 * Note that it's possible that one or more of the wait queue sets to
3691 * which the wait queue belongs were invalidated before we allocated
3692 * this link object. That's OK because the next time we use that
3693 * object we'll just ignore it.
3694 */
3695 link->wql_link.left_setid = setid;
3696 link->wql_link.right_setid = waitq->waitq_set_id;
3697 wql_mkvalid(link);
3e170ce0 3698
39037602 3699 waitq->waitq_set_id = link->wql_setid.id;
3700
3701 return KERN_SUCCESS;
3702}
3703
3704/**
3705 * link 'waitq' to 'wqset'
3706 *
3707 * Conditions:
3708 * if 'lock_state' contains WAITQ_SHOULD_LOCK, 'waitq' must be unlocked.
3709 * Otherwise, 'waitq' must be locked.
3710 *
3711 * may (rarely) block on link table allocation if the table has to grow,
3712 * and no 'reserved_link' object is passed.
3713 *
3714 * Notes:
3715 * The caller can guarantee that this function will never block by
3716 * pre-allocating a link table object and passing its ID in 'reserved_link'
3717 */
3718kern_return_t waitq_link(struct waitq *waitq, struct waitq_set *wqset,
3719 waitq_lock_state_t lock_state, uint64_t *reserved_link)
3720{
3721 kern_return_t kr;
39037602 3722 struct waitq_link *link;
3e170ce0 3723 int should_lock = (lock_state == WAITQ_SHOULD_LOCK);
3e170ce0 3724
39037602 3725 if (!waitq_valid(waitq) || waitq_irq_safe(waitq))
3726 panic("Invalid waitq: %p", waitq);
3727
3728 if (!waitqs_is_set(wqset))
3729 return KERN_INVALID_ARGUMENT;
3730
3731 wqdbg_v("Link waitq %p to wqset 0x%llx",
3732 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id);
3733
3734 /*
3735 * We _might_ need a new link object here, so we'll grab outside
3736 * the lock because the alloc call _might_ block.
3737 *
39037602 3738 * If the caller reserved a link beforehand, then wql_get_link
3739 * is guaranteed not to block because the caller holds an extra
3740 * reference to the link which, in turn, holds a reference to the
3741 * link table.
3742 */
3743 if (reserved_link && *reserved_link != 0) {
39037602 3744 link = wql_get_reserved(*reserved_link, WQL_LINK);
3745 /* always consume the caller's reference */
3746 *reserved_link = 0;
3747 } else {
39037602 3748 link = wql_alloc_link(WQL_LINK);
3749 }
3750 if (!link)
3751 return KERN_NO_SPACE;
3752
3753 if (should_lock) {
3754 waitq_lock(waitq);
3755 }
3756
3757 kr = waitq_link_internal(waitq, wqset->wqset_id, link);
3758
3759 if (should_lock) {
3760 waitq_unlock(waitq);
3761 }
3762
39037602 3763 wql_put_link(link);
3764
3765 return kr;
3766}
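
/*
 * Illustrative sketch, not part of the original source: the simple,
 * possibly-blocking form of the call above, where waitq_link() takes the
 * waitq lock itself and allocates the link on demand.
 * 'example_link_simple' is a hypothetical name.
 */
#if 0 /* example only */
static kern_return_t example_link_simple(struct waitq *wq,
					 struct waitq_set *wqset)
{
	/* may (rarely) block if the link table has to grow */
	return waitq_link(wq, wqset, WAITQ_SHOULD_LOCK, NULL);
}
#endif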
3767
3768/**
3769 * helper: unlink 'waitq' from waitq set identified by 'setid'
3770 * this function also prunes invalid objects from the tree
3771 *
3772 * Conditions:
39037602 3773 * MUST be called from walk_waitq_links link table walk
3774 * 'waitq' is locked
3775 *
3776 * Notes:
3777 * This is a helper function which compresses the link table by culling
3778 * unused or unnecessary links. See comments below for different
3779 * scenarios.
3780 */
3781static inline int waitq_maybe_remove_link(struct waitq *waitq,
3782 uint64_t setid,
3783 struct waitq_link *parent,
3784 struct waitq_link *left,
3785 struct waitq_link *right)
3786{
3787 uint64_t *wq_setid = &waitq->waitq_set_id;
3788
3789 /*
3790 * There are two scenarios:
3791 *
3792 * Scenario 1:
3793 * --------------------------------------------------------------------
3794 * waitq->waitq_set_id == parent
3795 *
3796 * parent(LINK)
3797 * / \
3798 * / \
3799 * / \
3800 * L(LINK/WQS_l) R(LINK/WQS_r)
3801 *
3802 * In this scenario, we assert that the original waitq points to the
3803 * parent link we were passed in. If WQS_l (or WQS_r) is the waitq
3804 * set we're looking for, we can set the corresponding parent
3805 * link id (left or right) to 0. To compress the tree, we can reset the
3806 * waitq_set_id of the original waitq to point to the side of the
3807 * parent that is still valid. We then discard the parent link object.
3808 */
39037602 3809 if (*wq_setid == parent->wql_setid.id) {
3810 if (!left && !right) {
3811 /* completely invalid children */
39037602 3812 wql_invalidate(parent);
3813 wqdbg_v("S1, L+R");
3814 *wq_setid = 0;
3815 return WQ_ITERATE_INVALID;
39037602 3816 } else if (!left || left->wql_setid.id == setid) {
3817 /*
3818 * if the left side matches, we know it points either to the
3819 * WQS we're unlinking, or to an invalid object:
3820 * no need to invalidate it
3821 */
3822 *wq_setid = right ? right->wql_setid.id : 0;
3823 wql_invalidate(parent);
3824 wqdbg_v("S1, L");
3825 return left ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
39037602 3826 } else if (!right || right->wql_setid.id == setid) {
3827 /*
3828 * if the right side matches, we know it points either to the
3829 * WQS we're unlinking, or to an invalid object:
3830 * no need to invalidate it
3831 */
3832 *wq_setid = left ? left->wql_setid.id : 0;
3833 wql_invalidate(parent);
3834 wqdbg_v("S1, R");
3835 return right ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
3836 }
3837 }
3838
3839 /*
3840 * the tree walk starts at the top-of-tree and moves down,
3841 * so these are safe asserts.
3842 */
3843 assert(left || right); /* one of them has to be valid at this point */
3844
3845 /*
3846 * Scenario 2:
3847 * --------------------------------------------------------------------
3848 * waitq->waitq_set_id == ... (OR parent)
3849 *
3850 * ...
3851 * |
3852 * parent
3853 * / \
3854 * / \
3855 * L(LINK) R(LINK)
3856 * /\ /\
3857 * / \ / \
3858 * / \ Rl(*) Rr(*)
3859 * Ll(WQS) Lr(WQS)
3860 *
3861 * In this scenario, a leaf node of either the left or right side
3862 * could be the wait queue set we're looking to unlink. We also handle
3863 * the case where one of these links is invalid. If a leaf node is
3864 * invalid or it's the set we're looking for, we can safely remove the
3865 * middle link (left or right) and point the parent link directly to
3866 * the remaining leaf node.
3867 */
39037602 3868 if (left && wql_type(left) == WQL_LINK) {
3e170ce0 3869 uint64_t Ll, Lr;
3870 struct waitq_link *linkLl, *linkLr;
3871 assert(left->wql_setid.id != setid);
3872 Ll = left->wql_link.left_setid;
3873 Lr = left->wql_link.right_setid;
3874 linkLl = wql_get_link(Ll);
3875 linkLr = wql_get_link(Lr);
3876 if (!linkLl && !linkLr) {
3877 /*
3878 * The left object points to two invalid objects!
3879 * We can invalidate the left w/o touching the parent.
3880 */
39037602 3881 wql_invalidate(left);
3882 wqdbg_v("S2, Ll+Lr");
3883 return WQ_ITERATE_INVALID;
3884 } else if (!linkLl || Ll == setid) {
3885 /* Ll is invalid and/or the wait queue set we're looking for */
3886 parent->wql_link.left_setid = Lr;
3887 wql_invalidate(left);
3888 wql_put_link(linkLl);
3889 wql_put_link(linkLr);
3890 wqdbg_v("S2, Ll");
3891 return linkLl ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
3892 } else if (!linkLr || Lr == setid) {
3893 /* Lr is invalid and/or the wait queue set we're looking for */
3894 parent->wql_link.left_setid = Ll;
3895 wql_invalidate(left);
3896 wql_put_link(linkLr);
3897 wql_put_link(linkLl);
3898 wqdbg_v("S2, Lr");
3899 return linkLr ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
3900 }
3901 wql_put_link(linkLl);
3902 wql_put_link(linkLr);
3903 }
3904
39037602 3905 if (right && wql_type(right) == WQL_LINK) {
3e170ce0 3906 uint64_t Rl, Rr;
3907 struct waitq_link *linkRl, *linkRr;
3908 assert(right->wql_setid.id != setid);
3909 Rl = right->wql_link.left_setid;
3910 Rr = right->wql_link.right_setid;
3911 linkRl = wql_get_link(Rl);
3912 linkRr = wql_get_link(Rr);
3913 if (!linkRl && !linkRr) {
3914 /*
3915 * The right object points to two invalid objects!
3916 * We can invalidate the right w/o touching the parent.
3917 */
39037602 3918 wql_invalidate(right);
3919 wqdbg_v("S2, Rl+Rr");
3920 return WQ_ITERATE_INVALID;
3921 } else if (!linkRl || Rl == setid) {
3922 /* Rl is invalid and/or the wait queue set we're looking for */
3923 parent->wql_link.right_setid = Rr;
3924 wql_invalidate(right);
3925 wql_put_link(linkRl);
3926 wql_put_link(linkRr);
3927 wqdbg_v("S2, Rl");
3928 return linkRl ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
3929 } else if (!linkRr || Rr == setid) {
3930 /* Rr is invalid and/or the wait queue set we're looking for */
3931 parent->wql_link.right_setid = Rl;
3932 wql_invalidate(right);
3933 wql_put_link(linkRl);
3934 wql_put_link(linkRr);
3935 wqdbg_v("S2, Rr");
3936 return linkRr ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
3937 }
3938 wql_put_link(linkRl);
3939 wql_put_link(linkRr);
3940 }
3941
3942 return WQ_ITERATE_CONTINUE;
3943}
3944
3945/**
3946 * link table walk callback that unlinks 'waitq' from 'ctx->setid'
3947 *
3948 * Conditions:
39037602 3949 * called from walk_waitq_links
3950 * 'waitq' is locked
3951 *
3952 * Notes:
3953 * uses waitq_maybe_remove_link() to compress the linktable and
3954 * perform the actual unlinking
3955 */
3956static int waitq_unlink_cb(struct waitq *waitq, void *ctx,
39037602 3957 struct waitq_link *link)
3958{
3959 uint64_t setid = *((uint64_t *)ctx);
39037602 3960 struct waitq_link *right, *left;
3961 int ret = 0;
3962
39037602 3963 if (wql_type(link) != WQL_LINK)
3964 return WQ_ITERATE_CONTINUE;
3965
3966 do {
3967 left = wql_get_link(link->wql_link.left_setid);
3968 right = wql_get_link(link->wql_link.right_setid);
3969
3970 ret = waitq_maybe_remove_link(waitq, setid, link, left, right);
3971
3972 wql_put_link(left);
3973 wql_put_link(right);
3e170ce0 3974
39037602 3975 if (!wql_is_valid(link))
3976 return WQ_ITERATE_INVALID;
3977 /* A ret value of UNLINKED will break us out of table walk */
3978 } while (ret == WQ_ITERATE_INVALID);
3979
3980 return ret;
3981}
3982
3983
3984/**
3985 * undo/remove a prepost from 'ctx' (waitq) to 'wqset'
3986 *
3987 * Conditions:
3988 * Called from wq_prepost_foreach_locked OR wq_prepost_iterate
3989 * 'wqset' may be NULL
3990 * (ctx)->unlink_wqset is locked
3991 */
3992static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx,
3993 struct wq_prepost *wqp, struct waitq *waitq)
3994{
3995 struct wq_unlink_ctx *ulctx = (struct wq_unlink_ctx *)ctx;
3996
3997 if (waitq != ulctx->unlink_wq)
3998 return WQ_ITERATE_CONTINUE;
3999
4000 if (wqp_type(wqp) == WQP_WQ &&
4001 wqp->wqp_prepostid.id == ulctx->unlink_wqset->wqset_prepost_id) {
4002 /* this is the only prepost on this wait queue set */
4003 wqdbg_v("unlink wqp (WQ) 0x%llx", wqp->wqp_prepostid.id);
4004 ulctx->unlink_wqset->wqset_prepost_id = 0;
4005 return WQ_ITERATE_BREAK;
4006 }
4007
4008 assert(wqp_type(wqp) == WQP_POST);
4009
4010 /*
4011 * The prepost object 'wqp' points to a waitq which should no longer
4012 * be preposted to 'ulctx->unlink_wqset'. We can remove the prepost
4013 * object from the list and break out of the iteration. Using the
4014 * context object in this way allows this same callback function to be
4015 * used from both wq_prepost_foreach_locked and wq_prepost_iterate.
4016 */
4017 wq_prepost_remove(ulctx->unlink_wqset, wqp);
4018 return WQ_ITERATE_BREAK;
4019}
4020
4021/**
4022 * unlink 'waitq' from 'wqset'
4023 *
4024 * Conditions:
4025 * 'waitq' is locked
4026 * 'wqset' is _not_ locked
4027 * may (rarely) spin in prepost clear and drop/re-acquire 'waitq' lock
4028 * (see waitq_clear_prepost_locked)
4029 */
4030static kern_return_t waitq_unlink_locked(struct waitq *waitq,
39037602 4031 struct waitq_set *wqset)
4032{
4033 uint64_t setid;
4034 kern_return_t kr;
4035
4036 assert(!waitq_irq_safe(waitq));
4037
4038 setid = wqset->wqset_id;
4039
4040 if (waitq->waitq_set_id == 0) {
4041 /*
4042 * TODO:
4043 * it doesn't belong to anyone, and it has a prepost object?
4044 * This is an artifact of not cleaning up after kqueues when
4045 * they prepost into select sets...
4046 */
4047 if (waitq->waitq_prepost_id != 0)
39037602 4048 (void)waitq_clear_prepost_locked(waitq);
4049 return KERN_NOT_IN_SET;
4050 }
4051
4052 if (waitq->waitq_set_id == setid) {
4053 waitq->waitq_set_id = 0;
4054 /*
4055 * This was the only set to which the waitq belonged: we can
4056 * safely release the waitq's prepost object. It doesn't
4057 * matter if this function drops and re-acquires the lock
4058 * because we're not manipulating waitq state any more.
4059 */
39037602 4060 (void)waitq_clear_prepost_locked(waitq);
4061 return KERN_SUCCESS;
4062 }
4063
4064 /*
4065 * The waitq was a member of more than one set, so we need to
4066 * handle potentially compressing the link table, and
4067 * adjusting the waitq->waitq_set_id value.
4068 *
4069 * Note: we can't free the waitq's associated prepost object (if any)
4070 * because it may be in use by one or more _other_ sets to
4071 * which this queue belongs.
4072 *
4073 * Note: This function only handles a single level of the queue linkage.
4074 * Removing a waitq from a set to which it does not directly
4075 * belong is undefined. For example, if a waitq belonged to set
4076 * A, and set A belonged to set B, you could not remove the waitq
4077 * from set B.
4078 */
4079 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
4080 WQL_LINK, (void *)&setid, waitq_unlink_cb);
4081
4082 if (kr == WQ_ITERATE_UNLINKED) {
4083 struct wq_unlink_ctx ulctx;
4084
4085 kr = KERN_SUCCESS; /* found it and dis-associated it */
4086
4087 /* don't look for preposts if it's not prepost-enabled */
4088 if (!wqset->wqset_q.waitq_prepost)
4089 goto out;
4090
4091 assert(!waitq_irq_safe(&wqset->wqset_q));
4092
4093 waitq_set_lock(wqset);
4094 /*
4095 * clear out any prepost from waitq into wqset
4096 * TODO: this could be more efficient than a linear search of
4097 * the waitq set's prepost list.
4098 */
4099 ulctx.unlink_wq = waitq;
4100 ulctx.unlink_wqset = wqset;
4101 (void)wq_prepost_iterate(wqset->wqset_prepost_id, (void *)&ulctx,
4102 waitq_unlink_prepost_cb);
4103 waitq_set_unlock(wqset);
4104 } else {
4105 kr = KERN_NOT_IN_SET; /* waitq is _not_ associated with wqset */
4106 }
4107
39037602 4108out:
4109 return kr;
4110}
4111
4112/**
4113 * unlink 'waitq' from 'wqset'
4114 *
4115 * Conditions:
4116 * neither 'waitq' nor 'wqset' is locked
4117 * may disable and re-enable interrupts
4118 * may (rarely) spin in prepost clear
4119 * (see waitq_clear_prepost_locked)
4120 */
4121kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset)
4122{
4123 kern_return_t kr = KERN_SUCCESS;
4124
4125 assert(waitqs_is_set(wqset));
4126
4127 /*
4128 * we allow the waitq to be invalid because the caller may be trying
4129 * to clear out old/dirty state
4130 */
4131 if (!waitq_valid(waitq))
4132 return KERN_INVALID_ARGUMENT;
4133
4134 wqdbg_v("unlink waitq %p from set 0x%llx",
4135 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id);
4136
4137 assert(!waitq_irq_safe(waitq));
4138
4139 waitq_lock(waitq);
4140
39037602 4141 kr = waitq_unlink_locked(waitq, wqset);
4142
4143 waitq_unlock(waitq);
4144 return kr;
4145}
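
/*
 * Illustrative sketch, not part of the original source: undoing a single
 * link and treating KERN_NOT_IN_SET as a benign outcome, which is common
 * when clearing out stale state. 'example_unlink_one' is hypothetical.
 */
#if 0 /* example only */
static void example_unlink_one(struct waitq *wq, struct waitq_set *wqset)
{
	kern_return_t kr = waitq_unlink(wq, wqset);

	if (kr == KERN_NOT_IN_SET) {
		/* 'wq' was not (or was no longer) a member of 'wqset' */
	}
}
#endif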
4146
4147/**
4148 * unlink a waitq from a waitq set, but reference the waitq by its prepost ID
4149 *
4150 * Conditions:
4151 * 'wqset' is unlocked
4152 * wqp_id may be valid or invalid
4153 */
4154void waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset)
4155{
4156 struct wq_prepost *wqp;
4157
4158 disable_preemption();
4159 wqp = wq_prepost_get(wqp_id);
4160 if (wqp) {
4161 struct waitq *wq;
4162
4163 wq = wqp->wqp_wq.wqp_wq_ptr;
4164
4165 /*
4166 * lock the waitq, then release our prepost ID reference, then
4167 * unlink the waitq from the wqset: this ensures that we don't
4168 * hold a prepost ID reference during the unlink, but we also
4169 * complete the unlink operation atomically to avoid a race
4170 * with waitq_unlink[_all].
4171 */
4172 assert(!waitq_irq_safe(wq));
4173
4174 waitq_lock(wq);
4175 wq_prepost_put(wqp);
4176
4177 if (!waitq_valid(wq)) {
4178 /* someone already tore down this waitq! */
4179 waitq_unlock(wq);
4180 enable_preemption();
4181 return;
4182 }
4183
4184 /* this _may_ drop the wq lock, but that's OK */
39037602 4185 waitq_unlink_locked(wq, wqset);
4186
4187 waitq_unlock(wq);
4188 }
4189 enable_preemption();
4190 return;
4191}
4192
4193
4194/**
4195 * unlink 'waitq' from all sets to which it belongs
4196 *
4197 * Conditions:
4198 * 'waitq' is locked on entry
4199 * returns with waitq lock dropped
4200 *
4201 * Notes:
4202 * may (rarely) spin (see waitq_clear_prepost_locked)
4203 */
39037602 4204kern_return_t waitq_unlink_all_unlock(struct waitq *waitq)
3e170ce0 4205{
39037602 4206 uint64_t old_set_id = 0;
4207 wqdbg_v("unlink waitq %p from all sets",
4208 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq));
39037602 4209 assert(!waitq_irq_safe(waitq));
4210
4211 /* it's not a member of any sets */
4212 if (waitq->waitq_set_id == 0) {
4213 waitq_unlock(waitq);
3e170ce0 4214 return KERN_SUCCESS;
39037602 4215 }
3e170ce0 4216
39037602 4217 old_set_id = waitq->waitq_set_id;
4218 waitq->waitq_set_id = 0;
4219
4220 /*
4221 * invalidate the prepost entry for this waitq.
4222 * This may drop and re-acquire the waitq lock, but that's OK because
4223 * if it was added to another set and preposted to that set in the
4224 * time we drop the lock, the state will remain consistent.
4225 */
4226 (void)waitq_clear_prepost_locked(waitq);
4227
4228 waitq_unlock(waitq);
4229
4230 if (old_set_id) {
4231 /*
4232 * Walk the link table and invalidate each LINK object that
4233 * used to connect this waitq to one or more sets: this works
4234 * because WQL_LINK objects are private to each wait queue
4235 */
4236 (void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, old_set_id,
4237 WQL_LINK, NULL, waitq_unlink_all_cb);
4238 }
4239
4240 return KERN_SUCCESS;
4241}
4242
4243/**
4244 * unlink 'waitq' from all sets to which it belongs
4245 *
4246 * Conditions:
4247 * 'waitq' is not locked
4248 * may disable and re-enable interrupts
4249 * may (rarely) spin
4250 * (see waitq_unlink_all_locked, waitq_clear_prepost_locked)
4251 */
4252kern_return_t waitq_unlink_all(struct waitq *waitq)
4253{
4254 kern_return_t kr = KERN_SUCCESS;
4255
4256 if (!waitq_valid(waitq))
4257 panic("Invalid waitq: %p", waitq);
4258
39037602 4259 assert(!waitq_irq_safe(waitq));
3e170ce0 4260 waitq_lock(waitq);
4261 if (!waitq_valid(waitq)) {
4262 waitq_unlock(waitq);
4263 return KERN_SUCCESS;
4264 }
4265
4266 kr = waitq_unlink_all_unlock(waitq);
4267 /* waitq unlocked and set links deallocated */
4268
4269 return kr;
4270}
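
/*
 * Illustrative sketch, not part of the original source: a typical
 * teardown ordering that removes a waitq from every set (and clears its
 * prepost state) before the object embedding it is destroyed.
 * 'example_waitq_teardown' is a hypothetical name.
 */
#if 0 /* example only */
static void example_waitq_teardown(struct waitq *wq)
{
	/* drops all set memberships and any prepost object */
	(void)waitq_unlink_all(wq);

	/*
	 * The embedding object would finish its own teardown here
	 * (e.g. by deinitializing the waitq) - an assumption about
	 * the surrounding code, not shown in this file excerpt.
	 */
}
#endif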
4271
4272
4273/**
4274 * unlink all waitqs from 'wqset'
4275 *
4276 * Conditions:
4277 * 'wqset' is locked on entry
4278 * 'wqset' is unlocked on exit and spl is restored
4279 *
4280 * Note:
4281 * may (rarely) spin/block (see waitq_clear_prepost_locked)
4282 */
39037602 4283kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset)
3e170ce0 4284{
4285 struct waitq_link *link;
4286 uint64_t prepost_id;
4287
4288 wqdbg_v("unlink all queues from set 0x%llx", wqset->wqset_id);
4289
4290 /*
4291 * This operation does not require interaction with any of the set's
4292 * constituent wait queues. All we have to do is invalidate the SetID
4293 */
4294
4295 /* invalidate and re-alloc the link object first */
39037602 4296 link = wql_get_link(wqset->wqset_id);
4297
4298 /* we may have raced with a waitq_set_deinit: handle this */
4299 if (!link) {
4300 waitq_set_unlock(wqset);
4301 return KERN_SUCCESS;
4302 }
4303
39037602 4304 wql_invalidate(link);
4305
4306 /* re-alloc the object to get a new generation ID */
4307 wql_realloc_link(link, WQL_WQS);
4308 link->wql_wqs.wql_set = wqset;
3e170ce0 4309
4310 wqset->wqset_id = link->wql_setid.id;
4311 wql_mkvalid(link);
4312 wql_put_link(link);
4313
4314 /* clear any preposts attached to this set */
4315 prepost_id = 0;
4316 if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id)
4317 prepost_id = wqset->wqset_prepost_id;
4318 /* else { TODO: notify kqueue subsystem? } */
4319 wqset->wqset_prepost_id = 0;
4320
4321 /*
4322 * clear set linkage and prepost object associated with this set:
4323 * waitq sets may prepost to other sets if, for example, they are
4324 * associated with a kqueue which is in a select set.
4325 *
39037602 4326 * This releases all the set link objects
4327 * (links to other sets to which this set was previously added)
4328 */
4329 waitq_unlink_all_unlock(&wqset->wqset_q);
4330 /* wqset->wqset_q unlocked */
4331
4332 /* drop / unlink all the prepost table objects */
4333 if (prepost_id)
4334 (void)wq_prepost_iterate(prepost_id, NULL,
4335 wqset_clear_prepost_chain_cb);
4336
4337 return KERN_SUCCESS;
4338}
4339
4340/**
4341 * unlink all waitqs from 'wqset'
4342 *
4343 * Conditions:
4344 * 'wqset' is not locked
4345 * may (rarely) spin/block (see waitq_clear_prepost_locked)
4346 */
4347kern_return_t waitq_set_unlink_all(struct waitq_set *wqset)
4348{
4349 assert(waitqs_is_set(wqset));
4350 assert(!waitq_irq_safe(&wqset->wqset_q));
4351
4352 waitq_set_lock(wqset);
4353 return waitq_set_unlink_all_unlock(wqset);
4354 /* wqset unlocked and set links and preposts deallocated */
4355}
4356
4357static int waitq_prepost_reserve_cb(struct waitq *waitq, void *ctx,
39037602 4358 struct waitq_link *link)
4359{
4360 uint32_t *num = (uint32_t *)ctx;
4361 (void)waitq;
4362
4363 /*
4364 * In the worst case, we'll have to allocate 2 prepost objects
4365 * per waitq set (if the set was already preposted by another
4366 * waitq).
4367 */
39037602 4368 if (wql_type(link) == WQL_WQS) {
4369 /*
4370 * check to see if the associated waitq actually supports
4371 * preposting
4372 */
39037602 4373 if (waitq_set_can_prepost(link->wql_wqs.wql_set))
4374 *num += 2;
4375 }
4376 return WQ_ITERATE_CONTINUE;
4377}
4378
4379static int waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq,
39037602 4380 int *did_unlock, struct wq_prepost **wqp)
4381{
4382 struct wq_prepost *tmp;
4383 struct wqp_cache *cache;
4384
4385 *did_unlock = 0;
4386
4387 /*
4388 * Before we unlock the waitq, check the per-processor prepost object
4389 * cache to see if there's enough there for us. If so, do the
4390 * allocation, keep the lock and save an entire iteration over the set
4391 * linkage!
4392 */
4393 if (waitq) {
4394 disable_preemption();
4395 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
4396 if (nalloc <= (int)cache->avail)
4397 goto do_alloc;
4398 enable_preemption();
4399
4400 /* unlock the waitq to perform the allocation */
4401 *did_unlock = 1;
4402 waitq_unlock(waitq);
4403 }
4404
4405do_alloc:
39037602 4406 tmp = wq_prepost_alloc(LT_RESERVED, nalloc);
4407 if (!tmp)
4408 panic("Couldn't reserve %d preposts for waitq @%p (wqp@%p)",
4409 nalloc, waitq, *wqp);
4410 if (*wqp) {
4411 /* link the two lists */
4412 int __assert_only rc;
4413 rc = wq_prepost_rlink(tmp, *wqp);
4414 assert(rc == nalloc);
4415 }
4416 *wqp = tmp;
4417
4418 /*
4419 * If the caller can block, then enforce a minimum-free table element
4420 * policy here. This helps ensure that we will have enough prepost
4421 * objects for callers such as selwakeup() that can be called with
4422 * spin locks held.
4423 */
4424 if (get_preemption_level() == 0)
4425 wq_prepost_ensure_free_space();
4426
4427 if (waitq) {
4428 if (*did_unlock == 0) {
4429 /* decrement the preemption count if alloc from cache */
4430 enable_preemption();
4431 } else {
4432 /* otherwise: re-lock the waitq */
4433 waitq_lock(waitq);
4434 }
4435 }
4436
4437 return nalloc;
4438}
4439
4440static int waitq_count_prepost_reservation(struct waitq *waitq, int extra, int keep_locked)
4441{
4442 int npreposts = 0;
4443
4444 /*
4445 * If the waitq is not currently part of a set, and we're not asked to
4446 * keep the waitq locked then we'll want to have 3 in reserve
4447 * just-in-case it becomes part of a set while we unlock and reserve.
4448 * We may need up to 1 object for the waitq, and 2 for the set.
4449 */
4450 if (waitq->waitq_set_id == 0) {
4451 npreposts = 3;
4452 } else {
4453 /* this queue has never been preposted before */
4454 if (waitq->waitq_prepost_id == 0)
4455 npreposts = 3;
4456
4457 /*
4458 * Walk the set of table linkages associated with this waitq
4459 * and count the worst-case number of prepost objects that
4460 * may be needed during a wakeup_all. We can walk this without
4461 * locking each set along the way because the table-based IDs
4462 * disconnect us from the set pointers themselves, and the
4463 * table walking is careful to read the setid values only once.
4464 * Locking each set up the chain also doesn't guarantee that
4465 * their membership won't change between the time we unlock
4466 * that set and when we actually go to prepost, so our
4467 * situation is no worse than before and we've alleviated lock
4468 * contention on any sets to which this waitq belongs.
4469 */
39037602 4470 (void)walk_waitq_links(LINK_WALK_FULL_DAG_UNLOCKED,
3e170ce0 4471 waitq, waitq->waitq_set_id,
39037602 4472 WQL_WQS, (void *)&npreposts,
4473 waitq_prepost_reserve_cb);
4474 }
4475
4476 if (extra > 0)
4477 npreposts += extra;
4478
4479 if (npreposts == 0 && !keep_locked) {
4480 /*
4481 * If we get here, we were asked to reserve some prepost
4482 * objects for a waitq that was previously preposted, and is not
4483 * currently a member of any sets. We have also been
4484 * instructed to unlock the waitq when we're done. In this
4485 * case, we pre-allocated enough reserved objects to handle
4486 * the case where the waitq gets added to a single set when
4487 * the lock is released.
4488 */
4489 npreposts = 3;
4490 }
4491
4492 return npreposts;
4493}
4494
4495
4496/**
4497 * pre-allocate prepost objects for 'waitq'
4498 *
4499 * Conditions:
4500 * 'waitq' is not locked
4501 *
4502 * Returns:
4503 * panic on error
4504 *
4505 * on success, returns the ID of the head of a singly-linked
4506 * list of pre-allocated prepost objects (0 if none were needed).
4507 *
4508 * Notes:
4509 * If 'lock_state' is WAITQ_KEEP_LOCKED, this function performs the pre-allocation
39037602 4510 * atomically and returns 'waitq' locked.
4511 *
4512 * This function attempts to pre-allocate precisely enough prepost
4513 * objects based on the current set membership of 'waitq'. If the
4514 * operation is performed atomically, then the caller
4515 * is guaranteed to have enough pre-allocated prepost object to avoid
4516 * any (rare) blocking in the wakeup path.
4517 */
4518uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra,
39037602 4519 waitq_lock_state_t lock_state)
4520{
4521 uint64_t reserved = 0;
4522 uint64_t prev_setid = 0, prev_prepostid = 0;
4523 struct wq_prepost *wqp = NULL;
4524 int nalloc = 0, npreposts = 0;
4525 int keep_locked = (lock_state == WAITQ_KEEP_LOCKED);
4526 int unlocked = 0;
4527
4528 wqdbg_v("Attempting to reserve prepost linkages for waitq %p (extra:%d)",
4529 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), extra);
4530
4531 if (waitq == NULL && extra > 0) {
4532 /*
4533 * Simple prepost object allocation:
4534 * we'll add 2 more because the waitq might need an object,
4535 * and the set itself may need a new POST object in addition
4536 * to the number of preposts requested by the caller
4537 */
39037602 4538 nalloc = waitq_alloc_prepost_reservation(extra + 2, NULL,
4539 &unlocked, &wqp);
4540 assert(nalloc == extra + 2);
4541 return wqp->wqp_prepostid.id;
4542 }
4543
4544 assert(lock_state == WAITQ_KEEP_LOCKED || lock_state == WAITQ_UNLOCK);
4545
39037602 4546 assert(!waitq_irq_safe(waitq));
3e170ce0 4547
39037602 4548 waitq_lock(waitq);
4549
4550 /* remember the set ID that we started with */
4551 prev_setid = waitq->waitq_set_id;
4552 prev_prepostid = waitq->waitq_prepost_id;
4553
4554 /*
4555 * If the waitq is not part of a set, and we're asked to
4556 * keep the set locked, then we don't have to reserve
4557 * anything!
4558 */
4559 if (prev_setid == 0 && keep_locked)
4560 goto out;
4561
4562 npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked);
4563
4564 /* nothing for us to do! */
4565 if (npreposts == 0) {
4566 if (keep_locked)
4567 goto out;
4568 goto out_unlock;
4569 }
4570
4571try_alloc:
4572 /* this _may_ unlock and relock the waitq! */
39037602 4573 nalloc = waitq_alloc_prepost_reservation(npreposts, waitq,
4574 &unlocked, &wqp);
4575
4576 if (!unlocked) {
4577 /* allocation held the waitq lock: we're done! */
4578 if (keep_locked)
4579 goto out;
4580 goto out_unlock;
4581 }
4582
4583 /*
4584 * Before we return, if the allocation had to unlock the waitq, we
4585 * must check one more time to see if we have enough. If not, we'll
4586 * try to allocate the difference. If the caller requests it, we'll
4587 * also leave the waitq locked so that the use of the pre-allocated
4588 * prepost objects can be guaranteed to be enough if a wakeup_all is
4589 * performed before unlocking the waitq.
4590 */
4591
4592 /*
4593 * If the waitq is no longer associated with a set, or if the waitq's
4594 * set/prepostid has not changed since we first walked its linkage,
4595 * we're done.
4596 */
4597 if ((waitq->waitq_set_id == 0) ||
4598 (waitq->waitq_set_id == prev_setid &&
4599 waitq->waitq_prepost_id == prev_prepostid)) {
4600 if (keep_locked)
4601 goto out;
4602 goto out_unlock;
4603 }
4604
4605 npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked);
4606
4607 if (npreposts > nalloc) {
4608 prev_setid = waitq->waitq_set_id;
4609 prev_prepostid = waitq->waitq_prepost_id;
4610 npreposts = npreposts - nalloc; /* only allocate the diff */
4611 goto try_alloc;
4612 }
4613
4614 if (keep_locked)
4615 goto out;
4616
4617out_unlock:
4618 waitq_unlock(waitq);
4619out:
4620 if (wqp)
4621 reserved = wqp->wqp_prepostid.id;
4622
4623 return reserved;
4624}
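
/*
 * Illustrative sketch, not part of the original source: pairing the
 * reservation above with a locked wakeup, mirroring the pattern used by
 * the higher-level wakeup routines later in this file. Assumes a
 * non-IRQ-safe waitq; WAITQ_ALL_PRIORITIES is assumed from the waitq
 * header, and 'example_wakeup_reserved' / 'wake_event' are hypothetical.
 */
#if 0 /* example only */
static kern_return_t example_wakeup_reserved(struct waitq *wq,
					     event64_t wake_event)
{
	kern_return_t kr;
	uint64_t reserved_preposts;

	/* reserve enough prepost objects and return with 'wq' locked */
	reserved_preposts = waitq_prepost_reserve(wq, 0, WAITQ_KEEP_LOCKED);

	/* consumes reservations as needed and unlocks 'wq' */
	kr = waitq_wakeup64_one_locked(wq, wake_event, THREAD_AWAKENED,
				       &reserved_preposts,
				       WAITQ_ALL_PRIORITIES, WAITQ_UNLOCK);

	/* return anything left over (no-op when 0) */
	waitq_prepost_release_reserve(reserved_preposts);

	return kr;
}
#endif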
4625
4626/**
4627 * release a linked list of prepost objects allocated via _prepost_reserve
4628 *
4629 * Conditions:
4630 * may (rarely) spin waiting for prepost table growth memcpy
4631 */
4632void waitq_prepost_release_reserve(uint64_t id)
4633{
4634 struct wq_prepost *wqp;
4635
4636 wqdbg_v("releasing reserved preposts starting at: 0x%llx", id);
4637
4638 wqp = wq_prepost_rfirst(id);
4639 if (!wqp)
4640 return;
4641
4642 wq_prepost_release_rlist(wqp);
4643}
4644
4645
4646/**
4647 * clear all preposts from 'wqset'
4648 *
4649 * Conditions:
4650 * 'wqset' is not locked
4651 */
4652void waitq_set_clear_preposts(struct waitq_set *wqset)
4653{
4654 uint64_t prepost_id;
4655 spl_t spl;
4656
4657 assert(waitqs_is_set(wqset));
4658
4659 if (!wqset->wqset_q.waitq_prepost || !wqset->wqset_prepost_id)
4660 return;
4661
4662 wqdbg_v("Clearing all preposted queues on waitq_set: 0x%llx",
4663 wqset->wqset_id);
4664
4665 if (waitq_irq_safe(&wqset->wqset_q))
4666 spl = splsched();
4667 waitq_set_lock(wqset);
4668 prepost_id = wqset->wqset_prepost_id;
4669 wqset->wqset_prepost_id = 0;
4670 waitq_set_unlock(wqset);
4671 if (waitq_irq_safe(&wqset->wqset_q))
4672 splx(spl);
4673
4674 /* drop / unlink all the prepost table objects */
4675 if (prepost_id)
4676 (void)wq_prepost_iterate(prepost_id, NULL,
4677 wqset_clear_prepost_chain_cb);
4678}
4679
4680
4681/* ----------------------------------------------------------------------
4682 *
4683 * Iteration: waitq -> sets / waitq_set -> preposts
4684 *
4685 * ---------------------------------------------------------------------- */
4686
4687struct wq_it_ctx {
4688 void *input;
4689 void *ctx;
4690 waitq_iterator_t it;
4691};
4692
4693static int waitq_iterate_sets_cb(struct waitq *waitq, void *ctx,
39037602 4694 struct waitq_link *link)
4695{
4696 struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx);
4697 struct waitq_set *wqset;
4698 int ret;
4699
4700 (void)waitq;
4701 assert(!waitq_irq_safe(waitq));
4702 assert(wql_type(link) == WQL_WQS);
4703
4704 /*
4705 * the waitq is locked, so we can just take the set lock
4706 * and call the iterator function
4707 */
39037602 4708 wqset = link->wql_wqs.wql_set;
3e170ce0 4709 assert(wqset != NULL);
39037602 4710 assert(!waitq_irq_safe(&wqset->wqset_q));
4711 waitq_set_lock(wqset);
4712
4713 ret = wctx->it(wctx->ctx, (struct waitq *)wctx->input, wqset);
4714
4715 waitq_set_unlock(wqset);
4716 return ret;
4717}
4718
4719/**
4720 * call external iterator function for each prepost object in wqset
4721 *
4722 * Conditions:
4723 * Called from wq_prepost_foreach_locked
4724 * (wqset locked, waitq _not_ locked)
4725 */
4726static int wqset_iterate_prepost_cb(struct waitq_set *wqset, void *ctx,
4727 struct wq_prepost *wqp, struct waitq *waitq)
4728{
4729 struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx);
4730 uint64_t wqp_id;
4731 int ret;
4732
4733 (void)wqp;
4734
4735 /*
4736 * This is a bit tricky. The 'wqset' is locked, but the 'waitq' is not.
4737 * Taking the 'waitq' lock is a lock order violation, so we need to be
4738 * careful. We also must realize that we may have taken a reference to
4739 * the 'wqp' just as the associated waitq was being torn down (or
4740 * clearing all its preposts) - see waitq_clear_prepost_locked(). If
4741 * the 'wqp' is valid and we can get the waitq lock, then we are good
4742 * to go. If not, we need to back off, check that the 'wqp' hasn't
4743 * been invalidated, and try to re-take the locks.
4744 */
4745 assert(!waitq_irq_safe(waitq));
4746
4747 if (waitq_lock_try(waitq))
4748 goto call_iterator;
4749
4750 if (!wqp_is_valid(wqp))
4751 return WQ_ITERATE_RESTART;
4752
4753 /* We are passed a prepost object with a reference on it. If neither
4754 * the waitq set nor the waitq require interrupts disabled, then we
4755 * may block on the delay(1) call below. We can't hold a prepost
4756 * object reference while blocking, so we have to give that up as well
4757 * and re-acquire it when we come back.
4758 */
4759 wqp_id = wqp->wqp_prepostid.id;
4760 wq_prepost_put(wqp);
4761 waitq_set_unlock(wqset);
4762 wqdbg_v("dropped set:%p lock waiting for wqp:%p (0x%llx -> wq:%p)",
4763 wqset, wqp, wqp->wqp_prepostid.id, waitq);
4764 delay(1);
4765 waitq_set_lock(wqset);
4766 wqp = wq_prepost_get(wqp_id);
4767 if (!wqp)
4768 /* someone cleared preposts while we slept! */
4769 return WQ_ITERATE_DROPPED;
4770
4771 /*
4772 * TODO:
4773 * This differs slightly from the logic in ipc_mqueue.c:
4774 * ipc_mqueue_receive_on_thread(). There, if the waitq lock
4775 * can't be obtained, the prepost link is placed on the back of
4776 * the chain, and the iteration starts from the beginning. Here,
4777 * we just restart from the beginning.
4778 */
4779 return WQ_ITERATE_RESTART;
4780
4781call_iterator:
4782 if (!wqp_is_valid(wqp)) {
4783 ret = WQ_ITERATE_RESTART;
4784 goto out_unlock;
4785 }
4786
4787 /* call the external callback */
4788 ret = wctx->it(wctx->ctx, waitq, wqset);
4789
4790 if (ret == WQ_ITERATE_BREAK_KEEP_LOCKED) {
4791 ret = WQ_ITERATE_BREAK;
4792 goto out;
4793 }
4794
4795out_unlock:
4796 waitq_unlock(waitq);
4797out:
4798 return ret;
4799}
4800
4801/**
4802 * iterator over all sets to which the given waitq has been linked
4803 *
4804 * Conditions:
4805 * 'waitq' is locked
4806 */
4807int waitq_iterate_sets(struct waitq *waitq, void *ctx, waitq_iterator_t it)
4808{
4809 int ret;
4810 struct wq_it_ctx wctx = {
4811 .input = (void *)waitq,
4812 .ctx = ctx,
4813 .it = it,
4814 };
4815 if (!it || !waitq)
4816 return KERN_INVALID_ARGUMENT;
4817
4818 ret = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
4819 WQL_WQS, (void *)&wctx, waitq_iterate_sets_cb);
4820 if (ret == WQ_ITERATE_CONTINUE)
4821 ret = WQ_ITERATE_SUCCESS;
4822 return ret;
4823}
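
/*
 * Illustrative sketch, not part of the original source: a minimal
 * iterator that counts the sets a waitq belongs to. The callback
 * signature is inferred from the way 'wctx->it' is invoked above, and a
 * non-IRQ-safe waitq is assumed; 'example_count_sets_cb' and
 * 'example_count_sets' are hypothetical names.
 */
#if 0 /* example only */
static int example_count_sets_cb(void *ctx, struct waitq *waitq,
				 struct waitq_set *wqset)
{
	(void)waitq;
	(void)wqset;
	*(int *)ctx += 1;
	return WQ_ITERATE_CONTINUE;
}

static int example_count_sets(struct waitq *wq)
{
	int nsets = 0;

	waitq_lock(wq);	/* waitq_iterate_sets() expects 'wq' locked */
	(void)waitq_iterate_sets(wq, &nsets, example_count_sets_cb);
	waitq_unlock(wq);

	return nsets;
}
#endif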
4824
4825/**
4826 * iterator over all preposts in the given wqset
4827 *
4828 * Conditions:
4829 * 'wqset' is locked
4830 */
4831int waitq_set_iterate_preposts(struct waitq_set *wqset,
39037602 4832 void *ctx, waitq_iterator_t it)
4833{
4834 struct wq_it_ctx wctx = {
4835 .input = (void *)wqset,
4836 .ctx = ctx,
4837 .it = it,
4838 };
4839 if (!it || !wqset)
4840 return WQ_ITERATE_INVALID;
4841
4842 assert(waitq_held(&wqset->wqset_q));
4843
4844 return wq_prepost_foreach_locked(wqset, (void *)&wctx,
4845 wqset_iterate_prepost_cb);
4846}
4847
4848
4849/* ----------------------------------------------------------------------
4850 *
4851 * Higher-level APIs
4852 *
4853 * ---------------------------------------------------------------------- */
4854
39037602 4855
4856/**
4857 * declare a thread's intent to wait on 'waitq' for 'wait_event'
4858 *
4859 * Conditions:
4860 * 'waitq' is not locked
4861 */
4862wait_result_t waitq_assert_wait64(struct waitq *waitq,
4863 event64_t wait_event,
4864 wait_interrupt_t interruptible,
39037602 4865 uint64_t deadline)
3e170ce0 4866{
3e170ce0 4867 thread_t thread = current_thread();
39037602 4868 wait_result_t ret;
4869 spl_t s;
4870
4871 if (!waitq_valid(waitq))
4872 panic("Invalid waitq: %p", waitq);
4873
4874 if (waitq_irq_safe(waitq))
4875 s = splsched();
3e170ce0 4876
39037602 4877 waitq_lock(waitq);
4878 ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
4879 TIMEOUT_URGENCY_SYS_NORMAL,
4880 deadline, TIMEOUT_NO_LEEWAY, thread);
4881 waitq_unlock(waitq);
4882
4883 if (waitq_irq_safe(waitq))
4884 splx(s);
4885
4886 return ret;
4887}
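
/*
 * Illustrative sketch, not part of the original source: the usual
 * wait-side pairing of waitq_assert_wait64() with thread_block(). A
 * deadline of 0 is taken to mean "no timeout", and CAST_EVENT64_T is
 * assumed from the waitq header; 'example_wait_for_event' and
 * 'event_addr' are hypothetical names.
 */
#if 0 /* example only */
static wait_result_t example_wait_for_event(struct waitq *wq,
					    void *event_addr)
{
	wait_result_t wr;

	wr = waitq_assert_wait64(wq, CAST_EVENT64_T(event_addr),
				 THREAD_UNINT, 0);
	if (wr == THREAD_WAITING)
		wr = thread_block(THREAD_CONTINUE_NULL);

	return wr;
}
#endif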
4888
4889/**
4890 * declare a thread's intent to wait on 'waitq' for 'wait_event'
4891 *
4892 * Conditions:
4893 * 'waitq' is not locked
4894 * will disable and re-enable interrupts while locking current_thread()
4895 */
4896wait_result_t waitq_assert_wait64_leeway(struct waitq *waitq,
4897 event64_t wait_event,
4898 wait_interrupt_t interruptible,
4899 wait_timeout_urgency_t urgency,
4900 uint64_t deadline,
4901 uint64_t leeway)
4902{
4903 wait_result_t ret;
4904 thread_t thread = current_thread();
4905 spl_t s;
4906
4907 if (!waitq_valid(waitq))
4908 panic("Invalid waitq: %p", waitq);
4909
4910 if (waitq_irq_safe(waitq))
4911 s = splsched();
3e170ce0 4912
39037602 4913 waitq_lock(waitq);
4914 ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
4915 urgency, deadline, leeway, thread);
4916 waitq_unlock(waitq);
4917
4918 if (waitq_irq_safe(waitq))
4919 splx(s);
4920
4921 return ret;
4922}
4923
4924/**
4925 * wakeup a single thread from a waitq that's waiting for a given event
4926 *
4927 * Conditions:
4928 * 'waitq' is not locked
4929 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
4930 * may disable and re-enable interrupts
4931 *
4932 * Notes:
4933 * will _not_ block if waitq is global (or not a member of any set)
4934 */
4935kern_return_t waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event,
4936 wait_result_t result, int priority)
4937{
4938 kern_return_t kr;
4939 uint64_t reserved_preposts = 0;
4940 spl_t spl;
4941
4942 if (!waitq_valid(waitq))
4943 panic("Invalid waitq: %p", waitq);
4944
4945 if (!waitq_irq_safe(waitq)) {
4946 /* reserve preposts in addition to locking the waitq */
4947 reserved_preposts = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
4948 } else {
4949 spl = splsched();
4950 waitq_lock(waitq);
4951 }
4952
4953 /* waitq is locked upon return */
4954 kr = waitq_wakeup64_one_locked(waitq, wake_event, result,
4955 &reserved_preposts, priority, WAITQ_UNLOCK);
4956
4957 if (waitq_irq_safe(waitq))
4958 splx(spl);
4959
4960 /* release any left-over prepost object (won't block/lock anything) */
4961 waitq_prepost_release_reserve(reserved_preposts);
4962
4963 return kr;
4964}
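
/*
 * Illustrative sketch, not part of the original source: the wake-side
 * counterpart of the wait sketch above, waking a single waiter for the
 * same event. WAITQ_ALL_PRIORITIES and CAST_EVENT64_T are assumed from
 * the waitq header; 'example_wake_one' and 'event_addr' are hypothetical.
 */
#if 0 /* example only */
static void example_wake_one(struct waitq *wq, void *event_addr)
{
	kern_return_t kr;

	kr = waitq_wakeup64_one(wq, CAST_EVENT64_T(event_addr),
				THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
	if (kr == KERN_NOT_WAITING) {
		/* no thread was waiting for this event */
	}
}
#endif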
4965
4966/**
4967 * wakeup all threads from a waitq that are waiting for a given event
4968 *
4969 * Conditions:
4970 * 'waitq' is not locked
4971 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
4972 * may disable and re-enable interrupts
4973 *
4974 * Notes:
4975 * will _not_ block if waitq is global (or not a member of any set)
4976 */
4977kern_return_t waitq_wakeup64_all(struct waitq *waitq,
4978 event64_t wake_event,
4979 wait_result_t result,
4980 int priority)
4981{
4982 kern_return_t ret;
4983 uint64_t reserved_preposts = 0;
4984 spl_t s;
4985
4986 if (!waitq_valid(waitq))
4987 panic("Invalid waitq: %p", waitq);
4988
4989 if (!waitq_irq_safe(waitq)) {
4990 /* reserve preposts in addition to locking waitq */
4991 reserved_preposts = waitq_prepost_reserve(waitq, 0,
4992 WAITQ_KEEP_LOCKED);
4993 } else {
4994 s = splsched();
4995 waitq_lock(waitq);
4996 }
4997
4998 ret = waitq_wakeup64_all_locked(waitq, wake_event, result,
4999 &reserved_preposts, priority,
5000 WAITQ_UNLOCK);
5001
5002 if (waitq_irq_safe(waitq))
5003 splx(s);
5004
5005 waitq_prepost_release_reserve(reserved_preposts);
5006
5007 return ret;
5008
5009}
5010
5011/**
5012 * wakeup a specific thread iff it's waiting on 'waitq' for 'wake_event'
5013 *
5014 * Conditions:
5015 * 'waitq' is not locked
5016 *
5017 * Notes:
5018 * May temporarily disable and re-enable interrupts
5019 */
5020kern_return_t waitq_wakeup64_thread(struct waitq *waitq,
5021 event64_t wake_event,
5022 thread_t thread,
5023 wait_result_t result)
5024{
5025 kern_return_t ret;
5026 spl_t s, th_spl;
5027
5028 if (!waitq_valid(waitq))
5029 panic("Invalid waitq: %p", waitq);
5030
5031 if (waitq_irq_safe(waitq))
5032 s = splsched();
5033 waitq_lock(waitq);
5034
5035 ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl);
5036 /* on success, returns 'thread' locked */
5037
5038 waitq_unlock(waitq);
5039
5040 if (ret == KERN_SUCCESS) {
5041 ret = thread_go(thread, result);
5042 assert(ret == KERN_SUCCESS);
5043 thread_unlock(thread);
5044 splx(th_spl);
5045 waitq_stats_count_wakeup(waitq);
5046 } else {
5047 ret = KERN_NOT_WAITING;
5048 waitq_stats_count_fail(waitq);
5049 }
5050
5051 if (waitq_irq_safe(waitq))
5052 splx(s);
5053
5054 return ret;
5055}
5056
5057/**
5058 * wakeup a single thread from a waitq that's waiting for a given event
5059 * and return a reference to that thread
5060 * returns THREAD_NULL if no thread was waiting
5061 *
5062 * Conditions:
5063 * 'waitq' is not locked
5064 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
5065 * may disable and re-enable interrupts
5066 *
5067 * Notes:
5068 * will _not_ block if waitq is global (or not a member of any set)
5069 */
5070thread_t
5071waitq_wakeup64_identify(struct waitq *waitq,
5072 event64_t wake_event,
5073 wait_result_t result,
5074 int priority)
5075{
5076 uint64_t reserved_preposts = 0;
5077 spl_t thread_spl = 0;
5078 thread_t thread;
5079 spl_t spl;
5080
5081 if (!waitq_valid(waitq))
5082 panic("Invalid waitq: %p", waitq);
5083
5084 if (!waitq_irq_safe(waitq)) {
5085 /* reserve preposts in addition to locking waitq */
5086 reserved_preposts = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
5087 } else {
5088 spl = splsched();
5089 waitq_lock(waitq);
5090 }
5091
5092 thread = waitq_wakeup64_identify_locked(waitq, wake_event, result,
5093 &thread_spl, &reserved_preposts,
5094 priority, WAITQ_UNLOCK);
5095 /* waitq is unlocked, thread is locked */
5096
5097 if (thread != THREAD_NULL) {
5098 thread_reference(thread);
5099 thread_unlock(thread);
5100 splx(thread_spl);
5101 }
5102
5103 if (waitq_irq_safe(waitq))
5104 splx(spl);
5105
5106 /* release any left-over prepost object (won't block/lock anything) */
5107 waitq_prepost_release_reserve(reserved_preposts);
5108
5109 /* returns +1 ref to running thread or THREAD_NULL */
5110 return thread;
5111}
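
/*
 * Illustrative sketch, not part of the original source: consuming the +1
 * thread reference returned by waitq_wakeup64_identify(). CAST_EVENT64_T
 * and WAITQ_ALL_PRIORITIES are assumed from the waitq header;
 * 'example_wake_and_identify' and 'event_addr' are hypothetical names.
 */
#if 0 /* example only */
static void example_wake_and_identify(struct waitq *wq, void *event_addr)
{
	thread_t thread;

	thread = waitq_wakeup64_identify(wq, CAST_EVENT64_T(event_addr),
					 THREAD_AWAKENED,
					 WAITQ_ALL_PRIORITIES);
	if (thread != THREAD_NULL) {
		/* ... inspect or account for the woken thread ... */
		thread_deallocate(thread);	/* drop the +1 reference */
	}
}
#endif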
5112