1 /*
2 * Copyright (c) 2015-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 /*
58 * un-comment the following lines to debug the link/prepost tables
59 * NOTE: this expands each element by ~40 bytes
60 */
61 //#define KEEP_WAITQ_LINK_STATS
62 //#define KEEP_WAITQ_PREPOST_STATS
63
64 #include <kern/ast.h>
65 #include <kern/backtrace.h>
66 #include <kern/kern_types.h>
67 #include <kern/ltable.h>
68 #include <kern/mach_param.h>
69 #include <kern/queue.h>
70 #include <kern/sched_prim.h>
71 #include <kern/simple_lock.h>
72 #include <kern/spl.h>
73 #include <kern/waitq.h>
74 #include <kern/zalloc.h>
75 #include <kern/policy_internal.h>
76 #include <kern/turnstile.h>
77
78 #include <os/hash.h>
79 #include <libkern/OSAtomic.h>
80 #include <mach/sync_policy.h>
81 #include <vm/vm_kern.h>
82
83 #include <sys/kdebug.h>
84
85 #if defined(KEEP_WAITQ_LINK_STATS) || defined(KEEP_WAITQ_PREPOST_STATS)
86 # if !CONFIG_LTABLE_STATS
87 # error "You must configure LTABLE_STATS to use WAITQ_[LINK|PREPOST]_STATS"
88 # endif
89 # if !CONFIG_WAITQ_STATS
90 # error "You must configure WAITQ_STATS to use WAITQ_[LINK|PREPOST]_STATS"
91 # endif
92 #endif
93
94 #if CONFIG_WAITQ_DEBUG
95 #define wqdbg(fmt, ...) \
96 printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__)
97 #else
98 #define wqdbg(fmt, ...) do { } while (0)
99 #endif
100
101 #ifdef WAITQ_VERBOSE_DEBUG
102 #define wqdbg_v(fmt, ...) \
103 printf("WQ[v:%s]: " fmt "\n", __func__, ## __VA_ARGS__)
104 #else
105 #define wqdbg_v(fmt, ...) do { } while (0)
106 #endif
107
108 #define wqinfo(fmt, ...) \
109 printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__)
110
111 #define wqerr(fmt, ...) \
112 printf("WQ[%s] ERROR: " fmt "\n", __func__, ## __VA_ARGS__)
113
114 /*
115 * file-static functions / data
116 */
117 static thread_t waitq_select_one_locked(struct waitq *waitq, event64_t event,
118 uint64_t *reserved_preposts,
119 int priority, spl_t *spl);
120
121 static kern_return_t waitq_select_thread_locked(struct waitq *waitq,
122 event64_t event,
123 thread_t thread, spl_t *spl);
124
125 #define WAITQ_SET_MAX (task_max * 3)
126 static zone_t waitq_set_zone;
127
128
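/*
 * P2ROUNDUP rounds 'x' up to the next multiple of 'align' (which must be a
 * power of two) using the two's-complement identity -(-x & -align),
 * e.g. P2ROUNDUP(5000, 4096) == 8192 and P2ROUNDUP(8192, 4096) == 8192.
 * ROUNDDOWN truncates 'x' down to a multiple of 'y'.
 */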
129 #define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
130 #define ROUNDDOWN(x, y) (((x)/(y))*(y))
131
132
133 #if CONFIG_LTABLE_STATS || CONFIG_WAITQ_STATS
134 static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip);
135 #endif
136
137 lck_grp_t waitq_lck_grp;
138
139 #if __arm64__
140
141 #define waitq_lock_to(wq, to) \
142 (hw_lock_bit_to(&(wq)->waitq_interlock, LCK_ILOCK, to, &waitq_lck_grp))
143
144 #define waitq_lock_unlock(wq) \
145 (hw_unlock_bit(&(wq)->waitq_interlock, LCK_ILOCK))
146
147 #define waitq_lock_init(wq) \
148 (wq->waitq_interlock = 0)
149
150 #else
151
152 #define waitq_lock_to(wq, to) \
153 (hw_lock_to(&(wq)->waitq_interlock, to, &waitq_lck_grp))
154
155 #define waitq_lock_unlock(wq) \
156 (hw_lock_unlock(&(wq)->waitq_interlock))
157
158 #define waitq_lock_init(wq) \
159 (hw_lock_init(&(wq)->waitq_interlock))
160
161 #endif /* __arm64__ */
162
163 /*
164 * Prepost callback function for specially marked waitq sets
165 * (prepost alternative)
166 */
167 extern void waitq_set__CALLING_PREPOST_HOOK__(waitq_set_prepost_hook_t *ctx);
168
169 #define DEFAULT_MIN_FREE_TABLE_ELEM 100
170 static uint32_t g_min_free_table_elem;
171 static uint32_t g_min_free_cache;
172
173
174 /* ----------------------------------------------------------------------
175 *
176 * SetID Link Table Implementation
177 *
178 * ---------------------------------------------------------------------- */
179 static struct link_table g_wqlinktable;
180
181 enum wq_link_type {
182 WQL_ALL = -1,
183 WQL_FREE = LT_FREE,
184 WQL_WQS = LT_ELEM,
185 WQL_LINK = LT_LINK,
186 };
187
188 struct waitq_link {
189 struct lt_elem wqte;
190
191 union {
192 /* wqt_type == WQL_WQS (LT_ELEM) */
193 struct {
194 struct waitq_set *wql_set;
195 /* uint64_t sl_prepost_id; */
196 } wql_wqs;
197
198 /* wqt_type == WQL_LINK (LT_LINK) */
199 struct {
200 uint64_t left_setid;
201 uint64_t right_setid;
202 } wql_link;
203 };
204 #ifdef KEEP_WAITQ_LINK_STATS
205 thread_t sl_alloc_th;
206 task_t sl_alloc_task;
207 uintptr_t sl_alloc_bt[NWAITQ_BTFRAMES];
208 uint64_t sl_alloc_ts;
209 uintptr_t sl_invalidate_bt[NWAITQ_BTFRAMES];
210 uint64_t sl_invalidate_ts;
211 uintptr_t sl_mkvalid_bt[NWAITQ_BTFRAMES];
212 uint64_t sl_mkvalid_ts;
213 uint64_t sl_free_ts;
214 #endif
215 };
216 #if !defined(KEEP_WAITQ_LINK_STATS)
217 static_assert((sizeof(struct waitq_link) & (sizeof(struct waitq_link) - 1)) == 0,
218 "waitq_link struct must be a power of two!");
219 #endif
220
221 #define wql_refcnt(link) \
222 (lt_bits_refcnt((link)->wqte.lt_bits))
223
224 #define wql_type(link) \
225 (lt_bits_type((link)->wqte.lt_bits))
226
227 #define wql_mkvalid(link) \
228 do { \
229 lt_elem_mkvalid(&(link)->wqte); \
230 wql_do_mkvalid_stats(&(link)->wqte); \
231 } while (0)
232
233 #define wql_is_valid(link) \
234 lt_bits_valid((link)->wqte.lt_bits)
235
236 #define wql_setid wqte.lt_id
237
238 #define WQL_WQS_POISON ((void *)(0xf00df00d))
239 #define WQL_LINK_POISON (0x0bad0badffffffffull)
240
241 static void
242 wql_poison(struct link_table *table, struct lt_elem *elem)
243 {
244 struct waitq_link *link = (struct waitq_link *)elem;
245 (void)table;
246
247 switch (wql_type(link)) {
248 case WQL_WQS:
249 link->wql_wqs.wql_set = WQL_WQS_POISON;
250 break;
251 case WQL_LINK:
252 link->wql_link.left_setid = WQL_LINK_POISON;
253 link->wql_link.right_setid = WQL_LINK_POISON;
254 break;
255 default:
256 break;
257 }
258 #ifdef KEEP_WAITQ_LINK_STATS
259 memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
260 link->sl_alloc_ts = 0;
261 memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt));
262 link->sl_mkvalid_ts = 0;
263
264 link->sl_alloc_th = THREAD_NULL;
265 /* leave the sl_alloc_task in place for debugging */
266
267 link->sl_free_ts = mach_absolute_time();
268 #endif
269 }
270
271 #ifdef KEEP_WAITQ_LINK_STATS
272 static __inline__ void
273 wql_do_alloc_stats(struct lt_elem *elem)
274 {
275 if (elem) {
276 struct waitq_link *link = (struct waitq_link *)elem;
277 memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
278 waitq_grab_backtrace(link->sl_alloc_bt, 0);
279 link->sl_alloc_th = current_thread();
280 link->sl_alloc_task = current_task();
281
282 assert(link->sl_alloc_ts == 0);
283 link->sl_alloc_ts = mach_absolute_time();
284
285 memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
286 link->sl_invalidate_ts = 0;
287 }
288 }
289
290 static __inline__ void
291 wql_do_invalidate_stats(struct lt_elem *elem)
292 {
293 struct waitq_link *link = (struct waitq_link *)elem;
294
295 if (!elem) {
296 return;
297 }
298
299 assert(link->sl_mkvalid_ts > 0);
300
301 memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
302 link->sl_invalidate_ts = mach_absolute_time();
303 waitq_grab_backtrace(link->sl_invalidate_bt, 0);
304 }
305
306 static __inline__ void
307 wql_do_mkvalid_stats(struct lt_elem *elem)
308 {
309 struct waitq_link *link = (struct waitq_link *)elem;
310
311 if (!elem) {
312 return;
313 }
314
315 memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt));
316 link->sl_mkvalid_ts = mach_absolute_time();
317 waitq_grab_backtrace(link->sl_mkvalid_bt, 0);
318 }
319 #else
320 #define wql_do_alloc_stats(e)
321 #define wql_do_invalidate_stats(e)
322 #define wql_do_mkvalid_stats(e)
323 #endif /* KEEP_WAITQ_LINK_STATS */
324
325 static void
326 wql_init(void)
327 {
328 uint32_t tablesz = 0, max_links = 0;
329
330 if (PE_parse_boot_argn("wql_tsize", &tablesz, sizeof(tablesz)) != TRUE) {
331 tablesz = (uint32_t)g_lt_max_tbl_size;
332 }
333
334 tablesz = P2ROUNDUP(tablesz, PAGE_SIZE);
335 max_links = tablesz / sizeof(struct waitq_link);
336 assert(max_links > 0 && tablesz > 0);
337
338 /* we have a restricted index range */
339 if (max_links > (LT_IDX_MAX + 1)) {
340 max_links = LT_IDX_MAX + 1;
341 }
342
343 wqinfo("init linktable with max:%d elements (%d bytes)",
344 max_links, tablesz);
345 ltable_init(&g_wqlinktable, "wqslab.wql", max_links,
346 sizeof(struct waitq_link), wql_poison);
347 }
348
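/*
 * Grow the link table if the number of free elements has dropped below
 * the configured minimum (g_min_free_table_elem).
 */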
349 static void
350 wql_ensure_free_space(void)
351 {
352 if (g_wqlinktable.nelem - g_wqlinktable.used_elem < g_min_free_table_elem) {
353 /*
354 * we don't hold locks on these values, so check for underflow
355 */
356 if (g_wqlinktable.used_elem <= g_wqlinktable.nelem) {
357 wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d",
358 g_wqlinktable.nelem, g_wqlinktable.used_elem,
359 g_min_free_table_elem);
360 ltable_grow(&g_wqlinktable, g_min_free_table_elem);
361 }
362 }
363 }
364
365 static struct waitq_link *
366 wql_alloc_link(int type)
367 {
368 struct lt_elem *elem;
369
370 elem = ltable_alloc_elem(&g_wqlinktable, type, 1, 0);
371 wql_do_alloc_stats(elem);
372 return (struct waitq_link *)elem;
373 }
374
375 static void
376 wql_realloc_link(struct waitq_link *link, int type)
377 {
378 ltable_realloc_elem(&g_wqlinktable, &link->wqte, type);
379 #ifdef KEEP_WAITQ_LINK_STATS
380 memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
381 link->sl_alloc_ts = 0;
382 wql_do_alloc_stats(&link->wqte);
383
384 memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
385 link->sl_invalidate_ts = 0;
386 #endif
387 }
388
389 static void
390 wql_invalidate(struct waitq_link *link)
391 {
392 lt_elem_invalidate(&link->wqte);
393 wql_do_invalidate_stats(&link->wqte);
394 }
395
396 static struct waitq_link *
397 wql_get_link(uint64_t setid)
398 {
399 struct lt_elem *elem;
400
401 elem = ltable_get_elem(&g_wqlinktable, setid);
402 return (struct waitq_link *)elem;
403 }
404
405 static void
406 wql_put_link(struct waitq_link *link)
407 {
408 if (!link) {
409 return;
410 }
411 ltable_put_elem(&g_wqlinktable, (struct lt_elem *)link);
412 }
413
414 static struct waitq_link *
415 wql_get_reserved(uint64_t setid, int type)
416 {
417 struct lt_elem *elem;
418
419 elem = lt_elem_list_first(&g_wqlinktable, setid);
420 if (!elem) {
421 return NULL;
422 }
423 ltable_realloc_elem(&g_wqlinktable, elem, type);
424 return (struct waitq_link *)elem;
425 }
426
427
428 static inline int waitq_maybe_remove_link(struct waitq *waitq,
429 uint64_t setid,
430 struct waitq_link *parent,
431 struct waitq_link *left,
432 struct waitq_link *right);
433
434 enum {
435 LINK_WALK_ONE_LEVEL = 0,
436 LINK_WALK_FULL_DAG = 1,
437 LINK_WALK_FULL_DAG_UNLOCKED = 2,
438 };
439
440 typedef int (*wql_callback_func)(struct waitq *waitq, void *ctx,
441 struct waitq_link *link);
442
443 /**
444 * walk_waitq_links: walk all table elements (of type 'link_type') pointed to by 'setid'
445 *
446 * Conditions:
447 * waitq is locked (or NULL)
448 * 'setid' is managed by 'waitq'
449 * this could be direct (waitq->waitq_set_id == setid)
450 * OR indirect (setid is the left/right ID in a LINK chain,
451 * whose root is waitq->waitq_set_id)
452 *
453 * Notes:
454 * This function uses recursion to walk the set of table elements
455 * pointed to by 'setid'. For each element encountered, 'cb' will be
456 * called. If non-zero, the return value of this callback function can
457 * early-out of the table walk.
458 *
459 * For each link element encountered, the function takes a reference to
460 * it. The reference is dropped only after the callback and any recursion
461 * has completed.
462 *
463 * The assumed table/link/tree structure:
464 *             'setid'
465 *             /    \
466 *            /      \
467 *        L(LINK)      R(LINK)
468 *         /\             /\
469 *        /  \           /  \
470 *       /    \       Rl(*)  Rr(*)
471 *    Ll(*)  Lr(*)      /\     /\
472 *     /\      /\      ... ...  ... ...
473 *    ... ...  ... ...
474 *      \
475 *       WQS(wqset_q.waitq_setid == Sx)
476 *       (waitq set is a member of setid 'Sx')
477 *
478 *             'Sx'
479 *             /    \
480 *            /      \
481 *        L(LINK)      R(LINK)
482 *         /\             /\
483 *       ...  ...       ...  ...
484 *
485 * The basic algorithm is as follows:
486 * *) take a reference to the table object pointed to by 'setid'
487 * *) if appropriate, call 'cb' (potentially early-out on non-zero return)
488 * *) if the link object points to a waitq set, and the walk type
489 * is 'FULL_DAG' (full directed-acyclic-graph), then try to lock
490 * the associated waitq set object and recursively walk all sets to
491 * which that set belongs. This is a DFS of the tree structure.
492 * *) recurse down the left side of the tree (following the
493 * 'left_setid' pointer in the link object)
494 * *) recurse down the right side of the tree (following the
495 * 'right_setid' pointer in the link object)
496 */
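/*
 * Illustrative (hypothetical) usage sketch, not part of the original code:
 * a callback that counts the WQS links reachable from a waitq's set ID.
 *
 *     static int count_wqs_cb(struct waitq *waitq, void *ctx,
 *                             struct waitq_link *link)
 *     {
 *             (void)waitq;
 *             (void)link;
 *             *(int *)ctx += 1;
 *             return WQ_ITERATE_CONTINUE;
 *     }
 *
 *     With 'waitq' locked:
 *
 *     int nsets = 0;
 *     walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
 *                      WQL_WQS, &nsets, count_wqs_cb);
 */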
497 static __attribute__((noinline))
498 int
499 walk_waitq_links(int walk_type, struct waitq *waitq,
500 uint64_t setid, int link_type,
501 void *ctx, wql_callback_func cb)
502 {
503 struct waitq_link *link;
504 uint64_t nextid;
505 int wqltype;
506
507 link = wql_get_link(setid);
508
509 /* invalid link */
510 if (!link) {
511 return WQ_ITERATE_CONTINUE;
512 }
513
514 setid = nextid = 0;
515 wqltype = wql_type(link);
516 if (wqltype == WQL_LINK) {
517 setid = link->wql_link.left_setid;
518 nextid = link->wql_link.right_setid;
519 }
520
521 /*
522 * Make the callback only on specified link_type (or all links)
523 * Note that after the callback, the link object may be
524 * invalid. The only valid thing we can do is put our
525 * reference to it (which may put it back on the free list)
526 */
527 if (link_type == WQL_ALL || link_type == wqltype) {
528 /* allow the callback to early-out */
529 int ret = cb(waitq, ctx, link);
530 if (ret != WQ_ITERATE_CONTINUE) {
531 wql_put_link(link);
532 return ret;
533 }
534 }
535
536 if (wqltype == WQL_WQS &&
537 (walk_type == LINK_WALK_FULL_DAG ||
538 walk_type == LINK_WALK_FULL_DAG_UNLOCKED)) {
539 /*
540 * Recurse down any sets to which this wait queue set was
541 * added. We do this just before we put our reference to
542 * the link object (which may free it).
543 */
544 struct waitq_set *wqset = link->wql_wqs.wql_set;
545 int ret = WQ_ITERATE_CONTINUE;
546 int should_unlock = 0;
547 uint64_t wqset_setid = 0;
548
549 if (waitq_set_is_valid(wqset) && walk_type == LINK_WALK_FULL_DAG) {
550 assert(!waitq_irq_safe(&wqset->wqset_q));
551 waitq_set_lock(wqset);
552 should_unlock = 1;
553 }
554
555 /*
556 * verify the linked waitq set as it could have been
557 * invalidated before we grabbed the lock!
558 */
559 if (wqset->wqset_id != link->wql_setid.id) {
560 /* This is the bottom of the tree: just get out */
561 if (should_unlock) {
562 waitq_set_unlock(wqset);
563 }
564 wql_put_link(link);
565 return WQ_ITERATE_CONTINUE;
566 }
567
568 wqset_setid = wqset->wqset_q.waitq_set_id;
569
570 if (wqset_setid > 0) {
571 ret = walk_waitq_links(walk_type, &wqset->wqset_q,
572 wqset_setid, link_type, ctx, cb);
573 }
574 if (should_unlock) {
575 waitq_set_unlock(wqset);
576 }
577 if (ret != WQ_ITERATE_CONTINUE) {
578 wql_put_link(link);
579 return ret;
580 }
581 }
582
583 wql_put_link(link);
584
585 /* recurse down left side of the tree */
586 if (setid) {
587 int ret = walk_waitq_links(walk_type, waitq, setid, link_type, ctx, cb);
588 if (ret != WQ_ITERATE_CONTINUE) {
589 return ret;
590 }
591 }
592
593 /* recurse down right side of the tree */
594 if (nextid) {
595 return walk_waitq_links(walk_type, waitq, nextid, link_type, ctx, cb);
596 }
597
598 return WQ_ITERATE_CONTINUE;
599 }
600
601 /* ----------------------------------------------------------------------
602 *
603 * Prepost Link Table Implementation
604 *
605 * ---------------------------------------------------------------------- */
606 static struct link_table g_prepost_table;
607
608 enum wq_prepost_type {
609 WQP_FREE = LT_FREE,
610 WQP_WQ = LT_ELEM,
611 WQP_POST = LT_LINK,
612 };
613
614 struct wq_prepost {
615 struct lt_elem wqte;
616
617 union {
618 /* wqt_type == WQP_WQ (LT_ELEM) */
619 struct {
620 struct waitq *wqp_wq_ptr;
621 } wqp_wq;
622 /* wqt_type == WQP_POST (LT_LINK) */
623 struct {
624 uint64_t wqp_next_id;
625 uint64_t wqp_wq_id;
626 } wqp_post;
627 };
628 #ifdef KEEP_WAITQ_PREPOST_STATS
629 thread_t wqp_alloc_th;
630 task_t wqp_alloc_task;
631 uintptr_t wqp_alloc_bt[NWAITQ_BTFRAMES];
632 #endif
633 };
634 #if !defined(KEEP_WAITQ_PREPOST_STATS)
635 static_assert((sizeof(struct wq_prepost) & (sizeof(struct wq_prepost) - 1)) == 0,
636 "wq_prepost struct must be a power of two!");
637 #endif
638
639 #define wqp_refcnt(wqp) \
640 (lt_bits_refcnt((wqp)->wqte.lt_bits))
641
642 #define wqp_type(wqp) \
643 (lt_bits_type((wqp)->wqte.lt_bits))
644
645 #define wqp_set_valid(wqp) \
646 lt_elem_mkvalid(&(wqp)->wqte)
647
648 #define wqp_is_valid(wqp) \
649 lt_bits_valid((wqp)->wqte.lt_bits)
650
651 #define wqp_prepostid wqte.lt_id
652
653 #define WQP_WQ_POISON (0x0bad0badffffffffull)
654 #define WQP_POST_POISON (0xf00df00df00df00d)
655
656 static void
657 wqp_poison(struct link_table *table, struct lt_elem *elem)
658 {
659 struct wq_prepost *wqp = (struct wq_prepost *)elem;
660 (void)table;
661
662 switch (wqp_type(wqp)) {
663 case WQP_WQ:
664 break;
665 case WQP_POST:
666 wqp->wqp_post.wqp_next_id = WQP_POST_POISON;
667 wqp->wqp_post.wqp_wq_id = WQP_POST_POISON;
668 break;
669 default:
670 break;
671 }
672 }
673
674 #ifdef KEEP_WAITQ_PREPOST_STATS
675 static __inline__ void
676 wqp_do_alloc_stats(struct lt_elem *elem)
677 {
678 if (!elem) {
679 return;
680 }
681
682 struct wq_prepost *wqp = (struct wq_prepost *)elem;
683 uintptr_t alloc_bt[NWAITQ_BTFRAMES]; /* same element count as wqp_alloc_bt */
684
685 waitq_grab_backtrace(alloc_bt, NWAITQ_BTFRAMES);
686
687 /* be sure to take stats for _all_ allocated objects */
688 for (;;) {
689 memcpy(wqp->wqp_alloc_bt, alloc_bt, sizeof(alloc_bt));
690 wqp->wqp_alloc_th = current_thread();
691 wqp->wqp_alloc_task = current_task();
692 wqp = (struct wq_prepost *)lt_elem_list_next(&g_prepost_table, &wqp->wqte);
693 if (!wqp) {
694 break;
695 }
696 }
697 }
698 #else
699 #define wqp_do_alloc_stats(e)
700 #endif /* KEEP_WAITQ_PREPOST_STATS */
701
702 static void
703 wqp_init(void)
704 {
705 uint32_t tablesz = 0, max_wqp = 0;
706
707 if (PE_parse_boot_argn("wqp_tsize", &tablesz, sizeof(tablesz)) != TRUE) {
708 tablesz = (uint32_t)g_lt_max_tbl_size;
709 }
710
711 tablesz = P2ROUNDUP(tablesz, PAGE_SIZE);
712 max_wqp = tablesz / sizeof(struct wq_prepost);
713 assert(max_wqp > 0 && tablesz > 0);
714
715 /* we have a restricted index range */
716 if (max_wqp > (LT_IDX_MAX + 1)) {
717 max_wqp = LT_IDX_MAX + 1;
718 }
719
720 wqinfo("init prepost table with max:%d elements (%d bytes)",
721 max_wqp, tablesz);
722 ltable_init(&g_prepost_table, "wqslab.prepost", max_wqp,
723 sizeof(struct wq_prepost), wqp_poison);
724 }
725
726 /*
727 * Refill the per-CPU cache.
728 */
729 static void
730 wq_prepost_refill_cpu_cache(uint32_t nalloc)
731 {
732 struct lt_elem *new_head, *old_head;
733 struct wqp_cache *cache;
734
735 /* require preemption enabled to allocate elements */
736 if (get_preemption_level() != 0) {
737 return;
738 }
739
740 new_head = ltable_alloc_elem(&g_prepost_table,
741 LT_RESERVED, nalloc, 1);
742 if (new_head == NULL) {
743 return;
744 }
745
746 disable_preemption();
747 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
748
749 /* check once more before putting these elements on the list */
750 if (cache->avail >= WQP_CACHE_MAX) {
751 lt_elem_list_release(&g_prepost_table, new_head, LT_RESERVED);
752 enable_preemption();
753 return;
754 }
755
756 cache->avail += nalloc;
757 if (cache->head == 0 || cache->head == LT_IDX_MAX) {
758 cache->head = new_head->lt_id.id;
759 goto out;
760 }
761
762 old_head = lt_elem_list_first(&g_prepost_table, cache->head);
763 (void)lt_elem_list_link(&g_prepost_table, new_head, old_head);
764 cache->head = new_head->lt_id.id;
765
766 out:
767 enable_preemption();
768 return;
769 }
770
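/*
 * Keep the current CPU's prepost cache topped up, and grow the global
 * prepost table if its free element count drops below the minimum.
 */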
771 static void
772 wq_prepost_ensure_free_space(void)
773 {
774 uint32_t free_elem;
775 uint32_t min_free;
776 struct wqp_cache *cache;
777
778 if (g_min_free_cache == 0) {
779 g_min_free_cache = (WQP_CACHE_MAX * ml_get_max_cpus());
780 }
781
782 /*
783 * Ensure that we always have a pool of per-CPU prepost elements
784 */
785 disable_preemption();
786 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
787 free_elem = cache->avail;
788 enable_preemption();
789
790 if (free_elem < (WQP_CACHE_MAX / 3)) {
791 wq_prepost_refill_cpu_cache(WQP_CACHE_MAX - free_elem);
792 }
793
794 /*
795 * Now ensure that we have a sufficient amount of free table space
796 */
797 free_elem = g_prepost_table.nelem - g_prepost_table.used_elem;
798 min_free = g_min_free_table_elem + g_min_free_cache;
799 if (free_elem < min_free) {
800 /*
801 * we don't hold locks on these values, so check for underflow
802 */
803 if (g_prepost_table.used_elem <= g_prepost_table.nelem) {
804 wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d+%d",
805 g_prepost_table.nelem, g_prepost_table.used_elem,
806 g_min_free_table_elem, g_min_free_cache);
807 ltable_grow(&g_prepost_table, min_free);
808 }
809 }
810 }
811
812 static struct wq_prepost *
813 wq_prepost_alloc(int type, int nelem)
814 {
815 struct lt_elem *elem;
816 struct wq_prepost *wqp;
817 struct wqp_cache *cache;
818
819 if (type != LT_RESERVED) {
820 goto do_alloc;
821 }
822 if (nelem == 0) {
823 return NULL;
824 }
825
826 /*
827 * First try to grab the elements from the per-CPU cache if we are
828 * allocating RESERVED elements
829 */
830 disable_preemption();
831 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
832 if (nelem <= (int)cache->avail) {
833 struct lt_elem *first, *next = NULL;
834 int nalloc = nelem;
835
836 cache->avail -= nelem;
837
838 /* grab the first element */
839 first = lt_elem_list_first(&g_prepost_table, cache->head);
840
841 /* find the last element and re-adjust the cache head */
842 for (elem = first; elem != NULL && nalloc > 0; elem = next) {
843 next = lt_elem_list_next(&g_prepost_table, elem);
844 if (--nalloc == 0) {
845 /* terminate the allocated list */
846 elem->lt_next_idx = LT_IDX_MAX;
847 break;
848 }
849 }
850 assert(nalloc == 0);
851 if (!next) {
852 cache->head = LT_IDX_MAX;
853 } else {
854 cache->head = next->lt_id.id;
855 }
856 /* assert that we don't have mismatched bookkeeping */
857 assert(!(cache->head == LT_IDX_MAX && cache->avail > 0));
858 enable_preemption();
859 elem = first;
860 goto out;
861 }
862 enable_preemption();
863
864 do_alloc:
865 /* fall-back to standard table allocation */
866 elem = ltable_alloc_elem(&g_prepost_table, type, nelem, 0);
867 if (!elem) {
868 return NULL;
869 }
870
871 out:
872 wqp = (struct wq_prepost *)elem;
873 wqp_do_alloc_stats(elem);
874 return wqp;
875 }
876
877 static void
878 wq_prepost_invalidate(struct wq_prepost *wqp)
879 {
880 lt_elem_invalidate(&wqp->wqte);
881 }
882
883 static struct wq_prepost *
884 wq_prepost_get(uint64_t wqp_id)
885 {
886 struct lt_elem *elem;
887
888 elem = ltable_get_elem(&g_prepost_table, wqp_id);
889 return (struct wq_prepost *)elem;
890 }
891
892 static void
893 wq_prepost_put(struct wq_prepost *wqp)
894 {
895 ltable_put_elem(&g_prepost_table, (struct lt_elem *)wqp);
896 }
897
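/*
 * The "r" (reverse-link) helpers below use the underlying lt_elem list
 * linkage to chain each WQP_POST element back to its predecessor, making
 * the prepost list effectively doubly linked: wqp_next_id points forward,
 * and the lt_elem link points backward (see wq_prepost_get_rnext).
 */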
898 static int
899 wq_prepost_rlink(struct wq_prepost *parent, struct wq_prepost *child)
900 {
901 return lt_elem_list_link(&g_prepost_table, &parent->wqte, &child->wqte);
902 }
903
904 static struct wq_prepost *
905 wq_prepost_get_rnext(struct wq_prepost *head)
906 {
907 struct lt_elem *elem;
908 struct wq_prepost *wqp;
909 uint64_t id;
910
911 elem = lt_elem_list_next(&g_prepost_table, &head->wqte);
912 if (!elem) {
913 return NULL;
914 }
915 id = elem->lt_id.id;
916 elem = ltable_get_elem(&g_prepost_table, id);
917
918 if (!elem) {
919 return NULL;
920 }
921 wqp = (struct wq_prepost *)elem;
922 if (elem->lt_id.id != id ||
923 wqp_type(wqp) != WQP_POST ||
924 wqp->wqp_post.wqp_next_id != head->wqp_prepostid.id) {
925 ltable_put_elem(&g_prepost_table, elem);
926 return NULL;
927 }
928
929 return wqp;
930 }
931
932 static void
933 wq_prepost_reset_rnext(struct wq_prepost *wqp)
934 {
935 (void)lt_elem_list_break(&g_prepost_table, &wqp->wqte);
936 }
937
938
939 /**
940 * remove 'wqp' from the prepost list on 'wqset'
941 *
942 * Conditions:
943 * wqset is locked
944 * caller holds a reference on wqp (and is responsible to release it)
945 *
946 * Result:
947 * wqp is invalidated, wqset is potentially updated with a new
948 * prepost ID, and the next element of the prepost list may be
949 * consumed as well (if the list contained only 2 objects)
950 */
951 static int
952 wq_prepost_remove(struct waitq_set *wqset,
953 struct wq_prepost *wqp)
954 {
955 int more_posts = 1;
956 uint64_t next_id = wqp->wqp_post.wqp_next_id;
957 uint64_t wqp_id = wqp->wqp_prepostid.id;
958 struct wq_prepost *prev_wqp, *next_wqp;
959
960 assert(wqp_type(wqp) == WQP_POST);
961 assert(wqset->wqset_q.waitq_prepost == 1);
962
963 if (next_id == wqp_id) {
964 /* the list is singular and becoming empty */
965 wqset->wqset_prepost_id = 0;
966 more_posts = 0;
967 goto out;
968 }
969
970 prev_wqp = wq_prepost_get_rnext(wqp);
971 assert(prev_wqp != NULL);
972 assert(prev_wqp->wqp_post.wqp_next_id == wqp_id);
973 assert(prev_wqp->wqp_prepostid.id != wqp_id);
974 assert(wqp_type(prev_wqp) == WQP_POST);
975
976 if (prev_wqp->wqp_prepostid.id == next_id) {
977 /*
978 * There are two items in the list, and we're removing one. We
979 * only need to keep the WQP_WQ pointer from 'prev_wqp'
980 */
981 wqset->wqset_prepost_id = prev_wqp->wqp_post.wqp_wq_id;
982 wq_prepost_invalidate(prev_wqp);
983 wq_prepost_put(prev_wqp);
984 more_posts = 0;
985 goto out;
986 }
987
988 /* prev->next = next */
989 prev_wqp->wqp_post.wqp_next_id = next_id;
990
991 /* next->prev = prev */
992 next_wqp = wq_prepost_get(next_id);
993 assert(next_wqp != NULL);
994 assert(next_wqp != wqp);
995 assert(next_wqp != prev_wqp);
996 assert(wqp_type(next_wqp) == WQP_POST);
997
998 wq_prepost_reset_rnext(next_wqp);
999 wq_prepost_rlink(next_wqp, prev_wqp);
1000
1001 /* If we remove the head of the list, update the wqset */
1002 if (wqp_id == wqset->wqset_prepost_id) {
1003 wqset->wqset_prepost_id = next_id;
1004 }
1005
1006 wq_prepost_put(prev_wqp);
1007 wq_prepost_put(next_wqp);
1008
1009 out:
1010 wq_prepost_reset_rnext(wqp);
1011 wq_prepost_invalidate(wqp);
1012 return more_posts;
1013 }
1014
1015 static struct wq_prepost *
1016 wq_prepost_rfirst(uint64_t id)
1017 {
1018 struct lt_elem *elem;
1019 elem = lt_elem_list_first(&g_prepost_table, id);
1020 wqp_do_alloc_stats(elem);
1021 return (struct wq_prepost *)(void *)elem;
1022 }
1023
1024 static struct wq_prepost *
1025 wq_prepost_rpop(uint64_t *id, int type)
1026 {
1027 struct lt_elem *elem;
1028 elem = lt_elem_list_pop(&g_prepost_table, id, type);
1029 wqp_do_alloc_stats(elem);
1030 return (struct wq_prepost *)(void *)elem;
1031 }
1032
1033 static void
1034 wq_prepost_release_rlist(struct wq_prepost *wqp)
1035 {
1036 int nelem = 0;
1037 struct wqp_cache *cache;
1038 struct lt_elem *elem;
1039
1040 if (!wqp) {
1041 return;
1042 }
1043
1044 elem = &wqp->wqte;
1045
1046 /*
1047 * These are reserved elements: release them back to the per-cpu pool
1048 * if our cache is running low.
1049 */
1050 disable_preemption();
1051 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
1052 if (cache->avail < WQP_CACHE_MAX) {
1053 struct lt_elem *tmp = NULL;
1054 if (cache->head != LT_IDX_MAX) {
1055 tmp = lt_elem_list_first(&g_prepost_table, cache->head);
1056 }
1057 nelem = lt_elem_list_link(&g_prepost_table, elem, tmp);
1058 cache->head = elem->lt_id.id;
1059 cache->avail += nelem;
1060 enable_preemption();
1061 return;
1062 }
1063 enable_preemption();
1064
1065 /* release these elements back to the main table */
1066 nelem = lt_elem_list_release(&g_prepost_table, elem, LT_RESERVED);
1067
1068 #if CONFIG_WAITQ_STATS
1069 g_prepost_table.nreserved_releases += 1;
1070 OSDecrementAtomic64(&g_prepost_table.nreservations);
1071 #endif
1072 }
1073
1074 typedef int (*wqp_callback_func)(struct waitq_set *wqset,
1075 void *ctx,
1076 struct wq_prepost *wqp,
1077 struct waitq *waitq);
1078
1079 /**
1080 * iterate over a chain of preposts associated with a waitq set.
1081 *
1082 * Conditions:
1083 * wqset is locked
1084 *
1085 * Notes:
1086 * This loop performs automatic prepost chain management / culling, and
1087 * may reset or adjust the waitq set's prepost ID pointer. If you don't
1088 * want this extra processing, you can use wq_prepost_iterate().
1089 */
1090 static int
1091 wq_prepost_foreach_locked(struct waitq_set *wqset,
1092 void *ctx, wqp_callback_func cb)
1093 {
1094 int ret = WQ_ITERATE_SUCCESS;
1095 struct wq_prepost *wqp, *tmp_wqp;
1096
1097 assert(cb != NULL);
1098
1099 if (!wqset || !waitq_set_maybe_preposted(wqset)) {
1100 return WQ_ITERATE_SUCCESS;
1101 }
1102
1103 restart:
1104 wqp = wq_prepost_get(wqset->wqset_prepost_id);
1105 if (!wqp) {
1106 /*
1107 * The prepost object is no longer valid, reset the waitq
1108 * set's prepost id.
1109 */
1110 wqset->wqset_prepost_id = 0;
1111 return WQ_ITERATE_SUCCESS;
1112 }
1113
1114 if (wqp_type(wqp) == WQP_WQ) {
1115 uint64_t __assert_only wqp_id = wqp->wqp_prepostid.id;
1116
1117 ret = cb(wqset, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr);
1118
1119 switch (ret) {
1120 case WQ_ITERATE_INVALIDATE_CONTINUE:
1121 /* the caller wants to remove the only prepost here */
1122 assert(wqp_id == wqset->wqset_prepost_id);
1123 wqset->wqset_prepost_id = 0;
1124 /* fall through */
1125 case WQ_ITERATE_CONTINUE:
1126 wq_prepost_put(wqp);
1127 ret = WQ_ITERATE_SUCCESS;
1128 break;
1129 case WQ_ITERATE_RESTART:
1130 wq_prepost_put(wqp);
1131 /* fall through */
1132 case WQ_ITERATE_DROPPED:
1133 goto restart;
1134 default:
1135 wq_prepost_put(wqp);
1136 break;
1137 }
1138 return ret;
1139 }
1140
1141 assert(wqp->wqp_prepostid.id == wqset->wqset_prepost_id);
1142 assert(wqp_type(wqp) == WQP_POST);
1143
1144 /*
1145 * At this point we know we have a list of POST objects.
1146 * Grab a handle to the last element in the list and start
1147 * the iteration.
1148 */
1149 tmp_wqp = wq_prepost_get_rnext(wqp);
1150 assert(tmp_wqp != NULL && wqp_type(tmp_wqp) == WQP_POST);
1151
1152 uint64_t last_id = tmp_wqp->wqp_prepostid.id;
1153 wq_prepost_put(tmp_wqp);
1154
1155 ret = WQ_ITERATE_SUCCESS;
1156 for (;;) {
1157 uint64_t wqp_id, first_id, next_id;
1158
1159 wqp_id = wqp->wqp_prepostid.id;
1160 first_id = wqset->wqset_prepost_id;
1161 next_id = wqp->wqp_post.wqp_next_id;
1162
1163 /* grab the WQP_WQ object this _POST points to */
1164 tmp_wqp = wq_prepost_get(wqp->wqp_post.wqp_wq_id);
1165 if (!tmp_wqp) {
1166 /*
1167 * This WQP_POST object points to an invalid
1168 * WQP_WQ object - remove the POST object from
1169 * the list.
1170 */
1171 if (wq_prepost_remove(wqset, wqp) == 0) {
1172 wq_prepost_put(wqp);
1173 goto restart;
1174 }
1175 goto next_prepost;
1176 }
1177 assert(wqp_type(tmp_wqp) == WQP_WQ);
1178 /*
1179 * make the callback: note that this could remove 'wqp' or
1180 * drop the lock on our waitq set. We need to re-validate
1181 * our state when this function returns.
1182 */
1183 ret = cb(wqset, ctx, wqp, tmp_wqp->wqp_wq.wqp_wq_ptr);
1184 wq_prepost_put(tmp_wqp);
1185
1186 switch (ret) {
1187 case WQ_ITERATE_CONTINUE:
1188 /* continue iteration */
1189 break;
1190 case WQ_ITERATE_INVALIDATE_CONTINUE:
1191 assert(next_id == wqp->wqp_post.wqp_next_id);
1192 if (wq_prepost_remove(wqset, wqp) == 0) {
1193 wq_prepost_put(wqp);
1194 goto restart;
1195 }
1196 goto next_prepost;
1197 case WQ_ITERATE_RESTART:
1198 wq_prepost_put(wqp);
1199 /* fall-through */
1200 case WQ_ITERATE_DROPPED:
1201 /* the callback dropped the ref to wqp: just restart */
1202 goto restart;
1203 default:
1204 /* break out of the iteration for some other reason */
1205 goto finish_prepost_foreach;
1206 }
1207
1208 /*
1209 * the set lock may have been dropped during callback,
1210 * if something looks different, restart the prepost iteration
1211 */
1212 if (!wqp_is_valid(wqp) ||
1213 (wqp->wqp_post.wqp_next_id != next_id) ||
1214 wqset->wqset_prepost_id != first_id) {
1215 wq_prepost_put(wqp);
1216 goto restart;
1217 }
1218
1219 next_prepost:
1220 /* this was the last object in the list */
1221 if (wqp_id == last_id) {
1222 break;
1223 }
1224
1225 /* get the next object */
1226 tmp_wqp = wq_prepost_get(next_id);
1227 if (!tmp_wqp) {
1228 /*
1229 * At this point we've already checked our state
1230 * after the callback (which may have dropped the set
1231 * lock). If we find an invalid member of the list
1232 * then something is wrong.
1233 */
1234 panic("Invalid WQP_POST member 0x%llx in waitq set "
1235 "0x%llx prepost list (first:%llx, "
1236 "wqp:%p)",
1237 next_id, wqset->wqset_id, first_id, wqp);
1238 }
1239 wq_prepost_put(wqp);
1240 wqp = tmp_wqp;
1241
1242 assert(wqp_type(wqp) == WQP_POST);
1243 }
1244
1245 finish_prepost_foreach:
1246 wq_prepost_put(wqp);
1247 if (ret == WQ_ITERATE_CONTINUE) {
1248 ret = WQ_ITERATE_SUCCESS;
1249 }
1250
1251 return ret;
1252 }
1253
1254 /**
1255 * Perform a simple loop over a chain of prepost objects
1256 *
1257 * Conditions:
1258 * If 'prepost_id' is associated with a waitq (set) then that object must
1259 * be locked before calling this function.
1260 * Callback function, 'cb', must be able to handle a NULL wqset pointer
1261 * and a NULL waitq pointer!
1262 *
1263 * Notes:
1264 * This prepost chain iteration will _not_ automatically adjust any chain
1265 * element or linkage. This is the responsibility of the caller! If you
1266 * want automatic prepost chain management (at a cost of extra CPU time),
1267 * you can use: wq_prepost_foreach_locked().
1268 */
1269 static int
1270 wq_prepost_iterate(uint64_t prepost_id,
1271 void *ctx, wqp_callback_func cb)
1272 {
1273 int ret;
1274 struct wq_prepost *wqp;
1275
1276 if (!prepost_id) {
1277 return WQ_ITERATE_SUCCESS;
1278 }
1279
1280 wqp = wq_prepost_get(prepost_id);
1281 if (!wqp) {
1282 return WQ_ITERATE_SUCCESS;
1283 }
1284
1285 if (wqp_type(wqp) == WQP_WQ) {
1286 ret = WQ_ITERATE_SUCCESS;
1287 if (cb) {
1288 ret = cb(NULL, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr);
1289 }
1290
1291 if (ret != WQ_ITERATE_DROPPED) {
1292 wq_prepost_put(wqp);
1293 }
1294 return ret;
1295 }
1296
1297 assert(wqp->wqp_prepostid.id == prepost_id);
1298 assert(wqp_type(wqp) == WQP_POST);
1299
1300 /* at this point we know we have a list of POST objects */
1301 uint64_t next_id;
1302
1303 ret = WQ_ITERATE_CONTINUE;
1304 do {
1305 struct wq_prepost *tmp_wqp;
1306 struct waitq *wq = NULL;
1307
1308 next_id = wqp->wqp_post.wqp_next_id;
1309
1310 /* grab the WQP_WQ object this _POST points to */
1311 tmp_wqp = wq_prepost_get(wqp->wqp_post.wqp_wq_id);
1312 if (tmp_wqp) {
1313 assert(wqp_type(tmp_wqp) == WQP_WQ);
1314 wq = tmp_wqp->wqp_wq.wqp_wq_ptr;
1315 }
1316
1317 if (cb) {
1318 ret = cb(NULL, ctx, wqp, wq);
1319 }
1320 if (tmp_wqp) {
1321 wq_prepost_put(tmp_wqp);
1322 }
1323
1324 if (ret != WQ_ITERATE_CONTINUE) {
1325 break;
1326 }
1327
1328 tmp_wqp = wq_prepost_get(next_id);
1329 if (!tmp_wqp) {
1330 /*
1331 * the chain is broken: nothing we can do here besides
1332 * bail from the iteration.
1333 */
1334 ret = WQ_ITERATE_ABORTED;
1335 break;
1336 }
1337
1338 wq_prepost_put(wqp);
1339 wqp = tmp_wqp;
1340
1341 assert(wqp_type(wqp) == WQP_POST);
1342 } while (next_id != prepost_id);
1343
1344 if (ret != WQ_ITERATE_DROPPED) {
1345 wq_prepost_put(wqp);
1346 }
1347
1348 if (ret == WQ_ITERATE_CONTINUE) {
1349 ret = WQ_ITERATE_SUCCESS;
1350 }
1351 return ret;
1352 }
1353
1354
1355 struct _is_posted_ctx {
1356 struct waitq *posting_wq;
1357 int did_prepost;
1358 };
1359
1360 static int
1361 wq_is_preposted_on_set_cb(struct waitq_set *wqset, void *ctx,
1362 struct wq_prepost *wqp, struct waitq *waitq)
1363 {
1364 struct _is_posted_ctx *pctx = (struct _is_posted_ctx *)ctx;
1365
1366 (void)wqset;
1367 (void)wqp;
1368
1369 /*
1370 * Don't early-out, run through the _entire_ list:
1371 * This ensures the list is fully culled, so it retains as few invalid elements as possible.
1372 */
1373 if (pctx->posting_wq == waitq) {
1374 pctx->did_prepost = 1;
1375 }
1376
1377 return WQ_ITERATE_CONTINUE;
1378 }
1379
1380
1381 /**
1382 * checks if 'waitq' has already preposted on 'wqset'
1383 *
1384 * Parameters:
1385 * waitq The waitq that's preposting
1386 * wqset The set onto which waitq may be preposted
1387 *
1388 * Conditions:
1389 * both waitq and wqset are locked
1390 *
1391 * Returns non-zero if 'waitq' has already preposted to 'wqset'
1392 */
1393 static int
1394 wq_is_preposted_on_set(struct waitq *waitq, struct waitq_set *wqset)
1395 {
1396 int ret;
1397 struct _is_posted_ctx pctx;
1398
1399 /*
1400 * If the set's only prepost matches the waitq's prepost ID,
1401 * then it obviously already preposted to the set.
1402 */
1403 if (waitq->waitq_prepost_id != 0 &&
1404 wqset->wqset_prepost_id == waitq->waitq_prepost_id) {
1405 return 1;
1406 }
1407
1408 /* use full prepost iteration: always trim the list */
1409 pctx.posting_wq = waitq;
1410 pctx.did_prepost = 0;
1411 ret = wq_prepost_foreach_locked(wqset, (void *)&pctx,
1412 wq_is_preposted_on_set_cb);
1413 return pctx.did_prepost;
1414 }
1415
1416 static struct wq_prepost *
1417 wq_get_prepost_obj(uint64_t *reserved, int type)
1418 {
1419 struct wq_prepost *wqp = NULL;
1420 /*
1421 * Don't fail just because the caller doesn't have enough
1422 * reservations; we keep a low-water mark on the prepost table,
1423 * so there should be some elements available for us.
1424 */
1425 if (reserved && *reserved) {
1426 wqp = wq_prepost_rpop(reserved, type);
1427 assert(wqp->wqte.lt_id.idx < g_prepost_table.nelem);
1428 } else {
1429 /*
1430 * TODO: if in interrupt context, grab from a special
1431 * region / reserved list!
1432 */
1433 wqp = wq_prepost_alloc(type, 1);
1434 }
1435
1436 if (wqp == NULL) {
1437 panic("Couldn't allocate prepost object!");
1438 }
1439 return wqp;
1440 }
1441
1442
1443 /**
1444 * prepost a waitq onto a waitq set
1445 *
1446 * Parameters:
1447 * wqset The set onto which waitq will be preposted
1448 * waitq The waitq that's preposting
1449 * reserved List (lt_elem_list_ style) of pre-allocated prepost elements
1450 * Could be NULL
1451 *
1452 * Conditions:
1453 * both wqset and waitq are locked
1454 *
1455 * Notes:
1456 * If reserved is NULL, this may block on prepost table growth.
1457 */
1458 static void
1459 wq_prepost_do_post_locked(struct waitq_set *wqset,
1460 struct waitq *waitq,
1461 uint64_t *reserved)
1462 {
1463 struct wq_prepost *wqp_post, *wqp_head, *wqp_tail;
1464
1465 assert(waitq_held(waitq) && waitq_held(&wqset->wqset_q));
1466
1467 /*
1468 * nothing to do if it's already preposted:
1469 * note that this also culls any invalid prepost objects
1470 */
1471 if (wq_is_preposted_on_set(waitq, wqset)) {
1472 return;
1473 }
1474
1475 assert(waitqs_is_linked(wqset));
1476
1477 /*
1478 * This function is called because an event is being posted to 'waitq'.
1479 * We need a prepost object associated with this queue. Allocate one
1480 * now if the waitq isn't already associated with one.
1481 */
1482 if (waitq->waitq_prepost_id == 0) {
1483 struct wq_prepost *wqp;
1484 wqp = wq_get_prepost_obj(reserved, WQP_WQ);
1485 wqp->wqp_wq.wqp_wq_ptr = waitq;
1486 wqp_set_valid(wqp);
1487 waitq->waitq_prepost_id = wqp->wqp_prepostid.id;
1488 wq_prepost_put(wqp);
1489 }
1490
1491 #if CONFIG_LTABLE_STATS
1492 g_prepost_table.npreposts += 1;
1493 #endif
1494
1495 wqdbg_v("preposting waitq %p (0x%llx) to set 0x%llx",
1496 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq),
1497 waitq->waitq_prepost_id, wqset->wqset_id);
1498
1499 if (wqset->wqset_prepost_id == 0) {
1500 /* the set has no previous preposts */
1501 wqset->wqset_prepost_id = waitq->waitq_prepost_id;
1502 return;
1503 }
1504
1505 wqp_head = wq_prepost_get(wqset->wqset_prepost_id);
1506 if (!wqp_head) {
1507 /* the previous prepost has become invalid */
1508 wqset->wqset_prepost_id = waitq->waitq_prepost_id;
1509 return;
1510 }
1511
1512 assert(wqp_head->wqp_prepostid.id == wqset->wqset_prepost_id);
1513
1514 /*
1515 * If we get here, we're going to need at least one new wq_prepost
1516 * object. If the previous wqset_prepost_id points to a WQP_WQ, we
1517 * actually need to allocate 2 wq_prepost objects because the WQP_WQ
1518 * is tied to the waitq and shared across all sets.
1519 */
1520 wqp_post = wq_get_prepost_obj(reserved, WQP_POST);
1521
1522 wqp_post->wqp_post.wqp_wq_id = waitq->waitq_prepost_id;
1523 wqdbg_v("POST 0x%llx :: WQ 0x%llx", wqp_post->wqp_prepostid.id,
1524 waitq->waitq_prepost_id);
1525
1526 if (wqp_type(wqp_head) == WQP_WQ) {
1527 /*
1528 * We must replace the wqset_prepost_id with a pointer
1529 * to two new WQP_POST objects
1530 */
1531 uint64_t wqp_id = wqp_head->wqp_prepostid.id;
1532 wqdbg_v("set 0x%llx previous had 1 WQ prepost (0x%llx): "
1533 "replacing with two POST preposts",
1534 wqset->wqset_id, wqp_id);
1535
1536 /* drop the old reference */
1537 wq_prepost_put(wqp_head);
1538
1539 /* grab another new object (the 2nd of two) */
1540 wqp_head = wq_get_prepost_obj(reserved, WQP_POST);
1541
1542 /* point this one to the original WQP_WQ object */
1543 wqp_head->wqp_post.wqp_wq_id = wqp_id;
1544 wqdbg_v("POST 0x%llx :: WQ 0x%llx",
1545 wqp_head->wqp_prepostid.id, wqp_id);
1546
1547 /* link it to the new wqp_post object allocated earlier */
1548 wqp_head->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id;
1549 /* make the list doubly-linked and circular */
1550 wq_prepost_rlink(wqp_head, wqp_post);
1551
1552 /*
1553 * Finish setting up the new prepost: point it back to the
1554 * POST object we allocated to replace the original wqset
1555 * WQ prepost object
1556 */
1557 wqp_post->wqp_post.wqp_next_id = wqp_head->wqp_prepostid.id;
1558 wq_prepost_rlink(wqp_post, wqp_head);
1559
1560 /* mark objects valid, and reset the wqset prepost list head */
1561 wqp_set_valid(wqp_head);
1562 wqp_set_valid(wqp_post);
1563 wqset->wqset_prepost_id = wqp_head->wqp_prepostid.id;
1564
1565 /* release both references */
1566 wq_prepost_put(wqp_head);
1567 wq_prepost_put(wqp_post);
1568
1569 wqdbg_v("set 0x%llx: 0x%llx/0x%llx -> 0x%llx/0x%llx -> 0x%llx",
1570 wqset->wqset_id, wqset->wqset_prepost_id,
1571 wqp_head->wqp_prepostid.id, wqp_head->wqp_post.wqp_next_id,
1572 wqp_post->wqp_prepostid.id,
1573 wqp_post->wqp_post.wqp_next_id);
1574 return;
1575 }
1576
1577 assert(wqp_type(wqp_head) == WQP_POST);
1578
1579 /*
1580 * Add the new prepost to the end of the prepost list
1581 */
1582 wqp_tail = wq_prepost_get_rnext(wqp_head);
1583 assert(wqp_tail != NULL);
1584 assert(wqp_tail->wqp_post.wqp_next_id == wqset->wqset_prepost_id);
1585
1586 /*
1587 * link the head to the new tail
1588 * NOTE: this needs to happen first in case wqp_tail == wqp_head
1589 */
1590 wq_prepost_reset_rnext(wqp_head);
1591 wq_prepost_rlink(wqp_head, wqp_post);
1592
1593 /* point the new object to the list head, and list tail */
1594 wqp_post->wqp_post.wqp_next_id = wqp_head->wqp_prepostid.id;
1595 wq_prepost_rlink(wqp_post, wqp_tail);
1596
1597 /* point the last item in the waitq set's list to the new object */
1598 wqp_tail->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id;
1599
1600 wqp_set_valid(wqp_post);
1601
1602 wq_prepost_put(wqp_head);
1603 wq_prepost_put(wqp_tail);
1604 wq_prepost_put(wqp_post);
1605
1606 wqdbg_v("set 0x%llx (wqp:0x%llx) last_prepost:0x%llx, "
1607 "new_prepost:0x%llx->0x%llx", wqset->wqset_id,
1608 wqset->wqset_prepost_id, wqp_head->wqp_prepostid.id,
1609 wqp_post->wqp_prepostid.id, wqp_post->wqp_post.wqp_next_id);
1610
1611 return;
1612 }
1613
1614
1615 /* ----------------------------------------------------------------------
1616 *
1617 * Stats collection / reporting
1618 *
1619 * ---------------------------------------------------------------------- */
1620 #if CONFIG_LTABLE_STATS && CONFIG_WAITQ_STATS
1621 static void
1622 wq_table_stats(struct link_table *table, struct wq_table_stats *stats)
1623 {
1624 stats->version = WAITQ_STATS_VERSION;
1625 stats->table_elements = table->nelem;
1626 stats->table_used_elems = table->used_elem;
1627 stats->table_elem_sz = table->elem_sz;
1628 stats->table_slabs = table->nslabs;
1629 stats->table_slab_sz = table->slab_sz;
1630
1631 stats->table_num_allocs = table->nallocs;
1632 stats->table_num_preposts = table->npreposts;
1633 stats->table_num_reservations = table->nreservations;
1634
1635 stats->table_max_used = table->max_used;
1636 stats->table_avg_used = table->avg_used;
1637 stats->table_max_reservations = table->max_reservations;
1638 stats->table_avg_reservations = table->avg_reservations;
1639 }
1640
1641 void
1642 waitq_link_stats(struct wq_table_stats *stats)
1643 {
1644 if (!stats) {
1645 return;
1646 }
1647 wq_table_stats(&g_wqlinktable, stats);
1648 }
1649
1650 void
1651 waitq_prepost_stats(struct wq_table_stats *stats)
1652 {
1653 wq_table_stats(&g_prepost_table, stats);
1654 }
1655 #endif
1656
1657
1658 /* ----------------------------------------------------------------------
1659 *
1660 * Global Wait Queues
1661 *
1662 * ---------------------------------------------------------------------- */
1663
1664 static struct waitq g_boot_waitq;
1665 static struct waitq *global_waitqs = &g_boot_waitq;
1666 static uint32_t g_num_waitqs = 1;
1667
1668 /*
1669 * Zero out the unused MSBs of the event (keep only the low _EVENT_MASK_BITS bits).
1670 */
1671 #define _CAST_TO_EVENT_MASK(event) ((uintptr_t)(event) & ((1ul << _EVENT_MASK_BITS) - 1ul))
1672
1673 static __inline__ uint32_t
1674 waitq_hash(char *key, size_t length)
1675 {
1676 uint32_t hash = os_hash_jenkins(key, length);
1677
1678 hash &= (g_num_waitqs - 1);
1679 return hash;
1680 }
1681
1682 /* return a global waitq pointer corresponding to the given event */
1683 struct waitq *
1684 _global_eventq(char *event, size_t event_length)
1685 {
1686 return &global_waitqs[waitq_hash(event, event_length)];
1687 }
1688
1689 /* return an indexed global waitq pointer */
1690 struct waitq *
1691 global_waitq(int index)
1692 {
1693 return &global_waitqs[index % g_num_waitqs];
1694 }
1695
1696
1697 #if CONFIG_LTABLE_STATS || CONFIG_WAITQ_STATS
1698 /* this global is for lldb */
1699 const uint32_t g_nwaitq_btframes = NWAITQ_BTFRAMES;
1700
1701 static __inline__ void
1702 waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip)
1703 {
1704 uintptr_t buf[NWAITQ_BTFRAMES + skip];
1705 if (skip < 0) {
1706 skip = 0;
1707 }
1708 memset(buf, 0, (NWAITQ_BTFRAMES + skip) * sizeof(uintptr_t));
1709 backtrace(buf, g_nwaitq_btframes + skip, NULL);
1710 memcpy(&bt[0], &buf[skip], NWAITQ_BTFRAMES * sizeof(uintptr_t));
1711 }
1712 #else /* no stats */
1713 #define waitq_grab_backtrace(...)
1714 #endif
1715
1716 #if CONFIG_WAITQ_STATS
1717
1718 struct wq_stats g_boot_stats;
1719 struct wq_stats *g_waitq_stats = &g_boot_stats;
1720
1721 static __inline__ struct wq_stats *
1722 waitq_global_stats(struct waitq *waitq)
1723 {
1724 struct wq_stats *wqs;
1725 uint32_t idx;
1726
1727 if (!waitq_is_global(waitq)) {
1728 return NULL;
1729 }
1730
1731 idx = (uint32_t)(((uintptr_t)waitq - (uintptr_t)global_waitqs) / sizeof(*waitq));
1732 assert(idx < g_num_waitqs);
1733 wqs = &g_waitq_stats[idx];
1734 return wqs;
1735 }
1736
1737 static __inline__ void
1738 waitq_stats_count_wait(struct waitq *waitq)
1739 {
1740 struct wq_stats *wqs = waitq_global_stats(waitq);
1741 if (wqs != NULL) {
1742 wqs->waits++;
1743 waitq_grab_backtrace(wqs->last_wait, 2);
1744 }
1745 }
1746
1747 static __inline__ void
1748 waitq_stats_count_wakeup(struct waitq *waitq)
1749 {
1750 struct wq_stats *wqs = waitq_global_stats(waitq);
1751 if (wqs != NULL) {
1752 wqs->wakeups++;
1753 waitq_grab_backtrace(wqs->last_wakeup, 2);
1754 }
1755 }
1756
1757 static __inline__ void
1758 waitq_stats_count_clear_wakeup(struct waitq *waitq)
1759 {
1760 struct wq_stats *wqs = waitq_global_stats(waitq);
1761 if (wqs != NULL) {
1762 wqs->wakeups++;
1763 wqs->clears++;
1764 waitq_grab_backtrace(wqs->last_wakeup, 2);
1765 }
1766 }
1767
1768 static __inline__ void
1769 waitq_stats_count_fail(struct waitq *waitq)
1770 {
1771 struct wq_stats *wqs = waitq_global_stats(waitq);
1772 if (wqs != NULL) {
1773 wqs->failed_wakeups++;
1774 waitq_grab_backtrace(wqs->last_failed_wakeup, 2);
1775 }
1776 }
1777 #else /* !CONFIG_WAITQ_STATS */
1778 #define waitq_stats_count_wait(q) do { } while (0)
1779 #define waitq_stats_count_wakeup(q) do { } while (0)
1780 #define waitq_stats_count_clear_wakeup(q) do { } while (0)
1781 #define waitq_stats_count_fail(q) do { } while (0)
1782 #endif
1783
1784 int
1785 waitq_is_valid(struct waitq *waitq)
1786 {
1787 return (waitq != NULL) && waitq->waitq_isvalid;
1788 }
1789
1790 int
1791 waitq_set_is_valid(struct waitq_set *wqset)
1792 {
1793 return (wqset != NULL) && wqset->wqset_q.waitq_isvalid && waitqs_is_set(wqset);
1794 }
1795
1796 int
1797 waitq_is_global(struct waitq *waitq)
1798 {
1799 if (waitq >= global_waitqs && waitq < global_waitqs + g_num_waitqs) {
1800 return 1;
1801 }
1802 return 0;
1803 }
1804
1805 int
1806 waitq_irq_safe(struct waitq *waitq)
1807 {
1808 /* global wait queues have this bit set on initialization */
1809 return waitq->waitq_irq;
1810 }
1811
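/*
 * Return the "safe" queue used to actually queue threads waiting on
 * 'waitq': the receive turnstile's wait queue for port queues, otherwise
 * the hashed global event queue.
 */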
1812 struct waitq *
1813 waitq_get_safeq(struct waitq *waitq)
1814 {
1815 struct waitq *safeq;
1816
1817 /* Check if it's a port waitq */
1818 if (waitq_is_port_queue(waitq)) {
1819 assert(!waitq_irq_safe(waitq));
1820 safeq = ipc_port_rcv_turnstile_waitq(waitq);
1821 } else {
1822 safeq = global_eventq(waitq);
1823 }
1824 return safeq;
1825 }
1826
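/*
 * Size (in bytes) of the global waitq hash table: either the 'wqsize'
 * boot-arg, or enough space for roughly thread_max / 5 queues, rounded
 * up to a page multiple.
 */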
1827 static uint32_t
1828 waitq_hash_size(void)
1829 {
1830 uint32_t hsize, queues;
1831
1832 if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize))) {
1833 return hsize;
1834 }
1835
1836 queues = thread_max / 5;
1837 hsize = P2ROUNDUP(queues * sizeof(struct waitq), PAGE_SIZE);
1838
1839 return hsize;
1840 }
1841
1842 /*
1843 * Since the priority-ordered waitq uses basepri as the
1844 * ordering key, assert that this value fits in a uint8_t.
1845 */
1846 static_assert(MAXPRI <= UINT8_MAX);
1847
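/*
 * Enqueue 'thread' on 'wq': priority-ordered insertion for turnstile
 * wait queues, FIFO or LIFO (per the 'fifo' argument) for regular queues.
 */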
1848 static inline void
1849 waitq_thread_insert(struct waitq *wq,
1850 thread_t thread, boolean_t fifo)
1851 {
1852 if (waitq_is_turnstile_queue(wq)) {
1853 turnstile_stats_update(0, TSU_TURNSTILE_BLOCK_COUNT, NULL);
1854 turnstile_waitq_add_thread_priority_queue(wq, thread);
1855 } else {
1856 turnstile_stats_update(0, TSU_REGULAR_WAITQ_BLOCK_COUNT, NULL);
1857 if (fifo) {
1858 enqueue_tail(&wq->waitq_queue, &thread->wait_links);
1859 } else {
1860 enqueue_head(&wq->waitq_queue, &thread->wait_links);
1861 }
1862 }
1863 }
1864
1865 static inline void
1866 waitq_thread_remove(struct waitq *wq,
1867 thread_t thread)
1868 {
1869 if (waitq_is_turnstile_queue(wq)) {
1870 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1871 (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (THREAD_REMOVED_FROM_TURNSTILE_WAITQ))) | DBG_FUNC_NONE,
1872 VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)),
1873 thread_tid(thread),
1874 0, 0, 0);
1875 priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links,
1876 PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
1877 } else {
1878 remqueue(&(thread->wait_links));
1879 }
1880 }
1881
1882 void
1883 waitq_bootstrap(void)
1884 {
1885 kern_return_t kret;
1886 uint32_t whsize, qsz, tmp32;
1887
1888 g_min_free_table_elem = DEFAULT_MIN_FREE_TABLE_ELEM;
1889 if (PE_parse_boot_argn("wqt_min_free", &tmp32, sizeof(tmp32)) == TRUE) {
1890 g_min_free_table_elem = tmp32;
1891 }
1892 wqdbg("Minimum free table elements: %d", tmp32);
1893
1894 lck_grp_init(&waitq_lck_grp, "waitq", LCK_GRP_ATTR_NULL);
1895
1896 /*
1897 * Determine the amount of memory we're willing to reserve for
1898 * the waitqueue hash table
1899 */
1900 whsize = waitq_hash_size();
1901
1902 /* Determine the number of waitqueues we can fit. */
1903 qsz = sizeof(struct waitq);
1904 whsize = ROUNDDOWN(whsize, qsz);
1905 g_num_waitqs = whsize / qsz;
1906
1907 /*
1908 * The hash algorithm requires that this be a power of 2, so we
1909 * just mask off all the low-order bits.
1910 */
1911 for (uint32_t i = 0; i < 31; i++) {
1912 uint32_t bit = (1 << i);
1913 if ((g_num_waitqs & bit) == g_num_waitqs) {
1914 break;
1915 }
1916 g_num_waitqs &= ~bit;
1917 }
1918 assert(g_num_waitqs > 0);
1919
1920 /* Now determine how much memory we really need. */
1921 whsize = P2ROUNDUP(g_num_waitqs * qsz, PAGE_SIZE);
1922
1923 wqdbg("allocating %d global queues (%d bytes)", g_num_waitqs, whsize);
1924 kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&global_waitqs,
1925 whsize, 0, KMA_KOBJECT | KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ);
1926 if (kret != KERN_SUCCESS || global_waitqs == NULL) {
1927 panic("kernel_memory_allocate() failed to alloc global_waitqs"
1928 ", error: %d, whsize: 0x%x", kret, whsize);
1929 }
1930
1931 #if CONFIG_WAITQ_STATS
1932 whsize = P2ROUNDUP(g_num_waitqs * sizeof(struct wq_stats), PAGE_SIZE);
1933 kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&g_waitq_stats,
1934 whsize, 0, KMA_KOBJECT | KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ);
1935 if (kret != KERN_SUCCESS || g_waitq_stats == NULL) {
1936 panic("kernel_memory_allocate() failed to alloc g_waitq_stats"
1937 ", error: %d, whsize: 0x%x", kret, whsize);
1938 }
1939 memset(g_waitq_stats, 0, whsize);
1940 #endif
1941
1942 for (uint32_t i = 0; i < g_num_waitqs; i++) {
1943 waitq_init(&global_waitqs[i], SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
1944 }
1945
1946 waitq_set_zone = zinit(sizeof(struct waitq_set),
1947 WAITQ_SET_MAX * sizeof(struct waitq_set),
1948 sizeof(struct waitq_set),
1949 "waitq sets");
1950 zone_change(waitq_set_zone, Z_NOENCRYPT, TRUE);
1951
1952 /* initialize the global waitq link table */
1953 wql_init();
1954
1955 /* initialize the global waitq prepost table */
1956 wqp_init();
1957 }
1958
1959
1960 /* ----------------------------------------------------------------------
1961 *
1962 * Wait Queue Implementation
1963 *
1964 * ---------------------------------------------------------------------- */
1965
1966 /*
1967 * Double the standard lock timeout, because wait queues tend
1968 * to iterate over a number of threads - locking each. If there is
1969 * a problem with a thread lock, it normally times out at the wait
1970 * queue level first, hiding the real problem.
1971 */
1972 /* For x86, the hardware timeout is in TSC units. */
1973 #if defined(__i386__) || defined(__x86_64__)
1974 #define hwLockTimeOut LockTimeOutTSC
1975 #else
1976 #define hwLockTimeOut LockTimeOut
1977 #endif
1978
1979 void
1980 waitq_lock(struct waitq *wq)
1981 {
1982 if (__improbable(waitq_lock_to(wq,
1983 hwLockTimeOut * 2) == 0)) {
1984 boolean_t wql_acquired = FALSE;
1985
1986 while (machine_timeout_suspended()) {
1987 mp_enable_preemption();
1988 wql_acquired = waitq_lock_to(wq,
1989 hwLockTimeOut * 2);
1990 if (wql_acquired) {
1991 break;
1992 }
1993 }
1994 if (wql_acquired == FALSE) {
1995 panic("waitq deadlock - waitq=%p, cpu=%d\n",
1996 wq, cpu_number());
1997 }
1998 }
1999 #if defined(__x86_64__)
2000 pltrace(FALSE);
2001 #endif
2002 assert(waitq_held(wq));
2003 }
2004
2005 void
2006 waitq_unlock(struct waitq *wq)
2007 {
2008 assert(waitq_held(wq));
2009 #if defined(__x86_64__)
2010 pltrace(TRUE);
2011 #endif
2012 waitq_lock_unlock(wq);
2013 }
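/*
 * Illustrative sketch (not from the original file): the expected pairing
 * around waitq_lock()/waitq_unlock(). For IRQ-safe waitqs the caller is
 * additionally responsible for disabling interrupts via splsched() before
 * taking the lock, mirroring what waitq_deinit() and
 * do_waitq_select_n_locked() do below. 'example_waitq_critical_section'
 * is a hypothetical helper.
 */
static inline void
example_waitq_critical_section(struct waitq *wq)
{
	spl_t s = 0;

	if (waitq_irq_safe(wq)) {
		s = splsched();     /* IRQ-safe queues are locked with interrupts off */
	}
	waitq_lock(wq);

	/* ... inspect or modify state protected by the waitq lock ... */

	waitq_unlock(wq);
	if (waitq_irq_safe(wq)) {
		splx(s);
	}
}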
2014
2015
2016 /**
2017 * clear the thread-related waitq state
2018 *
2019 * Conditions:
2020 * 'thread' is locked
2021 */
2022 static inline void
2023 thread_clear_waitq_state(thread_t thread)
2024 {
2025 thread->waitq = NULL;
2026 thread->wait_event = NO_EVENT64;
2027 thread->at_safe_point = FALSE;
2028 }
2029
2030
2031 typedef thread_t (*waitq_select_cb)(void *ctx, struct waitq *waitq,
2032 int is_global, thread_t thread);
2033
2034 struct waitq_select_args {
2035 /* input parameters */
2036 struct waitq *posted_waitq;
2037 struct waitq *waitq;
2038 event64_t event;
2039 waitq_select_cb select_cb;
2040 void *select_ctx;
2041 int priority;
2042
2043 uint64_t *reserved_preposts;
2044
2045 /* output parameters */
2046 queue_t threadq;
2047 int max_threads;
2048 int *nthreads;
2049 spl_t *spl;
2050 };
2051
2052 static void do_waitq_select_n_locked(struct waitq_select_args *args);
2053
2054 /**
2055 * callback invoked once for every waitq set to which a waitq belongs
2056 *
2057 * Conditions:
2058 * ctx->posted_waitq is locked
2059 * 'link' points to a valid waitq set
2060 *
2061 * Notes:
2062 * Takes the waitq set lock on the set pointed to by 'link'
2063 * Calls do_waitq_select_n_locked() which could recurse back into
2064 * this function if the waitq set is a member of other sets.
2065 * If no threads were selected, it preposts the input waitq
2066 * onto the waitq set pointed to by 'link'.
2067 */
2068 static int
2069 waitq_select_walk_cb(struct waitq *waitq, void *ctx,
2070 struct waitq_link *link)
2071 {
2072 int ret = WQ_ITERATE_CONTINUE;
2073 struct waitq_select_args args = *((struct waitq_select_args *)ctx);
2074 struct waitq_set *wqset;
2075
2076 (void)waitq;
2077 assert(wql_type(link) == WQL_WQS);
2078
2079 wqset = link->wql_wqs.wql_set;
2080 args.waitq = &wqset->wqset_q;
2081
2082 assert(!waitq_irq_safe(waitq));
2083 assert(!waitq_irq_safe(&wqset->wqset_q));
2084
2085 waitq_set_lock(wqset);
2086 /*
2087 * verify that the link wasn't invalidated just before
2088 * we were able to take the lock.
2089 */
2090 if (wqset->wqset_id != link->wql_setid.id) {
2091 goto out_unlock;
2092 }
2093
2094 assert(waitqs_is_linked(wqset));
2095
2096 /*
2097 * Find any threads waiting on this wait queue set,
2098 * and recurse into any waitq set to which this set belongs.
2099 */
2100 do_waitq_select_n_locked(&args);
2101
2102 if (*args.nthreads > 0 || (args.threadq && !queue_empty(args.threadq))) {
2103 /* at least 1 thread was selected and returned: don't prepost */
2104 if (args.max_threads > 0 && *args.nthreads >= args.max_threads) {
2105 /* break out of the setid walk */
2106 ret = WQ_ITERATE_FOUND;
2107 }
2108 } else if (args.event == NO_EVENT64) {
2109 /*
2110 * No thread selected: prepost 'waitq' to 'wqset'
2111 * if wqset can handle preposts and the event is set to 0.
2112 * We also make sure to not post waitq sets to other sets.
2113 *
2114 * If the set doesn't support preposts, but does support
2115 * prepost callout/hook interaction, invoke the predefined
2116 * callout function and pass the set's 'prepost_hook.' This
2117 * could potentially release another thread to handle events.
2118 */
2119 if (waitq_set_can_prepost(wqset)) {
2120 wq_prepost_do_post_locked(
2121 wqset, waitq, args.reserved_preposts);
2122 } else if (waitq_set_has_prepost_hook(wqset)) {
2123 waitq_set_prepost_hook_t *hook = wqset->wqset_prepost_hook;
2124
2125 /*
2126 * When calling out to the prepost hook,
2127 * we drop the waitq lock, to allow for the kevent
2128 * subsystem to call into the waitq subsystem again,
2129 * without risking a deadlock.
2130 *
2131 * However, we need to guard against wqset going away,
2132 * so we increment the prepost hook use count
2133 * while the lock is dropped.
2134 *
2135 * This lets waitq_set_deinit() know to wait for the
2136 * prepost hook call to be done before it can proceed.
2137 *
2138 * Note: we need to keep preemption disabled the whole
2139 * time as waitq_set_deinit will spin on this.
2140 */
2141
2142 disable_preemption();
2143 os_atomic_inc(hook, relaxed);
2144 waitq_set_unlock(wqset);
2145
2146 waitq_set__CALLING_PREPOST_HOOK__(hook);
2147
2148 /* Note: after this decrement, the wqset may be deallocated */
2149 os_atomic_dec(hook, relaxed);
2150 enable_preemption();
2151 return ret;
2152 }
2153 }
2154
2155 out_unlock:
2156 waitq_set_unlock(wqset);
2157 return ret;
2158 }
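/*
 * Illustrative sketch (not from the original file): the use-count guard
 * pattern used above when calling out to the prepost hook. The helper
 * name is hypothetical; the body mirrors the hook path in
 * waitq_select_walk_cb().
 */
static void
example_prepost_hook_callout(waitq_set_prepost_hook_t *hook)
{
	/* waitq_set_deinit() spins on this count, so preemption stays off */
	disable_preemption();
	os_atomic_inc(hook, relaxed);   /* pin the wqset while its lock is dropped */

	waitq_set__CALLING_PREPOST_HOOK__(hook);  /* may re-enter the waitq code */

	os_atomic_dec(hook, relaxed);   /* the wqset may be freed after this point */
	enable_preemption();
}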
2159
2160 /**
2161 * Routine to iterate over the waitq for non-priority ordered waitqs
2162 *
2163 * Conditions:
2164 * args->waitq (and args->posted_waitq) is locked
2165 *
2166 * Notes:
2167 * Uses the optional select callback function to refine the selection
2168 * of one or more threads from a waitq. The select callback is invoked
2169 * once for every thread that is found to be waiting on the input args->waitq.
2170 *
2171 * If one or more threads are selected, this may disable interrupts.
2172 * The previous interrupt state is returned in args->spl and should
2173 * be used in a call to splx() if threads are returned to the caller.
2174 */
2175 static thread_t
2176 waitq_queue_iterate_locked(struct waitq *safeq, struct waitq *waitq,
2177 spl_t spl, struct waitq_select_args *args,
2178 uint32_t *remaining_eventmask)
2179 {
2180 int max_threads = args->max_threads;
2181 int *nthreads = args->nthreads;
2182 thread_t thread = THREAD_NULL;
2183 thread_t first_thread = THREAD_NULL;
2184
2185 qe_foreach_element_safe(thread, &safeq->waitq_queue, wait_links) {
2186 thread_t t = THREAD_NULL;
2187 assert_thread_magic(thread);
2188
2189 /*
2190 * For non-priority ordered waitqs, we allow multiple events to be
2191 * mux'ed into the same waitq. Also safeqs may contain threads from
2192 * multiple waitqs. Only pick threads that match the
2193 * requested wait event.
2194 */
2195 if (thread->waitq == waitq && thread->wait_event == args->event) {
2196 t = thread;
2197 if (first_thread == THREAD_NULL) {
2198 first_thread = thread;
2199 }
2200
2201 /* allow the caller to further refine the selection */
2202 if (args->select_cb) {
2203 t = args->select_cb(args->select_ctx, waitq,
2204 waitq_is_global(waitq), thread);
2205 }
2206 if (t != THREAD_NULL) {
2207 *nthreads += 1;
2208 if (args->threadq) {
2209 /* if output queue, add locked thread to it */
2210 if (*nthreads == 1) {
2211 *(args->spl) = (safeq != waitq) ? spl : splsched();
2212 }
2213 thread_lock(t);
2214 thread_clear_waitq_state(t);
2215 re_queue_tail(args->threadq, &t->wait_links);
2216 }
2217 /* only enqueue up to 'max' threads */
2218 if (*nthreads >= max_threads && max_threads > 0) {
2219 break;
2220 }
2221 }
2222 }
2223 /* thread wasn't selected, so track its event */
2224 if (t == THREAD_NULL) {
2225 *remaining_eventmask |= (thread->waitq != safeq) ?
2226 _CAST_TO_EVENT_MASK(thread->waitq) : _CAST_TO_EVENT_MASK(thread->wait_event);
2227 }
2228 }
2229
2230 return first_thread;
2231 }
2232
2233 /**
2234 * Routine to iterate and remove threads from priority ordered waitqs
2235 *
2236 * Conditions:
2237 * args->waitq (and args->posted_waitq) is locked
2238 *
2239 * Notes:
2240 * The priority ordered waitqs only support maximum priority element removal.
2241 *
2242 * Also, the implementation makes sure that all threads in a priority ordered
2243 * waitq are waiting on the same wait event. This is not necessarily true for
2244 * non-priority ordered waitqs. If one or more threads are selected, this may
2245 * disable interrupts. The previous interrupt state is returned in args->spl
2246 * and should be used in a call to splx() if threads are returned to the caller.
2247 *
2248 * In the future, we could support priority ordered waitqs with multiple wait
2249 * events in the same queue. The way to implement that would be to keep removing
2250 * elements from the waitq and if the event does not match the requested one,
2251 * add it to a local list. This local list of elements needs to be re-inserted
2252 * into the priority queue at the end and the select_cb return value &
2253 * remaining_eventmask would need to be handled appropriately. The implementation
2254 * is not very efficient but would work functionally.
2255 */
2256 static thread_t
2257 waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *waitq,
2258 spl_t spl, struct waitq_select_args *args,
2259 uint32_t *remaining_eventmask)
2260 {
2261 int max_threads = args->max_threads;
2262 int *nthreads = args->nthreads;
2263 thread_t first_thread = THREAD_NULL;
2264 thread_t thread = THREAD_NULL;
2265
2266 /*
2267 * The waitq select routines need to handle two cases:
2268 * Case 1: Peek at maximum priority thread in the waitq (remove_op = 0)
2269 * Get the maximum priority thread from the waitq without removing it.
2270 * In that case args->threadq == NULL and max_threads == 1.
2271 * Case 2: Remove 'n' highest priority threads from waitq (remove_op = 1)
2272 * Get max_threads (if available) while removing them from the waitq.
2273 * In that case args->threadq != NULL and max_threads is one of {-1, 1}.
2274 *
2275 * The only possible values for remaining_eventmask for the priority queue
2276 * waitq are either 0 (for the remove all threads case) or the original
2277 * safeq->waitq_eventmask (for the lookup/remove one thread cases).
2278 */
2279 *remaining_eventmask = safeq->waitq_eventmask;
2280 boolean_t remove_op = !!(args->threadq);
2281
2282 while ((max_threads <= 0) || (*nthreads < max_threads)) {
2283 if (priority_queue_empty(&(safeq->waitq_prio_queue))) {
2284 *remaining_eventmask = 0;
2285 break;
2286 }
2287
2288 if (remove_op) {
2289 thread = priority_queue_remove_max(&safeq->waitq_prio_queue,
2290 struct thread, wait_prioq_links,
2291 PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
2292 } else {
2293 /* For the peek operation, the only valid value for max_threads is 1 */
2294 assert(max_threads == 1);
2295 thread = priority_queue_max(&safeq->waitq_prio_queue,
2296 struct thread, wait_prioq_links);
2297 }
2298 /*
2299 * Ensure the wait event matches since priority ordered waitqs do not
2300 * support multiple events in the same waitq.
2301 */
2302 assert((thread->waitq == waitq) && (thread->wait_event == args->event));
2303
2304 if (args->select_cb) {
2305 /*
2306 * Call the select_cb passed into the waitq_select args. The callback
2307 * updates the select_ctx with information about the highest priority
2308 * thread which is eventually used by the caller.
2309 */
2310 thread_t __assert_only ret_thread = args->select_cb(args->select_ctx, waitq,
2311 waitq_is_global(waitq), thread);
2312 if (!remove_op) {
2313 /* For the peek operation, the thread should not be selected for addition */
2314 assert(ret_thread == THREAD_NULL);
2315 } else {
2316 /*
2317 * For the remove operation, the select routine should always return a valid
2318 * thread for priority waitqs. Since all threads in a prioq are equally
2319 * eligible, it should match the thread removed from the prioq. If this
2320 * invariant changes, the implementation would need to handle the
2321 * remaining_eventmask here correctly.
2322 */
2323 assert(ret_thread == thread);
2324 }
2325 }
2326
2327 if (first_thread == THREAD_NULL) {
2328 first_thread = thread;
2329 /*
2330 * turnstile_kernel_update_inheritor_on_wake_locked will lock
2331 * first_thread, so call it before locking it.
2332 */
2333 if (args->priority == WAITQ_PROMOTE_ON_WAKE && first_thread != THREAD_NULL && waitq_is_turnstile_queue(safeq)) {
2334 turnstile_kernel_update_inheritor_on_wake_locked(waitq_to_turnstile(safeq), (turnstile_inheritor_t)first_thread, TURNSTILE_INHERITOR_THREAD);
2335 }
2336 }
2337
2338 /* For the peek operation, break out early */
2339 if (!remove_op) {
2340 break;
2341 }
2342
2343 /* Add the thread to the result thread list */
2344 *nthreads += 1;
2345 if (*nthreads == 1) {
2346 *(args->spl) = (safeq != waitq) ? spl : splsched();
2347 }
2348 thread_lock(thread);
2349 thread_clear_waitq_state(thread);
2350 enqueue_tail(args->threadq, &(thread->wait_links));
2351 }
2352
2353 return first_thread;
2354 }
2355
2356 /**
2357 * generic thread selection from a waitq (and sets to which the waitq belongs)
2358 *
2359 * Conditions:
2360 * args->waitq (and args->posted_waitq) is locked
2361 *
2362 * Notes:
2363 * Uses the optional select callback function to refine the selection
2364 * of one or more threads from a waitq and any set to which the waitq
2365 * belongs. The select callback is invoked once for every thread that
2366 * is found to be waiting on the input args->waitq.
2367 *
2368 * If one or more threads are selected, this may disable interrupts.
2369 * The previous interrupt state is returned in args->spl and should
2370 * be used in a call to splx() if threads are returned to the caller.
2371 */
2372 static void
2373 do_waitq_select_n_locked(struct waitq_select_args *args)
2374 {
2375 struct waitq *waitq = args->waitq;
2376 int max_threads = args->max_threads;
2377 thread_t first_thread = THREAD_NULL;
2378 struct waitq *safeq;
2379 uint32_t remaining_eventmask = 0;
2380 uint32_t eventmask;
2381 int *nthreads = args->nthreads;
2382 spl_t spl = 0;
2383
2384 assert(max_threads != 0);
2385
2386 if (!waitq_irq_safe(waitq)) {
2387 /* JMM - add flag to waitq to avoid global lookup if no waiters */
2388 eventmask = _CAST_TO_EVENT_MASK(waitq);
2389 safeq = waitq_get_safeq(waitq);
2390 if (*nthreads == 0) {
2391 spl = splsched();
2392 }
2393 waitq_lock(safeq);
2394 } else {
2395 eventmask = _CAST_TO_EVENT_MASK(args->event);
2396 safeq = waitq;
2397 }
2398
2399 /*
2400 * If the safeq doesn't have an eventmask (not global) or the event
2401 * we're looking for IS set in its eventmask, then scan the threads
2402 * in that queue for ones that match the original <waitq,event> pair.
2403 */
2404 if (!waitq_is_global(safeq) ||
2405 (safeq->waitq_eventmask & eventmask) == eventmask) {
2406 if (waitq_is_turnstile_queue(safeq)) {
2407 first_thread = waitq_prioq_iterate_locked(safeq, waitq,
2408 spl, args,
2409 &remaining_eventmask);
2410 } else {
2411 first_thread = waitq_queue_iterate_locked(safeq, waitq,
2412 spl, args,
2413 &remaining_eventmask);
2414 }
2415
2416 /*
2417 * Update the eventmask of global queues we just scanned:
2418 * - If we selected all the threads in the queue, we can clear its
2419 * eventmask.
2420 *
2421 * - If we didn't find enough threads to fill our needs, then we can
2422 * assume we looked at every thread in the queue and the mask we
2423 * computed is complete - so reset it.
2424 */
2425 if (waitq_is_global(safeq)) {
2426 if (waitq_empty(safeq)) {
2427 safeq->waitq_eventmask = 0;
2428 } else if (max_threads < 0 || *nthreads < max_threads) {
2429 safeq->waitq_eventmask = remaining_eventmask;
2430 }
2431 }
2432 }
2433
2434 /*
2435 * Grab the first thread in the queue if no other thread was selected.
2436 * We can guarantee that no one has manipulated this thread because
2437 * it's waiting on the given waitq, and we have that waitq locked.
2438 */
2439 if (*nthreads == 0 && first_thread != THREAD_NULL && args->threadq) {
2440 /* we know this is the first (and only) thread */
2441 ++(*nthreads);
2442 *(args->spl) = (safeq != waitq) ? spl : splsched();
2443
2444 thread_lock(first_thread);
2445 thread_clear_waitq_state(first_thread);
2446 waitq_thread_remove(safeq, first_thread);
2447 enqueue_tail(args->threadq, &(first_thread->wait_links));
2448
2449 /* update the eventmask on [now] empty global queues */
2450 if (waitq_is_global(safeq) && waitq_empty(safeq)) {
2451 safeq->waitq_eventmask = 0;
2452 }
2453 }
2454
2455 /* unlock the safe queue if we locked one above */
2456 if (safeq != waitq) {
2457 waitq_unlock(safeq);
2458 if (*nthreads == 0) {
2459 splx(spl);
2460 }
2461 }
2462
2463 if (max_threads > 0 && *nthreads >= max_threads) {
2464 return;
2465 }
2466
2467 /*
2468 * wait queues that are not in any sets
2469 * are the bottom of the recursion
2470 */
2471 if (!waitq->waitq_set_id) {
2472 return;
2473 }
2474
2475 /* check to see if the set ID for this wait queue is valid */
2476 struct waitq_link *link = wql_get_link(waitq->waitq_set_id);
2477 if (!link) {
2478 /* the waitq set to which this waitq belonged has been invalidated */
2479 waitq->waitq_set_id = 0;
2480 return;
2481 }
2482
2483 wql_put_link(link);
2484
2485 /*
2486 * If this waitq is a member of any wait queue sets, we need to look
2487 * for waiting thread(s) in any of those sets, and prepost all sets that
2488 * don't have active waiters.
2489 *
2490 * Note that we do a local walk of this waitq's links - we manually
2491 * recurse down wait queue set's with non-zero wqset_q.waitq_set_id
2492 */
2493 (void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
2494 WQL_WQS, (void *)args, waitq_select_walk_cb);
2495 }
2496
2497 /**
2498 * main entry point for thread selection from a waitq
2499 *
2500 * Conditions:
2501 * waitq is locked
2502 *
2503 * Returns:
2504 * The number of threads waiting on 'waitq' for 'event' which have
2505 * been placed onto the input 'threadq'
2506 *
2507 * Notes:
2508 * The 'select_cb' function is invoked for every thread found waiting on
2509 * 'waitq' for 'event'. The thread is _not_ locked upon callback
2510 * invocation. This parameter may be NULL.
2511 *
2512 * If one or more threads are returned in 'threadq' then the caller is
2513 * responsible to call splx() using the returned 'spl' value. Each
2514 * returned thread is locked.
2515 */
2516 static __inline__ int
2517 waitq_select_n_locked(struct waitq *waitq,
2518 event64_t event,
2519 waitq_select_cb select_cb,
2520 void *select_ctx,
2521 uint64_t *reserved_preposts,
2522 queue_t threadq,
2523 int max_threads, spl_t *spl,
2524 int priority)
2525 {
2526 int nthreads = 0;
2527
2528 struct waitq_select_args args = {
2529 .posted_waitq = waitq,
2530 .waitq = waitq,
2531 .event = event,
2532 .select_cb = select_cb,
2533 .select_ctx = select_ctx,
2534 .priority = priority,
2535 .reserved_preposts = reserved_preposts,
2536 .threadq = threadq,
2537 .max_threads = max_threads,
2538 .nthreads = &nthreads,
2539 .spl = spl,
2540 };
2541
2542 do_waitq_select_n_locked(&args);
2543 return nthreads;
2544 }
2545
2546 /**
2547 * select from a waitq a single thread waiting for a given event
2548 *
2549 * Conditions:
2550 * 'waitq' is locked
2551 *
2552 * Returns:
2553 * A locked thread that's been removed from the waitq, but has not
2554 * yet been put on a run queue. Caller is responsible to call splx
2555 * with the '*spl' value.
2556 */
2557 static thread_t
2558 waitq_select_one_locked(struct waitq *waitq, event64_t event,
2559 uint64_t *reserved_preposts,
2560 int priority, spl_t *spl)
2561 {
2562 int nthreads;
2563 queue_head_t threadq;
2564
2565 queue_init(&threadq);
2566
2567 nthreads = waitq_select_n_locked(waitq, event, NULL, NULL,
2568 reserved_preposts, &threadq, 1, spl, priority);
2569
2570 /* if we selected a thread, return it (still locked) */
2571 if (!queue_empty(&threadq)) {
2572 thread_t t;
2573 queue_entry_t qe = dequeue_head(&threadq);
2574 t = qe_element(qe, struct thread, wait_links);
2575 assert(queue_empty(&threadq)); /* there should be 1 entry */
2576 /* t has been locked and removed from all queues */
2577 return t;
2578 }
2579
2580 return THREAD_NULL;
2581 }
2582
2583 struct select_thread_ctx {
2584 thread_t thread;
2585 event64_t event;
2586 spl_t *spl;
2587 };
2588
2589 /**
2590 * link walk callback invoked once for each set to which a waitq belongs
2591 *
2592 * Conditions:
2593 * initial waitq is locked
2594 * ctx->thread is unlocked
2595 *
2596 * Notes:
2597 * This may disable interrupts and early-out of the full DAG link walk by
2598 * returning WQ_ITERATE_FOUND. In this case, the returned thread has
2599 * been removed from the waitq, its waitq state has been reset, and the
2600 * caller is responsible to call splx() with the returned interrupt state
2601 * in ctx->spl.
2602 */
2603 static int
2604 waitq_select_thread_cb(struct waitq *waitq, void *ctx,
2605 struct waitq_link *link)
2606 {
2607 struct select_thread_ctx *stctx = (struct select_thread_ctx *)ctx;
2608 struct waitq_set *wqset;
2609 struct waitq *wqsetq;
2610 struct waitq *safeq;
2611 spl_t s;
2612
2613 (void)waitq;
2614
2615 thread_t thread = stctx->thread;
2616 event64_t event = stctx->event;
2617
2618 if (wql_type(link) != WQL_WQS) {
2619 return WQ_ITERATE_CONTINUE;
2620 }
2621
2622 wqset = link->wql_wqs.wql_set;
2623 wqsetq = &wqset->wqset_q;
2624
2625 assert(!waitq_irq_safe(waitq));
2626 assert(!waitq_irq_safe(wqsetq));
2627
2628 waitq_set_lock(wqset);
2629
2630 s = splsched();
2631
2632 /* find and lock the interrupt-safe waitq the thread is thought to be on */
2633 safeq = waitq_get_safeq(wqsetq);
2634 waitq_lock(safeq);
2635
2636 thread_lock(thread);
2637
2638 if ((thread->waitq == wqsetq) && (thread->wait_event == event)) {
2639 waitq_thread_remove(wqsetq, thread);
2640 if (waitq_empty(safeq)) {
2641 safeq->waitq_eventmask = 0;
2642 }
2643 thread_clear_waitq_state(thread);
2644 waitq_unlock(safeq);
2645 waitq_set_unlock(wqset);
2646 /*
2647 * thread still locked,
2648 * return non-zero to break out of WQS walk
2649 */
2650 *(stctx->spl) = s;
2651 return WQ_ITERATE_FOUND;
2652 }
2653
2654 thread_unlock(thread);
2655 waitq_set_unlock(wqset);
2656 waitq_unlock(safeq);
2657 splx(s);
2658
2659 return WQ_ITERATE_CONTINUE;
2660 }
2661
2662 /**
2663 * returns KERN_SUCCESS and locks 'thread' if-and-only-if 'thread' is waiting
2664 * on 'waitq' (or any set to which waitq belongs) for 'event'
2665 *
2666 * Conditions:
2667 * 'waitq' is locked
2668 * 'thread' is unlocked
2669 */
2670 static kern_return_t
2671 waitq_select_thread_locked(struct waitq *waitq,
2672 event64_t event,
2673 thread_t thread, spl_t *spl)
2674 {
2675 struct waitq *safeq;
2676 struct waitq_link *link;
2677 struct select_thread_ctx ctx;
2678 kern_return_t kr;
2679 spl_t s;
2680
2681 s = splsched();
2682
2683 /* Find and lock the interrupts disabled queue the thread is actually on */
2684 if (!waitq_irq_safe(waitq)) {
2685 safeq = waitq_get_safeq(waitq);
2686 waitq_lock(safeq);
2687 } else {
2688 safeq = waitq;
2689 }
2690
2691 thread_lock(thread);
2692
2693 if ((thread->waitq == waitq) && (thread->wait_event == event)) {
2694 waitq_thread_remove(safeq, thread);
2695 if (waitq_empty(safeq)) {
2696 safeq->waitq_eventmask = 0;
2697 }
2698 thread_clear_waitq_state(thread);
2699 *spl = s;
2700 /* thread still locked */
2701 return KERN_SUCCESS;
2702 }
2703
2704 thread_unlock(thread);
2705
2706 if (safeq != waitq) {
2707 waitq_unlock(safeq);
2708 }
2709
2710 splx(s);
2711
2712 if (!waitq->waitq_set_id) {
2713 return KERN_NOT_WAITING;
2714 }
2715
2716 /* check to see if the set ID for this wait queue is valid */
2717 link = wql_get_link(waitq->waitq_set_id);
2718 if (!link) {
2719 /* the waitq set to which this waitq belonged has been invalidated */
2720 waitq->waitq_set_id = 0;
2721 return KERN_NOT_WAITING;
2722 }
2723
2724 /*
2725 * The thread may be waiting on a wait queue set to which
2726 * the input 'waitq' belongs. Go look for the thread in
2727 * all wait queue sets. If it's there, we'll remove it
2728 * because it's equivalent to waiting directly on the input waitq.
2729 */
2730 ctx.thread = thread;
2731 ctx.event = event;
2732 ctx.spl = spl;
2733 kr = walk_waitq_links(LINK_WALK_FULL_DAG, waitq, waitq->waitq_set_id,
2734 WQL_WQS, (void *)&ctx, waitq_select_thread_cb);
2735
2736 wql_put_link(link);
2737
2738 /* we found a thread, return success */
2739 if (kr == WQ_ITERATE_FOUND) {
2740 return KERN_SUCCESS;
2741 }
2742
2743 return KERN_NOT_WAITING;
2744 }
2745
2746 static int
2747 prepost_exists_cb(struct waitq_set __unused *wqset,
2748 void __unused *ctx,
2749 struct wq_prepost __unused *wqp,
2750 struct waitq __unused *waitq)
2751 {
2752 /* if we get here, then we know that there is a valid prepost object! */
2753 return WQ_ITERATE_FOUND;
2754 }
2755
2756 /**
2757 * declare a thread's intent to wait on 'waitq' for 'wait_event'
2758 *
2759 * Conditions:
2760 * 'waitq' is locked
2761 */
2762 wait_result_t
2763 waitq_assert_wait64_locked(struct waitq *waitq,
2764 event64_t wait_event,
2765 wait_interrupt_t interruptible,
2766 wait_timeout_urgency_t urgency,
2767 uint64_t deadline,
2768 uint64_t leeway,
2769 thread_t thread)
2770 {
2771 wait_result_t wait_result;
2772 int realtime = 0;
2773 struct waitq *safeq;
2774 uintptr_t eventmask;
2775 spl_t s;
2776
2777
2778 /*
2779 * Warning: Do _not_ place debugging print statements here.
2780 * The waitq is locked!
2781 */
2782 assert(!thread->started || thread == current_thread());
2783
2784 if (thread->waitq != NULL) {
2785 panic("thread already waiting on %p", thread->waitq);
2786 }
2787
2788 if (waitq_is_set(waitq)) {
2789 struct waitq_set *wqset = (struct waitq_set *)waitq;
2790 /*
2791 * early-out if the thread is waiting on a wait queue set
2792 * that has already been pre-posted.
2793 */
2794 if (wait_event == NO_EVENT64 && waitq_set_maybe_preposted(wqset)) {
2795 int ret;
2796 /*
2797 * Run through the list of potential preposts. Because
2798 * this is a hot path, we short-circuit the iteration
2799 * if we find just one prepost object.
2800 */
2801 ret = wq_prepost_foreach_locked(wqset, NULL,
2802 prepost_exists_cb);
2803 if (ret == WQ_ITERATE_FOUND) {
2804 s = splsched();
2805 thread_lock(thread);
2806 thread->wait_result = THREAD_AWAKENED;
2807 thread_unlock(thread);
2808 splx(s);
2809 return THREAD_AWAKENED;
2810 }
2811 }
2812 }
2813
2814 s = splsched();
2815
2816 /*
2817 * If already dealing with an irq safe wait queue, we are all set.
2818 * Otherwise, determine a global queue to use and lock it.
2819 */
2820 if (!waitq_irq_safe(waitq)) {
2821 safeq = waitq_get_safeq(waitq);
2822 eventmask = _CAST_TO_EVENT_MASK(waitq);
2823 waitq_lock(safeq);
2824 } else {
2825 safeq = waitq;
2826 eventmask = _CAST_TO_EVENT_MASK(wait_event);
2827 }
2828
2829 /* lock the thread now that we have the irq-safe waitq locked */
2830 thread_lock(thread);
2831
2832 /*
2833 * Realtime threads get priority for wait queue placements.
2834 * This allows wait_queue_wakeup_one to prefer a waiting
2835 * realtime thread, similar in principle to performing
2836 * a wait_queue_wakeup_all and allowing scheduler prioritization
2837 * to run the realtime thread, but without causing the
2838 * lock contention of that scenario.
2839 */
2840 if (thread->sched_pri >= BASEPRI_REALTIME) {
2841 realtime = 1;
2842 }
2843
2844 /*
2845 * This is the extent to which we currently take scheduling attributes
2846 * into account. If the thread is vm privileged, we stick it at
2847 * the front of the queue. Later, these queues will honor the policy
2848 * value set at waitq_init time.
2849 */
2850 wait_result = thread_mark_wait_locked(thread, interruptible);
2851 /* thread->wait_result has been set */
2852 if (wait_result == THREAD_WAITING) {
2853 if (!safeq->waitq_fifo
2854 || (thread->options & TH_OPT_VMPRIV) || realtime) {
2855 waitq_thread_insert(safeq, thread, false);
2856 } else {
2857 waitq_thread_insert(safeq, thread, true);
2858 }
2859
2860 /* mark the event and real waitq, even if enqueued on a global safeq */
2861 thread->wait_event = wait_event;
2862 thread->waitq = waitq;
2863
2864 if (deadline != 0) {
2865 boolean_t act;
2866
2867 act = timer_call_enter_with_leeway(&thread->wait_timer,
2868 NULL,
2869 deadline, leeway,
2870 urgency, FALSE);
2871 if (!act) {
2872 thread->wait_timer_active++;
2873 }
2874 thread->wait_timer_is_set = TRUE;
2875 }
2876
2877 if (waitq_is_global(safeq)) {
2878 safeq->waitq_eventmask |= eventmask;
2879 }
2880
2881 waitq_stats_count_wait(waitq);
2882 }
2883
2884 /* unlock the thread */
2885 thread_unlock(thread);
2886
2887 /* update the inheritor's thread priority if the waitq is embedded in turnstile */
2888 if (waitq_is_turnstile_queue(safeq) && wait_result == THREAD_WAITING) {
2889 turnstile_recompute_priority_locked(waitq_to_turnstile(safeq));
2890 turnstile_update_inheritor_locked(waitq_to_turnstile(safeq));
2891 }
2892
2893 /* unlock the safeq if we locked it here */
2894 if (safeq != waitq) {
2895 waitq_unlock(safeq);
2896 }
2897
2898 splx(s);
2899
2900 return wait_result;
2901 }
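/*
 * Illustrative sketch (not from the original file): the canonical
 * assert-wait-then-block sequence built on the locked primitive above,
 * shown for a non-IRQ-safe waitq. 'example_wait_on_event' is a
 * hypothetical helper; production callers normally go through the
 * unlocked waitq_assert_wait64() wrapper instead.
 */
static wait_result_t
example_wait_on_event(struct waitq *wq, event64_t event)
{
	wait_result_t wr;

	waitq_lock(wq);
	wr = waitq_assert_wait64_locked(wq, event, THREAD_UNINT,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    0 /* deadline: 0 == no timeout */, 0 /* leeway */,
	    current_thread());
	waitq_unlock(wq);

	if (wr == THREAD_WAITING) {
		/* the thread only actually blocks here */
		wr = thread_block(THREAD_CONTINUE_NULL);
	}
	return wr;
}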
2902
2903 /**
2904 * remove 'thread' from its current blocking state on 'waitq'
2905 *
2906 * Conditions:
2907 * 'thread' is locked
2908 *
2909 * Notes:
2910 * This function is primarily used by clear_wait_internal in
2911 * sched_prim.c from the thread timer wakeup path
2912 * (i.e. the thread was waiting on 'waitq' with a timeout that expired)
2913 */
2914 int
2915 waitq_pull_thread_locked(struct waitq *waitq, thread_t thread)
2916 {
2917 struct waitq *safeq;
2918
2919 assert_thread_magic(thread);
2920 assert(thread->waitq == waitq);
2921
2922 /* Find the interrupts disabled queue thread is waiting on */
2923 if (!waitq_irq_safe(waitq)) {
2924 safeq = waitq_get_safeq(waitq);
2925 } else {
2926 safeq = waitq;
2927 }
2928
2929 /* thread is already locked so have to try for the waitq lock */
2930 if (!waitq_lock_try(safeq)) {
2931 return 0;
2932 }
2933
2934 waitq_thread_remove(safeq, thread);
2935 thread_clear_waitq_state(thread);
2936 waitq_stats_count_clear_wakeup(waitq);
2937
2938 /* clear the global event mask if this was the last thread there! */
2939 if (waitq_is_global(safeq) && waitq_empty(safeq)) {
2940 safeq->waitq_eventmask = 0;
2941 /* JMM - also mark no-waiters on waitq (if not the same as the safeq) */
2942 }
2943
2944 waitq_unlock(safeq);
2945
2946 return 1;
2947 }
2948
2949
2950 static __inline__
2951 void
2952 maybe_adjust_thread_pri(thread_t thread,
2953 int priority,
2954 __kdebug_only struct waitq *waitq)
2955 {
2956 /*
2957 * If the caller is requesting the waitq subsystem to promote the
2958 * priority of the awoken thread, then boost the thread's priority to
2959 * the default WAITQ_BOOST_PRIORITY (if it's not already equal or
2960 * higher priority). This boost must be removed via a call to
2961 * waitq_clear_promotion_locked before the thread waits again.
2962 *
2963 * WAITQ_PROMOTE_PRIORITY is -2.
2964 * Anything above 0 represents a mutex promotion.
2965 * The default 'no action' value is -1.
2966 * TODO: define this in a header
2967 */
2968 if (priority == WAITQ_PROMOTE_PRIORITY) {
2969 uintptr_t trace_waitq = 0;
2970 if (__improbable(kdebug_enable)) {
2971 trace_waitq = VM_KERNEL_UNSLIDE_OR_PERM(waitq);
2972 }
2973
2974 sched_thread_promote_reason(thread, TH_SFLAG_WAITQ_PROMOTED, trace_waitq);
2975 }
2976 }
2977
2978 /*
2979 * Clear a potential thread priority promotion from a waitq wakeup
2980 * with WAITQ_PROMOTE_PRIORITY.
2981 *
2982 * This must be called on the thread which was woken up with TH_SFLAG_WAITQ_PROMOTED.
2983 */
2984 void
2985 waitq_clear_promotion_locked(struct waitq *waitq, thread_t thread)
2986 {
2987 spl_t s;
2988
2989 assert(waitq_held(waitq));
2990 assert(thread != THREAD_NULL);
2991 assert(thread == current_thread());
2992
2993 /* This flag is only cleared by the thread itself, so safe to check outside lock */
2994 if ((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) != TH_SFLAG_WAITQ_PROMOTED) {
2995 return;
2996 }
2997
2998 if (!waitq_irq_safe(waitq)) {
2999 s = splsched();
3000 }
3001 thread_lock(thread);
3002
3003 sched_thread_unpromote_reason(thread, TH_SFLAG_WAITQ_PROMOTED, 0);
3004
3005 thread_unlock(thread);
3006 if (!waitq_irq_safe(waitq)) {
3007 splx(s);
3008 }
3009 }
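/*
 * Illustrative sketch (not from the original file): a thread that was
 * woken with WAITQ_PROMOTE_PRIORITY drops the boost itself, with the
 * waitq held, before it blocks again. The helper name is hypothetical.
 */
static void
example_drop_waitq_promotion(struct waitq *wq)
{
	waitq_lock(wq);
	/* no-op unless TH_SFLAG_WAITQ_PROMOTED is set on the current thread */
	waitq_clear_promotion_locked(wq, current_thread());
	waitq_unlock(wq);
}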
3010
3011 /**
3012 * wakeup all threads waiting on 'waitq' for 'wake_event'
3013 *
3014 * Conditions:
3015 * 'waitq' is locked
3016 *
3017 * Notes:
3018 * May temporarily disable and re-enable interrupts
3019 * and re-adjust thread priority of each awoken thread.
3020 *
3021 * If the input 'lock_state' == WAITQ_UNLOCK then the waitq will have
3022 * been unlocked before calling thread_go() on any returned threads, and
3023 * is guaranteed to be unlocked upon function return.
3024 */
3025 kern_return_t
3026 waitq_wakeup64_all_locked(struct waitq *waitq,
3027 event64_t wake_event,
3028 wait_result_t result,
3029 uint64_t *reserved_preposts,
3030 int priority,
3031 waitq_lock_state_t lock_state)
3032 {
3033 kern_return_t ret;
3034 thread_t thread;
3035 spl_t th_spl;
3036 int nthreads;
3037 queue_head_t wakeup_queue;
3038
3039 assert(waitq_held(waitq));
3040 queue_init(&wakeup_queue);
3041
3042 nthreads = waitq_select_n_locked(waitq, wake_event, NULL, NULL,
3043 reserved_preposts,
3044 &wakeup_queue, -1, &th_spl, priority);
3045
3046 /* set each thread running */
3047 ret = KERN_NOT_WAITING;
3048
3049 #if CONFIG_WAITQ_STATS
3050 qe_foreach_element(thread, &wakeup_queue, wait_links)
3051 waitq_stats_count_wakeup(waitq);
3052 #endif
3053 if (lock_state == WAITQ_UNLOCK) {
3054 waitq_unlock(waitq);
3055 }
3056
3057 qe_foreach_element_safe(thread, &wakeup_queue, wait_links) {
3058 assert_thread_magic(thread);
3059 remqueue(&thread->wait_links);
3060 maybe_adjust_thread_pri(thread, priority, waitq);
3061 ret = thread_go(thread, result);
3062 assert(ret == KERN_SUCCESS);
3063 thread_unlock(thread);
3064 }
3065 if (nthreads > 0) {
3066 splx(th_spl);
3067 } else {
3068 waitq_stats_count_fail(waitq);
3069 }
3070
3071 return ret;
3072 }
3073
3074 /**
3075 * wakeup one thread waiting on 'waitq' for 'wake_event'
3076 *
3077 * Conditions:
3078 * 'waitq' is locked
3079 *
3080 * Notes:
3081 * May temporarily disable and re-enable interrupts.
3082 */
3083 kern_return_t
3084 waitq_wakeup64_one_locked(struct waitq *waitq,
3085 event64_t wake_event,
3086 wait_result_t result,
3087 uint64_t *reserved_preposts,
3088 int priority,
3089 waitq_lock_state_t lock_state)
3090 {
3091 thread_t thread;
3092 spl_t th_spl;
3093
3094 assert(waitq_held(waitq));
3095
3096 thread = waitq_select_one_locked(waitq, wake_event,
3097 reserved_preposts,
3098 priority, &th_spl);
3099
3100 if (thread != THREAD_NULL) {
3101 waitq_stats_count_wakeup(waitq);
3102 } else {
3103 waitq_stats_count_fail(waitq);
3104 }
3105
3106 if (lock_state == WAITQ_UNLOCK) {
3107 waitq_unlock(waitq);
3108 }
3109
3110 if (thread != THREAD_NULL) {
3111 maybe_adjust_thread_pri(thread, priority, waitq);
3112 kern_return_t ret = thread_go(thread, result);
3113 assert(ret == KERN_SUCCESS);
3114 thread_unlock(thread);
3115 splx(th_spl);
3116 return ret;
3117 }
3118
3119 return KERN_NOT_WAITING;
3120 }
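/*
 * Illustrative sketch (not from the original file): waking a single
 * waiter for an event and letting the primitive drop the waitq lock via
 * WAITQ_UNLOCK. The helper is hypothetical and assumes a non-IRQ-safe
 * waitq that is not a member of any prepost-enabled set, so no reserved
 * prepost IDs are passed (NULL).
 */
static kern_return_t
example_wake_one(struct waitq *wq, event64_t event)
{
	waitq_lock(wq);
	/* returns KERN_NOT_WAITING if nothing was waiting on 'event' */
	return waitq_wakeup64_one_locked(wq, event, THREAD_AWAKENED,
	    NULL, WAITQ_ALL_PRIORITIES, WAITQ_UNLOCK);
}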
3121
3122 /**
3123 * wakeup one thread waiting on 'waitq' for 'wake_event'
3124 *
3125 * Conditions:
3126 * 'waitq' is locked
3127 *
3128 * Returns:
3129 * A locked, runnable thread.
3130 * If return value is non-NULL, interrupts have also
3131 * been disabled, and the caller is responsible to call
3132 * splx() with the returned '*spl' value.
3133 */
3134 thread_t
3135 waitq_wakeup64_identify_locked(struct waitq *waitq,
3136 event64_t wake_event,
3137 wait_result_t result,
3138 spl_t *spl,
3139 uint64_t *reserved_preposts,
3140 int priority,
3141 waitq_lock_state_t lock_state)
3142 {
3143 thread_t thread;
3144
3145 assert(waitq_held(waitq));
3146
3147 thread = waitq_select_one_locked(waitq, wake_event,
3148 reserved_preposts,
3149 priority, spl);
3150
3151 if (thread != THREAD_NULL) {
3152 waitq_stats_count_wakeup(waitq);
3153 } else {
3154 waitq_stats_count_fail(waitq);
3155 }
3156
3157 if (lock_state == WAITQ_UNLOCK) {
3158 waitq_unlock(waitq);
3159 }
3160
3161 if (thread != THREAD_NULL) {
3162 kern_return_t __assert_only ret;
3163 ret = thread_go(thread, result);
3164 assert(ret == KERN_SUCCESS);
3165 }
3166
3167 return thread; /* locked if not NULL (caller responsible for spl) */
3168 }
3169
3170 /**
3171 * wakeup a specific thread iff it's waiting on 'waitq' for 'wake_event'
3172 *
3173 * Conditions:
3174 * 'waitq' is locked
3175 * 'thread' is unlocked
3176 *
3177 * Notes:
3178 * May temporarily disable and re-enable interrupts
3179 *
3180 * If the input lock_state == WAITQ_UNLOCK then the waitq will have been
3181 * unlocked before calling thread_go() if 'thread' is to be awoken, and
3182 * is guaranteed to be unlocked upon function return.
3183 */
3184 kern_return_t
3185 waitq_wakeup64_thread_locked(struct waitq *waitq,
3186 event64_t wake_event,
3187 thread_t thread,
3188 wait_result_t result,
3189 waitq_lock_state_t lock_state)
3190 {
3191 kern_return_t ret;
3192 spl_t th_spl;
3193
3194 assert(waitq_held(waitq));
3195 assert_thread_magic(thread);
3196
3197 /*
3198 * See if the thread was still waiting there. If so, it got
3199 * dequeued and returned locked.
3200 */
3201 ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl);
3202
3203 if (ret == KERN_SUCCESS) {
3204 waitq_stats_count_wakeup(waitq);
3205 } else {
3206 waitq_stats_count_fail(waitq);
3207 }
3208
3209 if (lock_state == WAITQ_UNLOCK) {
3210 waitq_unlock(waitq);
3211 }
3212
3213 if (ret != KERN_SUCCESS) {
3214 return KERN_NOT_WAITING;
3215 }
3216
3217 ret = thread_go(thread, result);
3218 assert(ret == KERN_SUCCESS);
3219 thread_unlock(thread);
3220 splx(th_spl);
3221
3222 return ret;
3223 }
3224
3225
3226
3227 /* ----------------------------------------------------------------------
3228 *
3229 * In-Kernel API
3230 *
3231 * ---------------------------------------------------------------------- */
3232
3233 /**
3234 * initialize a waitq object
3235 */
3236 kern_return_t
3237 waitq_init(struct waitq *waitq, int policy)
3238 {
3239 assert(waitq != NULL);
3240
3241 /* only FIFO and LIFO for now */
3242 if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0) {
3243 return KERN_INVALID_ARGUMENT;
3244 }
3245
3246 waitq->waitq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
3247 waitq->waitq_irq = !!(policy & SYNC_POLICY_DISABLE_IRQ);
3248 waitq->waitq_prepost = 0;
3249 waitq->waitq_type = WQT_QUEUE;
3250 waitq->waitq_turnstile_or_port = !!(policy & SYNC_POLICY_TURNSTILE);
3251 waitq->waitq_eventmask = 0;
3252
3253 waitq->waitq_set_id = 0;
3254 waitq->waitq_prepost_id = 0;
3255
3256 waitq_lock_init(waitq);
3257 if (waitq_is_turnstile_queue(waitq)) {
3258 /* For turnstile, initialize it as a priority queue */
3259 priority_queue_init(&waitq->waitq_prio_queue,
3260 PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
3261 assert(waitq->waitq_fifo == 0);
3262 } else {
3263 queue_init(&waitq->waitq_queue);
3264 }
3265
3266 waitq->waitq_isvalid = 1;
3267 return KERN_SUCCESS;
3268 }
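/*
 * Illustrative sketch (not from the original file): initializing and
 * tearing down an embedded waitq. FIFO is the default ordering;
 * SYNC_POLICY_REVERSED selects LIFO, and SYNC_POLICY_FIXED_PRIORITY is
 * rejected above. The helper name is hypothetical.
 */
static void
example_embedded_waitq_lifecycle(struct waitq *wq)
{
	kern_return_t kr;

	kr = waitq_init(wq, SYNC_POLICY_FIFO);
	assert(kr == KERN_SUCCESS);

	/* ... threads wait on and are woken from 'wq' ... */

	waitq_deinit(wq);   /* invalidates the waitq and drops its links/preposts */
}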
3269
3270 struct wq_unlink_ctx {
3271 struct waitq *unlink_wq;
3272 struct waitq_set *unlink_wqset;
3273 };
3274
3275 static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx,
3276 struct wq_prepost *wqp, struct waitq *waitq);
3277
3278 /**
3279 * walk_waitq_links callback to invalidate 'link' parameter
3280 *
3281 * Conditions:
3282 * Called from walk_waitq_links.
3283 * Note that unlike other callbacks, this one makes no assumptions about
3284 * the 'waitq' parameter; specifically, it does not have to be locked or
3285 * even valid.
3286 */
3287 static int
3288 waitq_unlink_all_cb(struct waitq *waitq, void *ctx,
3289 struct waitq_link *link)
3290 {
3291 (void)waitq;
3292 (void)ctx;
3293 if (wql_type(link) == WQL_LINK && wql_is_valid(link)) {
3294 wql_invalidate(link);
3295 }
3296
3297 if (wql_type(link) == WQL_WQS) {
3298 struct waitq_set *wqset;
3299 struct wq_unlink_ctx ulctx;
3300
3301 /*
3302 * When destroying the waitq, take the time to clear out any
3303 * preposts it may have made. This could potentially save time
3304 * on the IPC send path which would otherwise have to iterate
3305 * over lots of dead port preposts.
3306 */
3307 if (waitq->waitq_prepost_id == 0) {
3308 goto out;
3309 }
3310
3311 wqset = link->wql_wqs.wql_set;
3312 assert(wqset != NULL);
3313 assert(!waitq_irq_safe(&wqset->wqset_q));
3314
3315 waitq_set_lock(wqset);
3316
3317 if (!waitq_set_is_valid(wqset)) {
3318 /* someone raced us to teardown */
3319 goto out_unlock;
3320 }
3321 if (!waitq_set_maybe_preposted(wqset)) {
3322 goto out_unlock;
3323 }
3324
3325 ulctx.unlink_wq = waitq;
3326 ulctx.unlink_wqset = wqset;
3327 (void)wq_prepost_iterate(wqset->wqset_prepost_id, &ulctx,
3328 waitq_unlink_prepost_cb);
3329 out_unlock:
3330 waitq_set_unlock(wqset);
3331 }
3332
3333 out:
3334 return WQ_ITERATE_CONTINUE;
3335 }
3336
3337
3338 /**
3339 * cleanup any link/prepost table resources associated with a waitq
3340 */
3341 void
3342 waitq_deinit(struct waitq *waitq)
3343 {
3344 spl_t s;
3345
3346 if (!waitq || !waitq_is_queue(waitq)) {
3347 return;
3348 }
3349
3350 if (waitq_irq_safe(waitq)) {
3351 s = splsched();
3352 }
3353 waitq_lock(waitq);
3354 if (!waitq_valid(waitq)) {
3355 waitq_unlock(waitq);
3356 if (waitq_irq_safe(waitq)) {
3357 splx(s);
3358 }
3359 return;
3360 }
3361
3362 waitq->waitq_isvalid = 0;
3363
3364 if (!waitq_irq_safe(waitq)) {
3365 waitq_unlink_all_unlock(waitq);
3366 /* waitq unlocked and set links deallocated */
3367 } else {
3368 waitq_unlock(waitq);
3369 splx(s);
3370 }
3371
3372 assert(waitq_empty(waitq));
3373 }
3374
3375 void
3376 waitq_invalidate_locked(struct waitq *waitq)
3377 {
3378 assert(waitq_held(waitq));
3379 assert(waitq_is_valid(waitq));
3380 waitq->waitq_isvalid = 0;
3381 }
3382
3383 /**
3384 * invalidate the given wq_prepost object
3385 *
3386 * Conditions:
3387 * Called from wq_prepost_iterate (_not_ from wq_prepost_foreach_locked!)
3388 */
3389 static int
3390 wqset_clear_prepost_chain_cb(struct waitq_set __unused *wqset,
3391 void __unused *ctx,
3392 struct wq_prepost *wqp,
3393 struct waitq __unused *waitq)
3394 {
3395 if (wqp_type(wqp) == WQP_POST) {
3396 wq_prepost_invalidate(wqp);
3397 }
3398 return WQ_ITERATE_CONTINUE;
3399 }
3400
3401
3402 /**
3403 * allocate and initialize a waitq set object
3404 *
3405 * Conditions:
3406 * may block
3407 *
3408 * Returns:
3409 * allocated / initialized waitq_set object.
3410 * the waitq_set object returned does not have
3411 * a waitq_link associated.
3412 *
3413 * NULL on failure
3414 */
3415 struct waitq_set *
3416 waitq_set_alloc(int policy, waitq_set_prepost_hook_t *prepost_hook)
3417 {
3418 struct waitq_set *wqset;
3419
3420 wqset = (struct waitq_set *)zalloc(waitq_set_zone);
3421 if (!wqset) {
3422 panic("Can't allocate a new waitq set from zone %p", waitq_set_zone);
3423 }
3424
3425 kern_return_t ret;
3426 ret = waitq_set_init(wqset, policy, NULL, prepost_hook);
3427 if (ret != KERN_SUCCESS) {
3428 zfree(waitq_set_zone, wqset);
3429 wqset = NULL;
3430 }
3431
3432 return wqset;
3433 }
3434
3435 /**
3436 * initialize a waitq set object
3437 *
3438 * if no 'reserved_link' object is passed,
3439 * the waitq_link will be lazily allocated
3440 * on demand through waitq_set_lazy_init_link.
3441 */
3442 kern_return_t
3443 waitq_set_init(struct waitq_set *wqset,
3444 int policy, uint64_t *reserved_link,
3445 waitq_set_prepost_hook_t *prepost_hook)
3446 {
3447 struct waitq_link *link;
3448 kern_return_t ret;
3449
3450 memset(wqset, 0, sizeof(*wqset));
3451
3452 ret = waitq_init(&wqset->wqset_q, policy);
3453 if (ret != KERN_SUCCESS) {
3454 return ret;
3455 }
3456
3457 wqset->wqset_q.waitq_type = WQT_SET;
3458 if (policy & SYNC_POLICY_PREPOST) {
3459 wqset->wqset_q.waitq_prepost = 1;
3460 wqset->wqset_prepost_id = 0;
3461 assert(prepost_hook == NULL);
3462 } else {
3463 wqset->wqset_q.waitq_prepost = 0;
3464 wqset->wqset_prepost_hook = prepost_hook;
3465 }
3466
3467 if (reserved_link && *reserved_link != 0) {
3468 link = wql_get_reserved(*reserved_link, WQL_WQS);
3469
3470 if (!link) {
3471 panic("Can't allocate link object for waitq set: %p", wqset);
3472 }
3473
3474 /* always consume the caller's reference */
3475 *reserved_link = 0;
3476
3477 link->wql_wqs.wql_set = wqset;
3478 wql_mkvalid(link);
3479
3480 wqset->wqset_id = link->wql_setid.id;
3481 wql_put_link(link);
3482 } else {
3483 /*
3484 * Lazy allocate the link only when an actual id is needed.
3485 */
3486 wqset->wqset_id = WQSET_NOT_LINKED;
3487 }
3488
3489 return KERN_SUCCESS;
3490 }
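/*
 * Illustrative sketch (not from the original file): allocating a
 * prepost-capable waitq set and releasing it. waitq_set_init() above
 * asserts that a prepost hook and SYNC_POLICY_PREPOST are mutually
 * exclusive, hence the NULL hook. The helper name is hypothetical.
 */
static void
example_waitq_set_lifecycle(void)
{
	struct waitq_set *wqset;

	wqset = waitq_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, NULL);
	if (wqset == NULL) {
		return;     /* waitq_set_init() failed inside waitq_set_alloc() */
	}

	/* ... link member waitqs, wait on the set, consume preposts ... */

	(void)waitq_set_free(wqset);    /* deinit + zfree back to waitq_set_zone */
}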
3491
3492 #if DEVELOPMENT || DEBUG
3493
3494 int
3495 sysctl_helper_waitq_set_nelem(void)
3496 {
3497 return ltable_nelem(&g_wqlinktable);
3498 }
3499
3500 #endif
3501
3502 /**
3503 * initialize a waitq set link.
3504 *
3505 * Conditions:
3506 * may block
3507 * locks and unlocks the waitq set lock
3508 *
3509 */
3510 void
3511 waitq_set_lazy_init_link(struct waitq_set *wqset)
3512 {
3513 struct waitq_link *link;
3514
3515 assert(get_preemption_level() == 0 && waitq_wait_possible(current_thread()));
3516
3517 waitq_set_lock(wqset);
3518 if (!waitq_set_should_lazy_init_link(wqset)) {
3519 waitq_set_unlock(wqset);
3520 return;
3521 }
3522
3523 assert(wqset->wqset_id == WQSET_NOT_LINKED);
3524 waitq_set_unlock(wqset);
3525
3526 link = wql_alloc_link(WQL_WQS);
3527 if (!link) {
3528 panic("Can't allocate link object for waitq set: %p", wqset);
3529 }
3530
3531 link->wql_wqs.wql_set = wqset;
3532
3533 waitq_set_lock(wqset);
3534 if (waitq_set_should_lazy_init_link(wqset)) {
3535 wql_mkvalid(link);
3536 wqset->wqset_id = link->wql_setid.id;
3537 }
3538
3539 assert(wqset->wqset_id != 0);
3540 assert(wqset->wqset_id != WQSET_NOT_LINKED);
3541
3542 waitq_set_unlock(wqset);
3543
3544 wql_put_link(link);
3545
3546 return;
3547 }
3548
3549 /**
3550 * checks if a waitq set needs to be linked.
3551 *
3552 */
3553 boolean_t
3554 waitq_set_should_lazy_init_link(struct waitq_set *wqset)
3555 {
3556 if (waitqs_is_linked(wqset) || wqset->wqset_id == 0) {
3557 return FALSE;
3558 }
3559 return TRUE;
3560 }
3561
3562 /**
3563 * clear out / release any resources associated with a waitq set
3564 *
3565 * Conditions:
3566 * may block
3567 * Note:
3568 * This will render the waitq set invalid, and it must
3569 * be re-initialized with waitq_set_init before it can be used again
3570 */
3571 void
3572 waitq_set_deinit(struct waitq_set *wqset)
3573 {
3574 struct waitq_link *link = NULL;
3575 uint64_t set_id, prepost_id;
3576
3577 if (!waitqs_is_set(wqset)) {
3578 panic("trying to de-initialize an invalid wqset @%p", wqset);
3579 }
3580
3581 assert(!waitq_irq_safe(&wqset->wqset_q));
3582
3583 waitq_set_lock(wqset);
3584
3585 if (waitq_set_has_prepost_hook(wqset)) {
3586 waitq_set_prepost_hook_t *hook = wqset->wqset_prepost_hook;
3587 /*
3588 * If the wqset_prepost_hook value is non-zero,
3589 * then another core is currently posting to this waitq set
3590 * and we need for it to finish what it's doing.
3591 */
3592 while (os_atomic_load(hook, relaxed) != 0) {
3593 waitq_set_unlock(wqset);
3594 delay(1);
3595 waitq_set_lock(wqset);
3596 }
3597 }
3598
3599 set_id = wqset->wqset_id;
3600
3601 if (waitqs_is_linked(wqset) || set_id == 0) {
3602 /* grab the set's link object */
3603 link = wql_get_link(set_id);
3604 if (link) {
3605 wql_invalidate(link);
3606 }
3607 /* someone raced us to deinit */
3608 if (!link || wqset->wqset_id != set_id || set_id != link->wql_setid.id) {
3609 if (link) {
3610 wql_put_link(link);
3611 }
3612 waitq_set_unlock(wqset);
3613 return;
3614 }
3615
3616 /* the link should be a valid link object at this point */
3617 assert(link != NULL && wql_type(link) == WQL_WQS);
3618
3619 wqset->wqset_id = 0;
3620 }
3621
3622 /*
3623 * This set may have a lot of preposts, or may have been a member of
3624 * many other sets. To minimize spinlock hold times, we clear out the
3625 * waitq set data structure under the lock-hold, but don't clear any
3626 * table objects. We keep handles to the prepost and set linkage
3627 * objects and free those outside the critical section.
3628 */
3629 prepost_id = 0;
3630 if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id) {
3631 assert(link != NULL);
3632 prepost_id = wqset->wqset_prepost_id;
3633 }
3634 /* else { TODO: notify kqueue subsystem? } */
3635 wqset->wqset_prepost_id = 0;
3636
3637 wqset->wqset_q.waitq_fifo = 0;
3638 wqset->wqset_q.waitq_prepost = 0;
3639 wqset->wqset_q.waitq_isvalid = 0;
3640
3641 /* don't clear the 'waitq_irq' bit: it's used in locking! */
3642 wqset->wqset_q.waitq_eventmask = 0;
3643
3644 waitq_unlink_all_unlock(&wqset->wqset_q);
3645 /* wqset->wqset_q unlocked and set links deallocated */
3646
3647
3648 if (link) {
3649 /*
3650 * walk_waitq_links may race with us for access to the waitq set.
3651 * If walk_waitq_links has a reference to the set, then we should wait
3652 * until the link's refcount goes to 1 (our reference) before we exit
3653 * this function. That way we ensure that the waitq set memory will
3654 * remain valid even though it's been cleared out.
3655 */
3656 while (wql_refcnt(link) > 1) {
3657 delay(1);
3658 }
3659 wql_put_link(link);
3660 }
3661
3662 /* drop / unlink all the prepost table objects */
3663 /* JMM - can this happen before the delay? */
3664 if (prepost_id) {
3665 (void)wq_prepost_iterate(prepost_id, NULL,
3666 wqset_clear_prepost_chain_cb);
3667 }
3668 }
3669
3670 /**
3671 * de-initialize and free an allocated waitq set object
3672 *
3673 * Conditions:
3674 * may block
3675 */
3676 kern_return_t
3677 waitq_set_free(struct waitq_set *wqset)
3678 {
3679 waitq_set_deinit(wqset);
3680
3681 memset(wqset, 0, sizeof(*wqset));
3682 zfree(waitq_set_zone, wqset);
3683
3684 return KERN_SUCCESS;
3685 }
3686
3687 #if DEVELOPMENT || DEBUG
3688 #if CONFIG_WAITQ_DEBUG
3689 /**
3690 * return the set ID of 'wqset'
3691 */
3692 uint64_t
3693 wqset_id(struct waitq_set *wqset)
3694 {
3695 if (!wqset) {
3696 return 0;
3697 }
3698
3699 assert(waitqs_is_set(wqset));
3700
3701 if (!waitqs_is_linked(wqset)) {
3702 waitq_set_lazy_init_link(wqset);
3703 }
3704
3705 return wqset->wqset_id;
3706 }
3707
3708 /**
3709 * returns a pointer to the waitq object embedded in 'wqset'
3710 */
3711 struct waitq *
3712 wqset_waitq(struct waitq_set *wqset)
3713 {
3714 if (!wqset) {
3715 return NULL;
3716 }
3717
3718 assert(waitqs_is_set(wqset));
3719
3720 return &wqset->wqset_q;
3721 }
3722 #endif /* CONFIG_WAITQ_DEBUG */
3723 #endif /* DEVELOPMENT || DEBUG */
3724
3725
3726 /**
3727 * clear all preposts originating from 'waitq'
3728 *
3729 * Conditions:
3730 * 'waitq' locked
3731 * may (rarely) spin waiting for another on-core thread to
3732 * release the last reference to the waitq's prepost link object
3733 *
3734 * NOTE:
3735 * If this function needs to spin, it will drop the waitq lock!
3736 * The return value of the function indicates whether or not this
3737 * happened: 1 == lock was dropped, 0 == lock held
3738 */
3739 int
3740 waitq_clear_prepost_locked(struct waitq *waitq)
3741 {
3742 struct wq_prepost *wqp;
3743 int dropped_lock = 0;
3744
3745 assert(!waitq_irq_safe(waitq));
3746
3747 if (waitq->waitq_prepost_id == 0) {
3748 return 0;
3749 }
3750
3751 wqp = wq_prepost_get(waitq->waitq_prepost_id);
3752 waitq->waitq_prepost_id = 0;
3753 if (wqp) {
3754 uint64_t wqp_id = wqp->wqp_prepostid.id;
3755 wqdbg_v("invalidate prepost 0x%llx (refcnt:%d)",
3756 wqp->wqp_prepostid.id, wqp_refcnt(wqp));
3757 wq_prepost_invalidate(wqp);
3758 while (wqp_refcnt(wqp) > 1) {
3759 /*
3760 * Some other thread must have raced us to grab a link
3761 * object reference before we invalidated it. This
3762 * means that they are probably trying to access the
3763 * waitq to which the prepost object points. We need
3764 * to wait here until the other thread drops their
3765 * reference. We know that no one else can get a
3766 * reference (the object has been invalidated), and
3767 * that prepost references are short-lived (dropped on
3768 * a call to wq_prepost_put). We also know that no one
3769 * blocks while holding a reference therefore the
3770 * other reference holder must be on-core. We'll just
3771 * sit and wait for the other reference to be dropped.
3772 */
3773 disable_preemption();
3774
3775 waitq_unlock(waitq);
3776 dropped_lock = 1;
3777 /*
3778 * don't yield here, just spin and assume the other
3779 * consumer is already on core...
3780 */
3781 delay(1);
3782
3783 waitq_lock(waitq);
3784
3785 enable_preemption();
3786 }
3787 if (wqp_refcnt(wqp) > 0 && wqp->wqp_prepostid.id == wqp_id) {
3788 wq_prepost_put(wqp);
3789 }
3790 }
3791
3792 return dropped_lock;
3793 }
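/*
 * Illustrative sketch (not from the original file): how a caller can
 * react to the "lock was dropped" return value documented above when it
 * needs to keep using the waitq afterwards. The helper name is
 * hypothetical.
 */
static void
example_clear_prepost_and_continue(struct waitq *wq)
{
	waitq_lock(wq);
	if (waitq_clear_prepost_locked(wq)) {
		/* the lock was dropped and re-taken: re-validate before continuing */
		if (!waitq_valid(wq)) {
			waitq_unlock(wq);
			return;
		}
	}

	/* ... the waitq is still locked (and valid) here ... */

	waitq_unlock(wq);
}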
3794
3795 /**
3796 * clear all preposts originating from 'waitq'
3797 *
3798 * Conditions:
3799 * 'waitq' is not locked
3800 * may disable and re-enable interrupts
3801 */
3802 void
3803 waitq_clear_prepost(struct waitq *waitq)
3804 {
3805 assert(waitq_valid(waitq));
3806 assert(!waitq_irq_safe(waitq));
3807
3808 waitq_lock(waitq);
3809 /* it doesn't matter to us if the lock is dropped here */
3810 (void)waitq_clear_prepost_locked(waitq);
3811 waitq_unlock(waitq);
3812 }
3813
3814 /**
3815 * return the waitq's prepost object ID (allocating it if necessary)
3816 *
3817 * Conditions:
3818 * 'waitq' is unlocked
3819 */
3820 uint64_t
3821 waitq_get_prepost_id(struct waitq *waitq)
3822 {
3823 struct wq_prepost *wqp;
3824 uint64_t wqp_id = 0;
3825
3826 if (!waitq_valid(waitq)) {
3827 return 0;
3828 }
3829
3830 assert(!waitq_irq_safe(waitq));
3831
3832 waitq_lock(waitq);
3833
3834 if (!waitq_valid(waitq)) {
3835 goto out_unlock;
3836 }
3837
3838 if (waitq->waitq_prepost_id) {
3839 wqp_id = waitq->waitq_prepost_id;
3840 goto out_unlock;
3841 }
3842
3843 /* don't hold a spinlock while allocating a prepost object */
3844 waitq_unlock(waitq);
3845
3846 wqp = wq_prepost_alloc(WQP_WQ, 1);
3847 if (!wqp) {
3848 return 0;
3849 }
3850
3851 /* re-acquire the waitq lock */
3852 waitq_lock(waitq);
3853
3854 if (!waitq_valid(waitq)) {
3855 wq_prepost_put(wqp);
3856 wqp_id = 0;
3857 goto out_unlock;
3858 }
3859
3860 if (waitq->waitq_prepost_id) {
3861 /* we were beat by someone else */
3862 wq_prepost_put(wqp);
3863 wqp_id = waitq->waitq_prepost_id;
3864 goto out_unlock;
3865 }
3866
3867 wqp->wqp_wq.wqp_wq_ptr = waitq;
3868
3869 wqp_set_valid(wqp);
3870 wqp_id = wqp->wqp_prepostid.id;
3871 waitq->waitq_prepost_id = wqp_id;
3872
3873 wq_prepost_put(wqp);
3874
3875 out_unlock:
3876 waitq_unlock(waitq);
3877
3878 return wqp_id;
3879 }
3880
3881
3882 static int
3883 waitq_inset_cb(struct waitq *waitq, void *ctx, struct waitq_link *link)
3884 {
3885 uint64_t setid = *(uint64_t *)ctx;
3886 int wqltype = wql_type(link);
3887 (void)waitq;
3888 if (wqltype == WQL_WQS && link->wql_setid.id == setid) {
3889 wqdbg_v(" waitq already in set 0x%llx", setid);
3890 return WQ_ITERATE_FOUND;
3891 } else if (wqltype == WQL_LINK) {
3892 /*
3893 * break out early if we see a link that points to the setid
3894 * in question. This saves us a step in the
3895 * iteration/recursion
3896 */
3897 wqdbg_v(" waitq already in set 0x%llx (WQL_LINK)", setid);
3898 if (link->wql_link.left_setid == setid ||
3899 link->wql_link.right_setid == setid) {
3900 return WQ_ITERATE_FOUND;
3901 }
3902 }
3903
3904 return WQ_ITERATE_CONTINUE;
3905 }
3906
3907 /**
3908 * determine if 'waitq' is a member of 'wqset'
3909 *
3910 * Conditions:
3911 * neither 'waitq' nor 'wqset' is locked
3912 * may disable and re-enable interrupts while locking 'waitq'
3913 */
3914 boolean_t
3915 waitq_member(struct waitq *waitq, struct waitq_set *wqset)
3916 {
3917 kern_return_t kr = WQ_ITERATE_SUCCESS;
3918 uint64_t setid;
3919
3920 if (!waitq_valid(waitq)) {
3921 panic("Invalid waitq: %p", waitq);
3922 }
3923 assert(!waitq_irq_safe(waitq));
3924
3925 if (!waitqs_is_set(wqset)) {
3926 return FALSE;
3927 }
3928
3929 waitq_lock(waitq);
3930
3931 if (!waitqs_is_linked(wqset)) {
3932 goto out_unlock;
3933 }
3934
3935 setid = wqset->wqset_id;
3936
3937 /* fast path: most waitqs are members of only 1 set */
3938 if (waitq->waitq_set_id == setid) {
3939 waitq_unlock(waitq);
3940 return TRUE;
3941 }
3942
3943 /* walk the link table and look for the Set ID of wqset */
3944 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
3945 WQL_ALL, (void *)&setid, waitq_inset_cb);
3946
3947 out_unlock:
3948 waitq_unlock(waitq);
3949 return kr == WQ_ITERATE_FOUND;
3950 }
3951
3952 /**
3953 * Returns true if the given waitq is a member of at least one set
3954 */
3955 boolean_t
3956 waitq_in_set(struct waitq *waitq)
3957 {
3958 struct waitq_link *link;
3959 boolean_t inset = FALSE;
3960
3961 if (waitq_irq_safe(waitq)) {
3962 return FALSE;
3963 }
3964
3965 waitq_lock(waitq);
3966
3967 if (!waitq->waitq_set_id) {
3968 goto out_unlock;
3969 }
3970
3971 link = wql_get_link(waitq->waitq_set_id);
3972 if (link) {
3973 /* if we get here, the waitq is in _at_least_one_ set */
3974 inset = TRUE;
3975 wql_put_link(link);
3976 } else {
3977 /* we can just optimize this for next time */
3978 waitq->waitq_set_id = 0;
3979 }
3980
3981 out_unlock:
3982 waitq_unlock(waitq);
3983 return inset;
3984 }
3985
3986
3987 /**
3988 * pre-allocate a waitq link structure from the link table
3989 *
3990 * Conditions:
3991 * 'waitq' is not locked
3992 * may (rarely) block if link table needs to grow
3993 */
3994 uint64_t
3995 waitq_link_reserve(struct waitq *waitq)
3996 {
3997 struct waitq_link *link;
3998 uint64_t reserved_id = 0;
3999
4000 assert(get_preemption_level() == 0 && waitq_wait_possible(current_thread()));
4001
4002 /*
4003 * We've asserted that the caller can block, so we enforce a
4004 * minimum-free table element policy here.
4005 */
4006 wql_ensure_free_space();
4007
4008 (void)waitq;
4009 link = wql_alloc_link(LT_RESERVED);
4010 if (!link) {
4011 return 0;
4012 }
4013
4014 reserved_id = link->wql_setid.id;
4015
4016 return reserved_id;
4017 }
4018
4019 /**
4020 * release a pre-allocated waitq link structure
4021 */
4022 void
4023 waitq_link_release(uint64_t id)
4024 {
4025 struct waitq_link *link;
4026
4027 if (id == 0) {
4028 return;
4029 }
4030
4031 link = wql_get_reserved(id, WQL_LINK);
4032 if (!link) {
4033 return;
4034 }
4035
4036 /*
4037 * if we successfully got a link object, then we know
4038 * it's not been marked valid, and can be released with
4039 * a standard wql_put_link() which should free the element.
4040 */
4041 wql_put_link(link);
4042 #if CONFIG_LTABLE_STATS
4043 g_wqlinktable.nreserved_releases += 1;
4044 #endif
4045 }
4046
4047 /**
4048 * link 'waitq' to the set identified by 'setid' using the 'link' structure
4049 *
4050 * Conditions:
4051 * 'waitq' is locked
4052 * caller should have a reference to the 'link' object
4053 */
4054 static kern_return_t
4055 waitq_link_internal(struct waitq *waitq,
4056 uint64_t setid, struct waitq_link *link)
4057 {
4058 struct waitq_link *qlink;
4059 kern_return_t kr;
4060
4061 assert(waitq_held(waitq));
4062 assert(setid != 0);
4063 assert(setid != WQSET_NOT_LINKED);
4064
4065 /*
4066 * If the waitq_set_id field is empty, then this waitq is not
4067 * a member of any other set. All we have to do is update the
4068 * field.
4069 */
4070 if (!waitq->waitq_set_id) {
4071 waitq->waitq_set_id = setid;
4072 return KERN_SUCCESS;
4073 }
4074
4075 qlink = wql_get_link(waitq->waitq_set_id);
4076 if (!qlink) {
4077 /*
4078 * The set to which this wait queue belonged has been
4079 * destroyed / invalidated. We can re-use the waitq field.
4080 */
4081 waitq->waitq_set_id = setid;
4082 return KERN_SUCCESS;
4083 }
4084 wql_put_link(qlink);
4085
4086 /*
4087 * Check to see if it's already a member of the set.
4088 *
4089 * TODO: check for cycles!
4090 */
4091 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
4092 WQL_ALL, (void *)&setid, waitq_inset_cb);
4093 if (kr == WQ_ITERATE_FOUND) {
4094 return KERN_ALREADY_IN_SET;
4095 }
4096
4097 /*
4098 * This wait queue is a member of at least one set already,
4099 * and _not_ a member of the given set. Use our previously
4100 * allocated link object, and hook it up to the wait queue.
4101 * Note that it's possible that one or more of the wait queue sets to
4102 * which the wait queue belongs were invalidated before we allocated
4103 * this link object. That's OK because the next time we use that
4104 * object we'll just ignore it.
4105 */
4106 link->wql_link.left_setid = setid;
4107 link->wql_link.right_setid = waitq->waitq_set_id;
4108 wql_mkvalid(link);
4109
4110 waitq->waitq_set_id = link->wql_setid.id;
4111
4112 return KERN_SUCCESS;
4113 }
4114
4115 /**
4116 * link 'waitq' to 'wqset'
4117 *
4118 * Conditions:
4119 * if 'lock_state' contains WAITQ_SHOULD_LOCK, 'waitq' must be unlocked.
4120 * Otherwise, 'waitq' must be locked.
4121 *
4122 * may (rarely) block on link table allocation if the table has to grow,
4123 * and no 'reserved_link' object is passed.
4124 *
4125 * may block and acquire wqset lock if the wqset passed has no link.
4126 *
4127 * Notes:
4128 * The caller can guarantee that this function will never block by
4129 * - pre-allocating a link table object and passing its ID in 'reserved_link'
4130 * - and pre-allocating the waitq set link by calling waitq_set_lazy_init_link.
4131 * It is not possible to provide a reserved_link without having also linked
4132 * the wqset.
4133 */
4134 kern_return_t
4135 waitq_link(struct waitq *waitq, struct waitq_set *wqset,
4136 waitq_lock_state_t lock_state, uint64_t *reserved_link)
4137 {
4138 kern_return_t kr;
4139 struct waitq_link *link;
4140 int should_lock = (lock_state == WAITQ_SHOULD_LOCK);
4141
4142 if (!waitq_valid(waitq) || waitq_irq_safe(waitq)) {
4143 panic("Invalid waitq: %p", waitq);
4144 }
4145
4146 if (!waitqs_is_set(wqset)) {
4147 return KERN_INVALID_ARGUMENT;
4148 }
4149
4150 if (!reserved_link || *reserved_link == 0) {
4151 if (!waitqs_is_linked(wqset)) {
4152 waitq_set_lazy_init_link(wqset);
4153 }
4154 }
4155
4156 wqdbg_v("Link waitq %p to wqset 0x%llx",
4157 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id);
4158
4159 /*
4160 * We _might_ need a new link object here, so we'll grab it outside
4161 * the lock because the alloc call _might_ block.
4162 *
4163 * If the caller reserved a link beforehand, then wql_get_link
4164 * is guaranteed not to block because the caller holds an extra
4165 * reference to the link which, in turn, holds a reference to the
4166 * link table.
4167 */
4168 if (reserved_link && *reserved_link != 0) {
4169 link = wql_get_reserved(*reserved_link, WQL_LINK);
4170 /* always consume the caller's reference */
4171 *reserved_link = 0;
4172 } else {
4173 link = wql_alloc_link(WQL_LINK);
4174 }
4175 if (!link) {
4176 return KERN_NO_SPACE;
4177 }
4178
4179 if (should_lock) {
4180 waitq_lock(waitq);
4181 }
4182
4183 kr = waitq_link_internal(waitq, wqset->wqset_id, link);
4184
4185 if (should_lock) {
4186 waitq_unlock(waitq);
4187 }
4188
4189 wql_put_link(link);
4190
4191 return kr;
4192 }
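
/*
 * Example (illustrative sketch) of the "never block" pattern from the
 * Notes above. 'wq' and 'wqs' are hypothetical, already-initialized
 * objects and error handling is elided.
 *
 *	waitq_set_lazy_init_link(wqs);            // pre-link the set
 *	uint64_t rlink = waitq_link_reserve(wq);  // may block (table growth)
 *	...
 *	// guaranteed not to block: the link object is already reserved
 *	kern_return_t kr = waitq_link(wq, wqs, WAITQ_SHOULD_LOCK, &rlink);
 *	// rlink is zeroed once it has been handed to waitq_link()
 *	waitq_link_release(rlink);                // no-op when rlink == 0
 */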
4193
4194 /**
4195 * helper: unlink 'waitq' from waitq set identified by 'setid'
4196 * this function also prunes invalid objects from the tree
4197 *
4198 * Conditions:
4199 * MUST be called from walk_waitq_links link table walk
4200 * 'waitq' is locked
4201 *
4202 * Notes:
4203 * This is a helper function which compresses the link table by culling
4204 * unused or unnecessary links. See comments below for different
4205 * scenarios.
4206 */
4207 static inline int
4208 waitq_maybe_remove_link(struct waitq *waitq,
4209 uint64_t setid,
4210 struct waitq_link *parent,
4211 struct waitq_link *left,
4212 struct waitq_link *right)
4213 {
4214 uint64_t *wq_setid = &waitq->waitq_set_id;
4215
4216 /*
4217 * There are two scenarios:
4218 *
4219 * Scenario 1:
4220 * --------------------------------------------------------------------
4221 * waitq->waitq_set_id == parent
4222 *
4223 * parent(LINK)
4224 * / \
4225 * / \
4226 * / \
4227 * L(LINK/WQS_l) R(LINK/WQS_r)
4228 *
4229 * In this scenario, we assert that the original waitq points to the
4230 * parent link we were passed in. If WQS_l (or WQS_r) is the waitq
4231 * set we're looking for, we can set the corresponding parent
4232 * link id (left or right) to 0. To compress the tree, we can reset the
4233 * waitq_set_id of the original waitq to point to the side of the
4234 * parent that is still valid. We then discard the parent link object.
4235 */
4236 if (*wq_setid == parent->wql_setid.id) {
4237 if (!left && !right) {
4238 /* completely invalid children */
4239 wql_invalidate(parent);
4240 wqdbg_v("S1, L+R");
4241 *wq_setid = 0;
4242 return WQ_ITERATE_INVALID;
4243 } else if (!left || left->wql_setid.id == setid) {
4244 /*
4245 * if the left side matches, we know it points either to the
4246 * WQS we're unlinking, or to an invalid object:
4247 * no need to invalidate it
4248 */
4249 *wq_setid = right ? right->wql_setid.id : 0;
4250 wql_invalidate(parent);
4251 wqdbg_v("S1, L");
4252 return left ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4253 } else if (!right || right->wql_setid.id == setid) {
4254 /*
4255 * if the right side matches, we know it points either to the
4256 * WQS we're unlinking, or to an invalid object:
4257 * no need to invalidate it
4258 */
4259 *wq_setid = left ? left->wql_setid.id : 0;
4260 wql_invalidate(parent);
4261 wqdbg_v("S1, R");
4262 return right ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4263 }
4264 }
4265
4266 /*
4267 * the tree walk starts at the top-of-tree and moves down,
4268 * so these are safe asserts.
4269 */
4270 assert(left || right); /* one of them has to be valid at this point */
4271
4272 /*
4273 * Scenario 2:
4274 * --------------------------------------------------------------------
4275 * waitq->waitq_set_id == ... (OR parent)
4276 *
4277 * ...
4278 * |
4279 * parent
4280 * / \
4281 * / \
4282 * L(LINK) R(LINK)
4283 * /\ /\
4284 * / \ / \
4285 * / \ Rl(*) Rr(*)
4286 * Ll(WQS) Lr(WQS)
4287 *
4288 * In this scenario, a leaf node of either the left or right side
4289 * could be the wait queue set we're looking to unlink. We also handle
4290 * the case where one of these links is invalid. If a leaf node is
4291 * invalid or it's the set we're looking for, we can safely remove the
4292 * middle link (left or right) and point the parent link directly to
4293 * the remaining leaf node.
4294 */
4295 if (left && wql_type(left) == WQL_LINK) {
4296 uint64_t Ll, Lr;
4297 struct waitq_link *linkLl, *linkLr;
4298 assert(left->wql_setid.id != setid);
4299 Ll = left->wql_link.left_setid;
4300 Lr = left->wql_link.right_setid;
4301 linkLl = wql_get_link(Ll);
4302 linkLr = wql_get_link(Lr);
4303 if (!linkLl && !linkLr) {
4304 /*
4305 * The left object points to two invalid objects!
4306 * We can invalidate the left w/o touching the parent.
4307 */
4308 wql_invalidate(left);
4309 wqdbg_v("S2, Ll+Lr");
4310 return WQ_ITERATE_INVALID;
4311 } else if (!linkLl || Ll == setid) {
4312 /* Ll is invalid and/or the wait queue set we're looking for */
4313 parent->wql_link.left_setid = Lr;
4314 wql_invalidate(left);
4315 wql_put_link(linkLl);
4316 wql_put_link(linkLr);
4317 wqdbg_v("S2, Ll");
4318 return linkLl ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4319 } else if (!linkLr || Lr == setid) {
4320 /* Lr is invalid and/or the wait queue set we're looking for */
4321 parent->wql_link.left_setid = Ll;
4322 wql_invalidate(left);
4323 wql_put_link(linkLr);
4324 wql_put_link(linkLl);
4325 wqdbg_v("S2, Lr");
4326 return linkLr ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4327 }
4328 wql_put_link(linkLl);
4329 wql_put_link(linkLr);
4330 }
4331
4332 if (right && wql_type(right) == WQL_LINK) {
4333 uint64_t Rl, Rr;
4334 struct waitq_link *linkRl, *linkRr;
4335 assert(right->wql_setid.id != setid);
4336 Rl = right->wql_link.left_setid;
4337 Rr = right->wql_link.right_setid;
4338 linkRl = wql_get_link(Rl);
4339 linkRr = wql_get_link(Rr);
4340 if (!linkRl && !linkRr) {
4341 /*
4342 * The right object points to two invalid objects!
4343 * We can invalidate the right w/o touching the parent.
4344 */
4345 wql_invalidate(right);
4346 wqdbg_v("S2, Rl+Rr");
4347 return WQ_ITERATE_INVALID;
4348 } else if (!linkRl || Rl == setid) {
4349 /* Rl is invalid and/or the wait queue set we're looking for */
4350 parent->wql_link.right_setid = Rr;
4351 wql_invalidate(right);
4352 wql_put_link(linkRl);
4353 wql_put_link(linkRr);
4354 wqdbg_v("S2, Rl");
4355 return linkRl ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4356 } else if (!linkRr || Rr == setid) {
4357 /* Rr is invalid and/or the wait queue set we're looking for */
4358 parent->wql_link.right_setid = Rl;
4359 wql_invalidate(right);
4360 wql_put_link(linkRl);
4361 wql_put_link(linkRr);
4362 wqdbg_v("S2, Rr");
4363 return linkRr ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4364 }
4365 wql_put_link(linkRl);
4366 wql_put_link(linkRr);
4367 }
4368
4369 return WQ_ITERATE_CONTINUE;
4370 }
4371
4372 /**
4373 * link table walk callback that unlinks 'waitq' from 'ctx->setid'
4374 *
4375 * Conditions:
4376 * called from walk_waitq_links
4377 * 'waitq' is locked
4378 *
4379 * Notes:
4380 * uses waitq_maybe_remove_link() to compress the linktable and
4381 * perform the actual unlinking
4382 */
4383 static int
4384 waitq_unlink_cb(struct waitq *waitq, void *ctx,
4385 struct waitq_link *link)
4386 {
4387 uint64_t setid = *((uint64_t *)ctx);
4388 struct waitq_link *right, *left;
4389 int ret = 0;
4390
4391 if (wql_type(link) != WQL_LINK) {
4392 return WQ_ITERATE_CONTINUE;
4393 }
4394
4395 do {
4396 left = wql_get_link(link->wql_link.left_setid);
4397 right = wql_get_link(link->wql_link.right_setid);
4398
4399 ret = waitq_maybe_remove_link(waitq, setid, link, left, right);
4400
4401 wql_put_link(left);
4402 wql_put_link(right);
4403
4404 if (!wql_is_valid(link)) {
4405 return WQ_ITERATE_INVALID;
4406 }
4407 /* A ret value of UNLINKED will break us out of table walk */
4408 } while (ret == WQ_ITERATE_INVALID);
4409
4410 return ret;
4411 }
4412
4413
4414 /**
4415 * undo/remove a prepost from 'ctx' (waitq) to 'wqset'
4416 *
4417 * Conditions:
4418 * Called from wq_prepost_foreach_locked OR wq_prepost_iterate
4419 * 'wqset' may be NULL
4420 * (ctx)->unlink_wqset is locked
4421 */
4422 static int
4423 waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx,
4424 struct wq_prepost *wqp, struct waitq *waitq)
4425 {
4426 struct wq_unlink_ctx *ulctx = (struct wq_unlink_ctx *)ctx;
4427
4428 if (waitq != ulctx->unlink_wq) {
4429 return WQ_ITERATE_CONTINUE;
4430 }
4431
4432 if (wqp_type(wqp) == WQP_WQ &&
4433 wqp->wqp_prepostid.id == ulctx->unlink_wqset->wqset_prepost_id) {
4434 /* this is the only prepost on this wait queue set */
4435 wqdbg_v("unlink wqp (WQ) 0x%llx", wqp->wqp_prepostid.id);
4436 ulctx->unlink_wqset->wqset_prepost_id = 0;
4437 return WQ_ITERATE_BREAK;
4438 }
4439
4440 assert(wqp_type(wqp) == WQP_POST);
4441
4442 /*
4443 * The prepost object 'wqp' points to a waitq which should no longer
4444 * be preposted to 'ulctx->unlink_wqset'. We can remove the prepost
4445 * object from the list and break out of the iteration. Using the
4446 * context object in this way allows this same callback function to be
4447 * used from both wq_prepost_foreach_locked and wq_prepost_iterate.
4448 */
4449 wq_prepost_remove(ulctx->unlink_wqset, wqp);
4450 return WQ_ITERATE_BREAK;
4451 }
4452
4453 /**
4454 * unlink 'waitq' from 'wqset'
4455 *
4456 * Conditions:
4457 * 'waitq' is locked
4458 * 'wqset' is _not_ locked
4459 * may (rarely) spin in prepost clear and drop/re-acquire 'waitq' lock
4460 * (see waitq_clear_prepost_locked)
4461 */
4462 static kern_return_t
4463 waitq_unlink_locked(struct waitq *waitq,
4464 struct waitq_set *wqset)
4465 {
4466 uint64_t setid;
4467 kern_return_t kr;
4468
4469 assert(!waitq_irq_safe(waitq));
4470
4471 if (waitq->waitq_set_id == 0) {
4472 /*
4473 * TODO:
4474 * it doesn't belong to anyone, and it has a prepost object?
4475 * This is an artifact of not cleaning up after kqueues when
4476 * they prepost into select sets...
4477 */
4478 if (waitq->waitq_prepost_id != 0) {
4479 (void)waitq_clear_prepost_locked(waitq);
4480 }
4481 return KERN_NOT_IN_SET;
4482 }
4483
4484 if (!waitqs_is_linked(wqset)) {
4485 /*
4486 * No link has been allocated for the wqset,
4487 * so no waitq could have been linked to it.
4488 */
4489 return KERN_NOT_IN_SET;
4490 }
4491
4492 setid = wqset->wqset_id;
4493
4494 if (waitq->waitq_set_id == setid) {
4495 waitq->waitq_set_id = 0;
4496 /*
4497 * This was the only set to which the waitq belonged: we can
4498 * safely release the waitq's prepost object. It doesn't
4499 * matter if this function drops and re-acquires the lock
4500 * because we're not manipulating waitq state any more.
4501 */
4502 (void)waitq_clear_prepost_locked(waitq);
4503 return KERN_SUCCESS;
4504 }
4505
4506 /*
4507 * The waitq was a member of more than 1 set, so we need to
4508 * handle potentially compressing the link table, and
4509 * adjusting the waitq->waitq_set_id value.
4510 *
4511 * Note: we can't free the waitq's associated prepost object (if any)
4512 * because it may be in use by the one or more _other_ sets to
4513 * which this queue belongs.
4514 *
4515 * Note: This function only handles a single level of the queue linkage.
4516 * Removing a waitq from a set to which it does not directly
4517 * belong is undefined. For example, if a waitq belonged to set
4518 * A, and set A belonged to set B, you could not remove the waitq
4519 * from set B.
4520 */
4521 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
4522 WQL_LINK, (void *)&setid, waitq_unlink_cb);
4523
4524 if (kr == WQ_ITERATE_UNLINKED) {
4525 struct wq_unlink_ctx ulctx;
4526
4527 kr = KERN_SUCCESS; /* found it and dis-associated it */
4528
4529 /* don't look for preposts if it's not prepost-enabled */
4530 if (!wqset->wqset_q.waitq_prepost) {
4531 goto out;
4532 }
4533
4534 assert(!waitq_irq_safe(&wqset->wqset_q));
4535
4536 waitq_set_lock(wqset);
4537 /*
4538 * clear out any prepost from waitq into wqset
4539 * TODO: this could be more efficient than a linear search of
4540 * the waitq set's prepost list.
4541 */
4542 ulctx.unlink_wq = waitq;
4543 ulctx.unlink_wqset = wqset;
4544 (void)wq_prepost_iterate(wqset->wqset_prepost_id, (void *)&ulctx,
4545 waitq_unlink_prepost_cb);
4546 waitq_set_unlock(wqset);
4547 } else {
4548 kr = KERN_NOT_IN_SET; /* waitq is _not_ associated with wqset */
4549 }
4550
4551 out:
4552 return kr;
4553 }
4554
4555 /**
4556 * unlink 'waitq' from 'wqset'
4557 *
4558 * Conditions:
4559 * neither 'waitq' nor 'wqset' is locked
4560 * may disable and re-enable interrupts
4561 * may (rarely) spin in prepost clear
4562 * (see waitq_clear_prepost_locked)
4563 */
4564 kern_return_t
4565 waitq_unlink(struct waitq *waitq, struct waitq_set *wqset)
4566 {
4567 kern_return_t kr = KERN_SUCCESS;
4568
4569 assert(waitqs_is_set(wqset));
4570
4571 /*
4572 * we allow the waitq to be invalid because the caller may be trying
4573 * to clear out old/dirty state
4574 */
4575 if (!waitq_valid(waitq)) {
4576 return KERN_INVALID_ARGUMENT;
4577 }
4578
4579 wqdbg_v("unlink waitq %p from set 0x%llx",
4580 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id);
4581
4582 assert(!waitq_irq_safe(waitq));
4583
4584 waitq_lock(waitq);
4585
4586 kr = waitq_unlink_locked(waitq, wqset);
4587
4588 waitq_unlock(waitq);
4589 return kr;
4590 }
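
/*
 * Example (illustrative sketch): undoing a link made with waitq_link().
 * 'wq' and 'wqs' are hypothetical objects owned by the caller.
 *
 *	kern_return_t kr = waitq_unlink(wq, wqs);
 *	if (kr == KERN_NOT_IN_SET) {
 *		// wq was not (or is no longer) a member of wqs
 *	}
 */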
4591
4592 /**
4593 * unlink a waitq from a waitq set, but reference the waitq by its prepost ID
4594 *
4595 * Conditions:
4596 * 'wqset' is unlocked
4597 * wqp_id may be valid or invalid
4598 */
4599 void
4600 waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset)
4601 {
4602 struct wq_prepost *wqp;
4603
4604 disable_preemption();
4605 wqp = wq_prepost_get(wqp_id);
4606 if (wqp) {
4607 struct waitq *wq;
4608
4609 wq = wqp->wqp_wq.wqp_wq_ptr;
4610
4611 /*
4612 * lock the waitq, then release our prepost ID reference, then
4613 * unlink the waitq from the wqset: this ensures that we don't
4614 * hold a prepost ID reference during the unlink, but we also
4615 * complete the unlink operation atomically to avoid a race
4616 * with waitq_unlink[_all].
4617 */
4618 assert(!waitq_irq_safe(wq));
4619
4620 waitq_lock(wq);
4621 wq_prepost_put(wqp);
4622
4623 if (!waitq_valid(wq)) {
4624 /* someone already tore down this waitq! */
4625 waitq_unlock(wq);
4626 enable_preemption();
4627 return;
4628 }
4629
4630 /* this _may_ drop the wq lock, but that's OK */
4631 waitq_unlink_locked(wq, wqset);
4632
4633 waitq_unlock(wq);
4634 }
4635 enable_preemption();
4636 return;
4637 }
4638
4639
4640 /**
4641 * reference and lock a waitq by its prepost ID
4642 *
4643 * Conditions:
4644 * wqp_id may be valid or invalid
4645 *
4646 * Returns:
4647 * a locked waitq if wqp_id was valid
4648 * NULL on failure
4649 */
4650 struct waitq *
4651 waitq_lock_by_prepost_id(uint64_t wqp_id)
4652 {
4653 struct waitq *wq = NULL;
4654 struct wq_prepost *wqp;
4655
4656 disable_preemption();
4657 wqp = wq_prepost_get(wqp_id);
4658 if (wqp) {
4659 wq = wqp->wqp_wq.wqp_wq_ptr;
4660
4661 assert(!waitq_irq_safe(wq));
4662
4663 waitq_lock(wq);
4664 wq_prepost_put(wqp);
4665
4666 if (!waitq_valid(wq)) {
4667 /* someone already tore down this waitq! */
4668 waitq_unlock(wq);
4669 enable_preemption();
4670 return NULL;
4671 }
4672 }
4673 enable_preemption();
4674 return wq;
4675 }
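
/*
 * Example (illustrative sketch): re-referencing a wait queue through a
 * previously saved prepost ID. 'saved_wqp_id' is hypothetical and may
 * be stale by the time it is used; the NULL check covers that case.
 *
 *	struct waitq *wq = waitq_lock_by_prepost_id(saved_wqp_id);
 *	if (wq != NULL) {
 *		// wq is valid and locked here
 *		...
 *		waitq_unlock(wq);
 *	}
 */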
4676
4677
4678 /**
4679 * unlink 'waitq' from all sets to which it belongs
4680 *
4681 * Conditions:
4682 * 'waitq' is locked on entry
4683 * returns with waitq lock dropped
4684 *
4685 * Notes:
4686 * may (rarely) spin (see waitq_clear_prepost_locked)
4687 */
4688 kern_return_t
4689 waitq_unlink_all_unlock(struct waitq *waitq)
4690 {
4691 uint64_t old_set_id = 0;
4692 wqdbg_v("unlink waitq %p from all sets",
4693 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq));
4694 assert(!waitq_irq_safe(waitq));
4695
4696 /* it's not a member of any sets */
4697 if (waitq->waitq_set_id == 0) {
4698 waitq_unlock(waitq);
4699 return KERN_SUCCESS;
4700 }
4701
4702 old_set_id = waitq->waitq_set_id;
4703 waitq->waitq_set_id = 0;
4704
4705 /*
4706 * invalidate the prepost entry for this waitq.
4707 * This may drop and re-acquire the waitq lock, but that's OK because
4708 * if it was added to another set and preposted to that set in the
4709 * time we drop the lock, the state will remain consistent.
4710 */
4711 (void)waitq_clear_prepost_locked(waitq);
4712
4713 waitq_unlock(waitq);
4714
4715 if (old_set_id) {
4716 /*
4717 * Walk the link table and invalidate each LINK object that
4718 * used to connect this waitq to one or more sets: this works
4719 * because WQL_LINK objects are private to each wait queue
4720 */
4721 (void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, old_set_id,
4722 WQL_LINK, NULL, waitq_unlink_all_cb);
4723 }
4724
4725 return KERN_SUCCESS;
4726 }
4727
4728 /**
4729 * unlink 'waitq' from all sets to which it belongs
4730 *
4731 * Conditions:
4732 * 'waitq' is not locked
4733 * may disable and re-enable interrupts
4734 * may (rarely) spin
4735 * (see waitq_unlink_all_locked, waitq_clear_prepost_locked)
4736 */
4737 kern_return_t
4738 waitq_unlink_all(struct waitq *waitq)
4739 {
4740 kern_return_t kr = KERN_SUCCESS;
4741
4742 if (!waitq_valid(waitq)) {
4743 panic("Invalid waitq: %p", waitq);
4744 }
4745
4746 assert(!waitq_irq_safe(waitq));
4747 waitq_lock(waitq);
4748 if (!waitq_valid(waitq)) {
4749 waitq_unlock(waitq);
4750 return KERN_SUCCESS;
4751 }
4752
4753 kr = waitq_unlink_all_unlock(waitq);
4754 /* waitq unlocked and set links deallocated */
4755
4756 return kr;
4757 }
4758
4759
4760 /**
4761 * unlink all waitqs from 'wqset'
4762 *
4763 * Conditions:
4764 * 'wqset' is locked on entry
4765 * 'wqset' is unlocked on exit
4766 *
4767 * Note:
4768 * may (rarely) spin/block (see waitq_clear_prepost_locked)
4769 */
4770 kern_return_t
4771 waitq_set_unlink_all_unlock(struct waitq_set *wqset)
4772 {
4773 struct waitq_link *link;
4774 uint64_t prepost_id;
4775
4776 wqdbg_v("unlink all queues from set 0x%llx", wqset->wqset_id);
4777
4778 /*
4779 * This operation does not require interaction with any of the set's
4780 * constituent wait queues. All we have to do is invalidate the SetID
4781 */
4782
4783 if (waitqs_is_linked(wqset)) {
4784 /* invalidate and re-alloc the link object first */
4785 link = wql_get_link(wqset->wqset_id);
4786
4787 /* we may have raced with a waitq_set_deinit: handle this */
4788 if (!link) {
4789 waitq_set_unlock(wqset);
4790 return KERN_SUCCESS;
4791 }
4792
4793 wql_invalidate(link);
4794
4795 /* re-alloc the object to get a new generation ID */
4796 wql_realloc_link(link, WQL_WQS);
4797 link->wql_wqs.wql_set = wqset;
4798
4799 wqset->wqset_id = link->wql_setid.id;
4800 wql_mkvalid(link);
4801 wql_put_link(link);
4802 }
4803
4804 /* clear any preposts attached to this set */
4805 prepost_id = 0;
4806 if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id) {
4807 prepost_id = wqset->wqset_prepost_id;
4808 }
4809 /* else { TODO: notify kqueue subsystem? } */
4810 wqset->wqset_prepost_id = 0;
4811
4812 /*
4813 * clear set linkage and prepost object associated with this set:
4814 * waitq sets may prepost to other sets if, for example, they are
4815 * associated with a kqueue which is in a select set.
4816 *
4817 * This releases all the set link objects
4818 * (links to other sets to which this set was previously added)
4819 */
4820 waitq_unlink_all_unlock(&wqset->wqset_q);
4821 /* wqset->wqset_q unlocked */
4822
4823 /* drop / unlink all the prepost table objects */
4824 if (prepost_id) {
4825 (void)wq_prepost_iterate(prepost_id, NULL,
4826 wqset_clear_prepost_chain_cb);
4827 }
4828
4829 return KERN_SUCCESS;
4830 }
4831
4832 /**
4833 * unlink all waitqs from 'wqset'
4834 *
4835 * Conditions:
4836 * 'wqset' is not locked
4837 * may (rarely) spin/block (see waitq_clear_prepost_locked)
4838 */
4839 kern_return_t
4840 waitq_set_unlink_all(struct waitq_set *wqset)
4841 {
4842 assert(waitqs_is_set(wqset));
4843 assert(!waitq_irq_safe(&wqset->wqset_q));
4844
4845 waitq_set_lock(wqset);
4846 return waitq_set_unlink_all_unlock(wqset);
4847 /* wqset unlocked and set links and preposts deallocated */
4848 }
4849
4850 static int
4851 waitq_prepost_reserve_cb(struct waitq *waitq, void *ctx,
4852 struct waitq_link *link)
4853 {
4854 uint32_t *num = (uint32_t *)ctx;
4855 (void)waitq;
4856
4857 /*
4858 * In the worst case, we'll have to allocate 2 prepost objects
4859 * per waitq set (if the set was already preposted by another
4860 * waitq).
4861 */
4862 if (wql_type(link) == WQL_WQS) {
4863 /*
4864 * check to see if the associated waitq actually supports
4865 * preposting
4866 */
4867 if (waitq_set_can_prepost(link->wql_wqs.wql_set)) {
4868 *num += 2;
4869 }
4870 }
4871 return WQ_ITERATE_CONTINUE;
4872 }
4873
4874 static int
4875 waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq,
4876 int *did_unlock, struct wq_prepost **wqp)
4877 {
4878 struct wq_prepost *tmp;
4879 struct wqp_cache *cache;
4880
4881 *did_unlock = 0;
4882
4883 /*
4884 * Before we unlock the waitq, check the per-processor prepost object
4885 * cache to see if there's enough there for us. If so, do the
4886 * allocation, keep the lock and save an entire iteration over the set
4887 * linkage!
4888 */
4889 if (waitq) {
4890 disable_preemption();
4891 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
4892 if (nalloc <= (int)cache->avail) {
4893 goto do_alloc;
4894 }
4895 enable_preemption();
4896
4897 /* unlock the waitq to perform the allocation */
4898 *did_unlock = 1;
4899 waitq_unlock(waitq);
4900 }
4901
4902 do_alloc:
4903 tmp = wq_prepost_alloc(LT_RESERVED, nalloc);
4904 if (!tmp) {
4905 panic("Couldn't reserve %d preposts for waitq @%p (wqp@%p)",
4906 nalloc, waitq, *wqp);
4907 }
4908 if (*wqp) {
4909 /* link the two lists */
4910 int __assert_only rc;
4911 rc = wq_prepost_rlink(tmp, *wqp);
4912 assert(rc == nalloc);
4913 }
4914 *wqp = tmp;
4915
4916 /*
4917 * If the caller can block, then enforce a minimum-free table element
4918 * policy here. This helps ensure that we will have enough prepost
4919 * objects for callers such as selwakeup() that can be called with
4920 * spin locks held.
4921 */
4922 if (get_preemption_level() == 0) {
4923 wq_prepost_ensure_free_space();
4924 }
4925
4926 if (waitq) {
4927 if (*did_unlock == 0) {
4928 /* decrement the preemption count if alloc from cache */
4929 enable_preemption();
4930 } else {
4931 /* otherwise: re-lock the waitq */
4932 waitq_lock(waitq);
4933 }
4934 }
4935
4936 return nalloc;
4937 }
4938
4939 static int
4940 waitq_count_prepost_reservation(struct waitq *waitq, int extra, int keep_locked)
4941 {
4942 int npreposts = 0;
4943
4944 /*
4945 * If the waitq is not currently part of a set, and we're not asked to
4946 * keep the waitq locked, then we'll want to have 3 in reserve
4947 * just in case it becomes part of a set while we unlock and reserve.
4948 * We may need up to 1 object for the waitq, and 2 for the set.
4949 */
4950 if (waitq->waitq_set_id == 0) {
4951 npreposts = 3;
4952 } else {
4953 /* this queue has never been preposted before */
4954 if (waitq->waitq_prepost_id == 0) {
4955 npreposts = 3;
4956 }
4957
4958 /*
4959 * Walk the set of table linkages associated with this waitq
4960 * and count the worst-case number of prepost objects that
4961 * may be needed during a wakeup_all. We can walk this without
4962 * locking each set along the way because the table-based IDs
4963 * disconnect us from the set pointers themselves, and the
4964 * table walking is careful to read the setid values only once.
4965 * Locking each set up the chain also doesn't guarantee that
4966 * their membership won't change between the time we unlock
4967 * that set and when we actually go to prepost, so our
4968 * situation is no worse than before and we've alleviated lock
4969 * contention on any sets to which this waitq belongs.
4970 */
4971 (void)walk_waitq_links(LINK_WALK_FULL_DAG_UNLOCKED,
4972 waitq, waitq->waitq_set_id,
4973 WQL_WQS, (void *)&npreposts,
4974 waitq_prepost_reserve_cb);
4975 }
4976
4977 if (extra > 0) {
4978 npreposts += extra;
4979 }
4980
4981 if (npreposts == 0 && !keep_locked) {
4982 /*
4983 * If we get here, we were asked to reserve some prepost
4984 * objects for a waitq that's previously preposted, and is not
4985 * currently a member of any sets. We have also been
4986 * instructed to unlock the waitq when we're done. In this
4987 * case, we pre-allocated enough reserved objects to handle
4988 * the case where the waitq gets added to a single set when
4989 * the lock is released.
4990 */
4991 npreposts = 3;
4992 }
4993
4994 return npreposts;
4995 }
4996
4997
4998 /**
4999 * pre-allocate prepost objects for 'waitq'
5000 *
5001 * Conditions:
5002 * 'waitq' is not locked
5003 *
5004 * Returns:
5005 * panic on error
5006 *
5007 * on success, the ID of the head of a singly-linked list of
5008 * pre-allocated prepost objects (or 0 if no reservation was needed).
5009 *
5010 * Notes:
5011 * If 'lock_state' is WAITQ_KEEP_LOCKED, this function performs the pre-allocation
5012 * atomically and returns 'waitq' locked.
5013 *
5014 * This function attempts to pre-allocate precisely enough prepost
5015 * objects based on the current set membership of 'waitq'. If the
5016 * operation is performed atomically, then the caller
5017 * is guaranteed to have enough pre-allocated prepost objects to avoid
5018 * any (rare) blocking in the wakeup path.
5019 */
5020 uint64_t
5021 waitq_prepost_reserve(struct waitq *waitq, int extra,
5022 waitq_lock_state_t lock_state)
5023 {
5024 uint64_t reserved = 0;
5025 uint64_t prev_setid = 0, prev_prepostid = 0;
5026 struct wq_prepost *wqp = NULL;
5027 int nalloc = 0, npreposts = 0;
5028 int keep_locked = (lock_state == WAITQ_KEEP_LOCKED);
5029 int unlocked = 0;
5030
5031 wqdbg_v("Attempting to reserve prepost linkages for waitq %p (extra:%d)",
5032 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), extra);
5033
5034 if (waitq == NULL && extra > 0) {
5035 /*
5036 * Simple prepost object allocation:
5037 * we'll add 2 more because the waitq might need an object,
5038 * and the set itself may need a new POST object in addition
5039 * to the number of preposts requested by the caller
5040 */
5041 nalloc = waitq_alloc_prepost_reservation(extra + 2, NULL,
5042 &unlocked, &wqp);
5043 assert(nalloc == extra + 2);
5044 return wqp->wqp_prepostid.id;
5045 }
5046
5047 assert(lock_state == WAITQ_KEEP_LOCKED || lock_state == WAITQ_UNLOCK);
5048
5049 assert(!waitq_irq_safe(waitq));
5050
5051 waitq_lock(waitq);
5052
5053 /* remember the set ID that we started with */
5054 prev_setid = waitq->waitq_set_id;
5055 prev_prepostid = waitq->waitq_prepost_id;
5056
5057 /*
5058 * If the waitq is not part of a set, and we're asked to
5059 * keep the set locked, then we don't have to reserve
5060 * anything!
5061 */
5062 if (prev_setid == 0 && keep_locked) {
5063 goto out;
5064 }
5065
5066 npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked);
5067
5068 /* nothing for us to do! */
5069 if (npreposts == 0) {
5070 if (keep_locked) {
5071 goto out;
5072 }
5073 goto out_unlock;
5074 }
5075
5076 try_alloc:
5077 /* this _may_ unlock and relock the waitq! */
5078 nalloc = waitq_alloc_prepost_reservation(npreposts, waitq,
5079 &unlocked, &wqp);
5080
5081 if (!unlocked) {
5082 /* allocation held the waitq lock: we're done! */
5083 if (keep_locked) {
5084 goto out;
5085 }
5086 goto out_unlock;
5087 }
5088
5089 /*
5090 * Before we return, if the allocation had to unlock the waitq, we
5091 * must check one more time to see if we have enough. If not, we'll
5092 * try to allocate the difference. If the caller requests it, we'll
5093 * also leave the waitq locked so that the use of the pre-allocated
5094 * prepost objects can be guaranteed to be enough if a wakeup_all is
5095 * performed before unlocking the waitq.
5096 */
5097
5098 /*
5099 * If the waitq is no longer associated with a set, or if the waitq's
5100 * set/prepostid has not changed since we first walked its linkage,
5101 * we're done.
5102 */
5103 if ((waitq->waitq_set_id == 0) ||
5104 (waitq->waitq_set_id == prev_setid &&
5105 waitq->waitq_prepost_id == prev_prepostid)) {
5106 if (keep_locked) {
5107 goto out;
5108 }
5109 goto out_unlock;
5110 }
5111
5112 npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked);
5113
5114 if (npreposts > nalloc) {
5115 prev_setid = waitq->waitq_set_id;
5116 prev_prepostid = waitq->waitq_prepost_id;
5117 npreposts = npreposts - nalloc; /* only allocate the diff */
5118 goto try_alloc;
5119 }
5120
5121 if (keep_locked) {
5122 goto out;
5123 }
5124
5125 out_unlock:
5126 waitq_unlock(waitq);
5127 out:
5128 if (wqp) {
5129 reserved = wqp->wqp_prepostid.id;
5130 }
5131
5132 return reserved;
5133 }
5134
5135 /**
5136 * release a linked list of prepost objects allocated via _prepost_reserve
5137 *
5138 * Conditions:
5139 * may (rarely) spin waiting for prepost table growth memcpy
5140 */
5141 void
5142 waitq_prepost_release_reserve(uint64_t id)
5143 {
5144 struct wq_prepost *wqp;
5145
5146 wqdbg_v("releasing reserved preposts starting at: 0x%llx", id);
5147
5148 wqp = wq_prepost_rfirst(id);
5149 if (!wqp) {
5150 return;
5151 }
5152
5153 wq_prepost_release_rlist(wqp);
5154 }
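
/*
 * Example (illustrative sketch) of the reserve -> wakeup -> release
 * pattern used by the higher-level wakeup calls below (see
 * waitq_wakeup64_one()). 'wq', 'ev', 'result' and 'priority' are
 * hypothetical caller-supplied values; 'wq' is non-IRQ-safe.
 *
 *	uint64_t resv = waitq_prepost_reserve(wq, 0, WAITQ_KEEP_LOCKED);
 *	// 'wq' comes back locked; the reservation covers its current set
 *	// membership, so the locked wakeup should not need to allocate
 *	kern_return_t kr = waitq_wakeup64_one_locked(wq, ev, result,
 *	    &resv, priority, WAITQ_UNLOCK);
 *	waitq_prepost_release_reserve(resv);      // return any leftovers
 */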
5155
5156
5157 /**
5158 * clear all preposts from 'wqset'
5159 *
5160 * Conditions:
5161 * 'wqset' is not locked
5162 */
5163 void
5164 waitq_set_clear_preposts(struct waitq_set *wqset)
5165 {
5166 uint64_t prepost_id;
5167 spl_t spl;
5168
5169 assert(waitqs_is_set(wqset));
5170
5171 if (!wqset->wqset_q.waitq_prepost || !wqset->wqset_prepost_id) {
5172 return;
5173 }
5174
5175 wqdbg_v("Clearing all preposted queues on waitq_set: 0x%llx",
5176 wqset->wqset_id);
5177
5178 if (waitq_irq_safe(&wqset->wqset_q)) {
5179 spl = splsched();
5180 }
5181 waitq_set_lock(wqset);
5182 prepost_id = wqset->wqset_prepost_id;
5183 wqset->wqset_prepost_id = 0;
5184 waitq_set_unlock(wqset);
5185 if (waitq_irq_safe(&wqset->wqset_q)) {
5186 splx(spl);
5187 }
5188
5189 /* drop / unlink all the prepost table objects */
5190 if (prepost_id) {
5191 (void)wq_prepost_iterate(prepost_id, NULL,
5192 wqset_clear_prepost_chain_cb);
5193 }
5194 }
5195
5196
5197 /* ----------------------------------------------------------------------
5198 *
5199 * Iteration: waitq -> sets / waitq_set -> preposts
5200 *
5201 * ---------------------------------------------------------------------- */
5202
5203 struct wq_it_ctx {
5204 void *input;
5205 void *ctx;
5206 waitq_iterator_t it;
5207 };
5208
5209 static int
5210 waitq_iterate_sets_cb(struct waitq *waitq, void *ctx,
5211 struct waitq_link *link)
5212 {
5213 struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx);
5214 struct waitq_set *wqset;
5215 int ret;
5216
5217 (void)waitq;
5218 assert(!waitq_irq_safe(waitq));
5219 assert(wql_type(link) == WQL_WQS);
5220
5221 /*
5222 * the waitq is locked, so we can just take the set lock
5223 * and call the iterator function
5224 */
5225 wqset = link->wql_wqs.wql_set;
5226 assert(wqset != NULL);
5227 assert(!waitq_irq_safe(&wqset->wqset_q));
5228 waitq_set_lock(wqset);
5229
5230 ret = wctx->it(wctx->ctx, (struct waitq *)wctx->input, wqset);
5231
5232 waitq_set_unlock(wqset);
5233 return ret;
5234 }
5235
5236 /**
5237 * call external iterator function for each prepost object in wqset
5238 *
5239 * Conditions:
5240 * Called from wq_prepost_foreach_locked
5241 * (wqset locked, waitq _not_ locked)
5242 */
5243 static int
5244 wqset_iterate_prepost_cb(struct waitq_set *wqset, void *ctx,
5245 struct wq_prepost *wqp, struct waitq *waitq)
5246 {
5247 struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx);
5248 uint64_t wqp_id;
5249 int ret;
5250
5251 (void)wqp;
5252
5253 /*
5254 * This is a bit tricky. The 'wqset' is locked, but the 'waitq' is not.
5255 * Taking the 'waitq' lock is a lock order violation, so we need to be
5256 * careful. We also must realize that we may have taken a reference to
5257 * the 'wqp' just as the associated waitq was being torn down (or
5258 * clearing all its preposts) - see waitq_clear_prepost_locked(). If
5259 * the 'wqp' is valid and we can get the waitq lock, then we are good
5260 * to go. If not, we need to back off, check that the 'wqp' hasn't
5261 * been invalidated, and try to re-take the locks.
5262 */
5263 assert(!waitq_irq_safe(waitq));
5264
5265 if (waitq_lock_try(waitq)) {
5266 goto call_iterator;
5267 }
5268
5269 if (!wqp_is_valid(wqp)) {
5270 return WQ_ITERATE_RESTART;
5271 }
5272
5273 /* We are passed a prepost object with a reference on it. If neither
5274 * the waitq set nor the waitq requires interrupts disabled, then we
5275 * may block on the delay(1) call below. We can't hold a prepost
5276 * object reference while blocking, so we have to give that up as well
5277 * and re-acquire it when we come back.
5278 */
5279 wqp_id = wqp->wqp_prepostid.id;
5280 wq_prepost_put(wqp);
5281 waitq_set_unlock(wqset);
5282 wqdbg_v("dropped set:%p lock waiting for wqp:%p (0x%llx -> wq:%p)",
5283 wqset, wqp, wqp_id, waitq);
5284 delay(1);
5285 waitq_set_lock(wqset);
5286 wqp = wq_prepost_get(wqp_id);
5287 if (!wqp) {
5288 /* someone cleared preposts while we slept! */
5289 return WQ_ITERATE_DROPPED;
5290 }
5291
5292 /*
5293 * TODO:
5294 * This differs slightly from the logic in ipc_mqueue.c:
5295 * ipc_mqueue_receive_on_thread(). There, if the waitq lock
5296 * can't be obtained, the prepost link is placed on the back of
5297 * the chain, and the iteration starts from the beginning. Here,
5298 * we just restart from the beginning.
5299 */
5300 return WQ_ITERATE_RESTART;
5301
5302 call_iterator:
5303 if (!wqp_is_valid(wqp)) {
5304 ret = WQ_ITERATE_RESTART;
5305 goto out_unlock;
5306 }
5307
5308 /* call the external callback */
5309 ret = wctx->it(wctx->ctx, waitq, wqset);
5310
5311 if (ret == WQ_ITERATE_BREAK_KEEP_LOCKED) {
5312 ret = WQ_ITERATE_BREAK;
5313 goto out;
5314 }
5315
5316 out_unlock:
5317 waitq_unlock(waitq);
5318 out:
5319 return ret;
5320 }
5321
5322 /**
5323 * iterator over all sets to which the given waitq has been linked
5324 *
5325 * Conditions:
5326 * 'waitq' is locked
5327 */
5328 int
5329 waitq_iterate_sets(struct waitq *waitq, void *ctx, waitq_iterator_t it)
5330 {
5331 int ret;
5332 struct wq_it_ctx wctx = {
5333 .input = (void *)waitq,
5334 .ctx = ctx,
5335 .it = it,
5336 };
5337 if (!it || !waitq) {
5338 return KERN_INVALID_ARGUMENT;
5339 }
5340
5341 ret = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
5342 WQL_WQS, (void *)&wctx, waitq_iterate_sets_cb);
5343 if (ret == WQ_ITERATE_CONTINUE) {
5344 ret = WQ_ITERATE_SUCCESS;
5345 }
5346 return ret;
5347 }
5348
5349 /**
5350 * iterator over all preposts in the given wqset
5351 *
5352 * Conditions:
5353 * 'wqset' is locked
5354 */
5355 int
5356 waitq_set_iterate_preposts(struct waitq_set *wqset,
5357 void *ctx, waitq_iterator_t it)
5358 {
5359 struct wq_it_ctx wctx = {
5360 .input = (void *)wqset,
5361 .ctx = ctx,
5362 .it = it,
5363 };
5364 if (!it || !wqset) {
5365 return WQ_ITERATE_INVALID;
5366 }
5367
5368 assert(waitq_held(&wqset->wqset_q));
5369
5370 return wq_prepost_foreach_locked(wqset, (void *)&wctx,
5371 wqset_iterate_prepost_cb);
5372 }
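
/*
 * Example (illustrative sketch): an iterator callback matching the
 * shape used by the two functions above, which invoke it as
 * it(ctx, waitq, wqset) with both objects locked. The callback name
 * and its context are hypothetical.
 *
 *	static int
 *	count_preposts_cb(void *ctx, struct waitq *waitq,
 *	    struct waitq_set *wqset)
 *	{
 *		(void)waitq;
 *		(void)wqset;
 *		*(int *)ctx += 1;
 *		return WQ_ITERATE_CONTINUE;   // keep walking
 *	}
 *
 *	// usage, with 'wqs' locked by the caller:
 *	int n = 0;
 *	waitq_set_iterate_preposts(wqs, &n, count_preposts_cb);
 */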
5373
5374
5375 /* ----------------------------------------------------------------------
5376 *
5377 * Higher-level APIs
5378 *
5379 * ---------------------------------------------------------------------- */
5380
5381
5382 /**
5383 * declare a thread's intent to wait on 'waitq' for 'wait_event'
5384 *
5385 * Conditions:
5386 * 'waitq' is not locked
5387 */
5388 wait_result_t
5389 waitq_assert_wait64(struct waitq *waitq,
5390 event64_t wait_event,
5391 wait_interrupt_t interruptible,
5392 uint64_t deadline)
5393 {
5394 thread_t thread = current_thread();
5395 wait_result_t ret;
5396 spl_t s;
5397
5398 if (!waitq_valid(waitq)) {
5399 panic("Invalid waitq: %p", waitq);
5400 }
5401
5402 if (waitq_irq_safe(waitq)) {
5403 s = splsched();
5404 }
5405
5406 waitq_lock(waitq);
5407 ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
5408 TIMEOUT_URGENCY_SYS_NORMAL,
5409 deadline, TIMEOUT_NO_LEEWAY, thread);
5410 waitq_unlock(waitq);
5411
5412 if (waitq_irq_safe(waitq)) {
5413 splx(s);
5414 }
5415
5416 return ret;
5417 }
5418
5419 /**
5420 * declare a thread's intent to wait on 'waitq' for 'wait_event'
5421 *
5422 * Conditions:
5423 * 'waitq' is not locked
5424 * will disable and re-enable interrupts while locking current_thread()
5425 */
5426 wait_result_t
5427 waitq_assert_wait64_leeway(struct waitq *waitq,
5428 event64_t wait_event,
5429 wait_interrupt_t interruptible,
5430 wait_timeout_urgency_t urgency,
5431 uint64_t deadline,
5432 uint64_t leeway)
5433 {
5434 wait_result_t ret;
5435 thread_t thread = current_thread();
5436 spl_t s;
5437
5438 if (!waitq_valid(waitq)) {
5439 panic("Invalid waitq: %p", waitq);
5440 }
5441
5442 if (waitq_irq_safe(waitq)) {
5443 s = splsched();
5444 }
5445
5446 waitq_lock(waitq);
5447 ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
5448 urgency, deadline, leeway, thread);
5449 waitq_unlock(waitq);
5450
5451 if (waitq_irq_safe(waitq)) {
5452 splx(s);
5453 }
5454
5455 return ret;
5456 }
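
/*
 * Example (illustrative sketch): the waiter side of this interface.
 * 'wq' and 'ev' are hypothetical; a deadline of 0 is assumed to mean
 * "no timeout", as with other assert_wait interfaces.
 *
 *	wait_result_t wr = waitq_assert_wait64(wq, ev, THREAD_UNINT, 0);
 *	if (wr == THREAD_WAITING) {
 *		wr = thread_block(THREAD_CONTINUE_NULL);   // actually wait
 *	}
 *	// 'wr' now reflects how the wait ended (e.g. THREAD_AWAKENED)
 */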
5457
5458 /**
5459 * wakeup a single thread from a waitq that's waiting for a given event
5460 *
5461 * Conditions:
5462 * 'waitq' is not locked
5463 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
5464 * may disable and re-enable interrupts
5465 *
5466 * Notes:
5467 * will _not_ block if waitq is global (or not a member of any set)
5468 */
5469 kern_return_t
5470 waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event,
5471 wait_result_t result, int priority)
5472 {
5473 kern_return_t kr;
5474 uint64_t reserved_preposts = 0;
5475 spl_t spl;
5476
5477 if (!waitq_valid(waitq)) {
5478 panic("Invalid waitq: %p", waitq);
5479 }
5480
5481 if (!waitq_irq_safe(waitq)) {
5482 /* reserve preposts in addition to locking the waitq */
5483 reserved_preposts = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
5484 } else {
5485 spl = splsched();
5486 waitq_lock(waitq);
5487 }
5488
5489 /* waitq is locked upon return */
5490 kr = waitq_wakeup64_one_locked(waitq, wake_event, result,
5491 &reserved_preposts, priority, WAITQ_UNLOCK);
5492
5493 if (waitq_irq_safe(waitq)) {
5494 splx(spl);
5495 }
5496
5497 /* release any left-over prepost object (won't block/lock anything) */
5498 waitq_prepost_release_reserve(reserved_preposts);
5499
5500 return kr;
5501 }
5502
5503 /**
5504 * wakeup all threads from a waitq that are waiting for a given event
5505 *
5506 * Conditions:
5507 * 'waitq' is not locked
5508 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
5509 * may disable and re-enable interrupts
5510 *
5511 * Notes:
5512 * will _not_ block if waitq is global (or not a member of any set)
5513 */
5514 kern_return_t
5515 waitq_wakeup64_all(struct waitq *waitq,
5516 event64_t wake_event,
5517 wait_result_t result,
5518 int priority)
5519 {
5520 kern_return_t ret;
5521 uint64_t reserved_preposts = 0;
5522 spl_t s;
5523
5524 if (!waitq_valid(waitq)) {
5525 panic("Invalid waitq: %p", waitq);
5526 }
5527
5528 if (!waitq_irq_safe(waitq)) {
5529 /* reserve preposts in addition to locking waitq */
5530 reserved_preposts = waitq_prepost_reserve(waitq, 0,
5531 WAITQ_KEEP_LOCKED);
5532 } else {
5533 s = splsched();
5534 waitq_lock(waitq);
5535 }
5536
5537 ret = waitq_wakeup64_all_locked(waitq, wake_event, result,
5538 &reserved_preposts, priority,
5539 WAITQ_UNLOCK);
5540
5541 if (waitq_irq_safe(waitq)) {
5542 splx(s);
5543 }
5544
5545 waitq_prepost_release_reserve(reserved_preposts);
5546
5547 return ret;
5548 }
5549
5550 /**
5551 * wakeup a specific thread iff it's waiting on 'waitq' for 'wake_event'
5552 *
5553 * Conditions:
5554 * 'waitq' is not locked
5555 *
5556 * Notes:
5557 * May temporarily disable and re-enable interrupts
5558 */
5559 kern_return_t
5560 waitq_wakeup64_thread(struct waitq *waitq,
5561 event64_t wake_event,
5562 thread_t thread,
5563 wait_result_t result)
5564 {
5565 kern_return_t ret;
5566 spl_t s, th_spl;
5567
5568 if (!waitq_valid(waitq)) {
5569 panic("Invalid waitq: %p", waitq);
5570 }
5571
5572 if (waitq_irq_safe(waitq)) {
5573 s = splsched();
5574 }
5575 waitq_lock(waitq);
5576
5577 ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl);
5578 /* on success, returns 'thread' locked */
5579
5580 waitq_unlock(waitq);
5581
5582 if (ret == KERN_SUCCESS) {
5583 ret = thread_go(thread, result);
5584 assert(ret == KERN_SUCCESS);
5585 thread_unlock(thread);
5586 splx(th_spl);
5587 waitq_stats_count_wakeup(waitq);
5588 } else {
5589 ret = KERN_NOT_WAITING;
5590 waitq_stats_count_fail(waitq);
5591 }
5592
5593 if (waitq_irq_safe(waitq)) {
5594 splx(s);
5595 }
5596
5597 return ret;
5598 }
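
/*
 * Example (illustrative sketch): waking one specific, known thread.
 * 'wq', 'ev' and 'th' are hypothetical; THREAD_AWAKENED is the usual
 * wait_result_t handed to woken threads.
 *
 *	kern_return_t kr = waitq_wakeup64_thread(wq, ev, th, THREAD_AWAKENED);
 *	if (kr == KERN_NOT_WAITING) {
 *		// 'th' was not waiting on 'wq' for 'ev'; nothing was woken
 *	}
 */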
5599
5600 /**
5601 * wakeup a single thread from a waitq that's waiting for a given event
5602 * and return a reference to that thread
5603 * returns THREAD_NULL if no thread was waiting
5604 *
5605 * Conditions:
5606 * 'waitq' is not locked
5607 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
5608 * may disable and re-enable interrupts
5609 *
5610 * Notes:
5611 * will _not_ block if waitq is global (or not a member of any set)
5612 */
5613 thread_t
5614 waitq_wakeup64_identify(struct waitq *waitq,
5615 event64_t wake_event,
5616 wait_result_t result,
5617 int priority)
5618 {
5619 uint64_t reserved_preposts = 0;
5620 spl_t thread_spl = 0;
5621 thread_t thread;
5622 spl_t spl;
5623
5624 if (!waitq_valid(waitq)) {
5625 panic("Invalid waitq: %p", waitq);
5626 }
5627
5628 if (!waitq_irq_safe(waitq)) {
5629 /* reserve preposts in addition to locking waitq */
5630 reserved_preposts = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
5631 } else {
5632 spl = splsched();
5633 waitq_lock(waitq);
5634 }
5635
5636 thread = waitq_wakeup64_identify_locked(waitq, wake_event, result,
5637 &thread_spl, &reserved_preposts,
5638 priority, WAITQ_UNLOCK);
5639 /* waitq is unlocked, thread is locked */
5640
5641 if (thread != THREAD_NULL) {
5642 thread_reference(thread);
5643 thread_unlock(thread);
5644 splx(thread_spl);
5645 }
5646
5647 if (waitq_irq_safe(waitq)) {
5648 splx(spl);
5649 }
5650
5651 /* release any left-over prepost object (won't block/lock anything) */
5652 waitq_prepost_release_reserve(reserved_preposts);
5653
5654 /* returns +1 ref to running thread or THREAD_NULL */
5655 return thread;
5656 }
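
/*
 * Example (illustrative sketch): consuming the +1 thread reference
 * returned above. 'wq', 'ev', 'result' and 'priority' are hypothetical;
 * thread_deallocate() is the usual way to drop a thread reference.
 *
 *	thread_t th = waitq_wakeup64_identify(wq, ev, result, priority);
 *	if (th != THREAD_NULL) {
 *		// 'th' has been woken and made runnable
 *		...
 *		thread_deallocate(th);   // drop the +1 reference
 *	}
 */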