[apple/xnu.git] / osfmk / kern / waitq.c (blob 1a38d76feb9a34676279de9f07859cbc6ddba3e5)
1 /*
2 * Copyright (c) 2015-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 /*
58 * un-comment the following lines to debug the link/prepost tables
59 * NOTE: this expands each element by ~40 bytes
60 */
61 //#define KEEP_WAITQ_LINK_STATS
62 //#define KEEP_WAITQ_PREPOST_STATS
63
64 #include <kern/ast.h>
65 #include <kern/backtrace.h>
66 #include <kern/kern_types.h>
67 #include <kern/ltable.h>
68 #include <kern/mach_param.h>
69 #include <kern/queue.h>
70 #include <kern/sched_prim.h>
71 #include <kern/simple_lock.h>
72 #include <kern/spl.h>
73 #include <kern/waitq.h>
74 #include <kern/zalloc.h>
75 #include <kern/policy_internal.h>
76 #include <kern/turnstile.h>
77
78 #include <os/hash.h>
79 #include <libkern/OSAtomic.h>
80 #include <mach/sync_policy.h>
81 #include <vm/vm_kern.h>
82
83 #include <sys/kdebug.h>
84
85 #if defined(KEEP_WAITQ_LINK_STATS) || defined(KEEP_WAITQ_PREPOST_STATS)
86 # if !CONFIG_LTABLE_STATS
87 # error "You must configure LTABLE_STATS to use WAITQ_[LINK|PREPOST]_STATS"
88 # endif
89 # if !CONFIG_WAITQ_STATS
90 # error "You must configure WAITQ_STATS to use WAITQ_[LINK|PREPOST]_STATS"
91 # endif
92 #endif
93
94 #if CONFIG_WAITQ_DEBUG
95 #define wqdbg(fmt, ...) \
96 printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__)
97 #else
98 #define wqdbg(fmt, ...) do { } while (0)
99 #endif
100
101 #ifdef WAITQ_VERBOSE_DEBUG
102 #define wqdbg_v(fmt, ...) \
103 printf("WQ[v:%s]: " fmt "\n", __func__, ## __VA_ARGS__)
104 #else
105 #define wqdbg_v(fmt, ...) do { } while (0)
106 #endif
107
108 #define wqinfo(fmt, ...) \
109 printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__)
110
111 #define wqerr(fmt, ...) \
112 printf("WQ[%s] ERROR: " fmt "\n", __func__, ## __VA_ARGS__)
113
114 /*
115 * file-static functions / data
116 */
117 static thread_t waitq_select_one_locked(struct waitq *waitq, event64_t event,
118 uint64_t *reserved_preposts,
119 int priority, spl_t *spl);
120
121 static kern_return_t waitq_select_thread_locked(struct waitq *waitq,
122 event64_t event,
123 thread_t thread, spl_t *spl);
124
125 #define WAITQ_SET_MAX (task_max * 3)
126 static zone_t waitq_set_zone;
127
128
129 #define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
130 #define ROUNDDOWN(x, y) (((x)/(y))*(y))
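
/*
 * Worked examples (illustrative, assuming a power-of-two 'align' for P2ROUNDUP):
 *   P2ROUNDUP(5000, 4096) == 8192    -- -(-5000 & -4096) == -(-8192)
 *   P2ROUNDUP(4096, 4096) == 4096    -- already aligned, unchanged
 *   ROUNDDOWN(5000, 4096) == 4096    -- (5000 / 4096) * 4096
 */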
131
132
133 #if CONFIG_LTABLE_STATS || CONFIG_WAITQ_STATS
134 static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip);
135 #endif
136
137 lck_grp_t waitq_lck_grp;
138
139 #if __arm64__
140
141 #define waitq_lock_to(wq, to) \
142 (hw_lock_bit_to(&(wq)->waitq_interlock, LCK_ILOCK, to, &waitq_lck_grp))
143
144 #define waitq_lock_unlock(wq) \
145 (hw_unlock_bit(&(wq)->waitq_interlock, LCK_ILOCK))
146
147 #define waitq_lock_init(wq) \
148 (wq->waitq_interlock = 0)
149
150 #else
151
152 #define waitq_lock_to(wq, to) \
153 (hw_lock_to(&(wq)->waitq_interlock, to, &waitq_lck_grp))
154
155 #define waitq_lock_unlock(wq) \
156 (hw_lock_unlock(&(wq)->waitq_interlock))
157
158 #define waitq_lock_init(wq) \
159 (hw_lock_init(&(wq)->waitq_interlock))
160
161 #endif /* __arm64__ */
162
163 /*
164 * Prepost callback function for specially marked waitq sets
165 * (prepost alternative)
166 */
167 extern void waitq_set__CALLING_PREPOST_HOOK__(void *ctx, void *memberctx, int priority);
168
169 #define DEFAULT_MIN_FREE_TABLE_ELEM 100
170 static uint32_t g_min_free_table_elem;
171 static uint32_t g_min_free_cache;
172
173
174 /* ----------------------------------------------------------------------
175 *
176 * SetID Link Table Implementation
177 *
178 * ---------------------------------------------------------------------- */
179 static struct link_table g_wqlinktable;
180
181 enum wq_link_type {
182 WQL_ALL = -1,
183 WQL_FREE = LT_FREE,
184 WQL_WQS = LT_ELEM,
185 WQL_LINK = LT_LINK,
186 };
187
188 struct waitq_link {
189 struct lt_elem wqte;
190
191 union {
192 /* wqt_type == WQL_WQS (LT_ELEM) */
193 struct {
194 struct waitq_set *wql_set;
195 /* uint64_t sl_prepost_id; */
196 } wql_wqs;
197
198 /* wqt_type == WQL_LINK (LT_LINK) */
199 struct {
200 uint64_t left_setid;
201 uint64_t right_setid;
202 } wql_link;
203 };
204 #ifdef KEEP_WAITQ_LINK_STATS
205 thread_t sl_alloc_th;
206 task_t sl_alloc_task;
207 uintptr_t sl_alloc_bt[NWAITQ_BTFRAMES];
208 uint64_t sl_alloc_ts;
209 uintptr_t sl_invalidate_bt[NWAITQ_BTFRAMES];
210 uint64_t sl_invalidate_ts;
211 uintptr_t sl_mkvalid_bt[NWAITQ_BTFRAMES];
212 uint64_t sl_mkvalid_ts;
213 uint64_t sl_free_ts;
214 #endif
215 };
216 #if !defined(KEEP_WAITQ_LINK_STATS)
217 static_assert((sizeof(struct waitq_link) & (sizeof(struct waitq_link) - 1)) == 0,
218 "waitq_link struct must be a power of two!");
219 #endif
220
221 #define wql_refcnt(link) \
222 (lt_bits_refcnt((link)->wqte.lt_bits))
223
224 #define wql_type(link) \
225 (lt_bits_type((link)->wqte.lt_bits))
226
227 #define wql_mkvalid(link) \
228 do { \
229 lt_elem_mkvalid(&(link)->wqte); \
230 wql_do_mkvalid_stats(&(link)->wqte); \
231 } while (0)
232
233 #define wql_is_valid(link) \
234 lt_bits_valid((link)->wqte.lt_bits)
235
236 #define wql_setid wqte.lt_id
237
238 #define WQL_WQS_POISON ((void *)(0xf00df00d))
239 #define WQL_LINK_POISON (0x0bad0badffffffffull)
240
241 static void
242 wql_poison(struct link_table *table, struct lt_elem *elem)
243 {
244 struct waitq_link *link = (struct waitq_link *)elem;
245 (void)table;
246
247 switch (wql_type(link)) {
248 case WQL_WQS:
249 link->wql_wqs.wql_set = WQL_WQS_POISON;
250 break;
251 case WQL_LINK:
252 link->wql_link.left_setid = WQL_LINK_POISON;
253 link->wql_link.right_setid = WQL_LINK_POISON;
254 break;
255 default:
256 break;
257 }
258 #ifdef KEEP_WAITQ_LINK_STATS
259 memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
260 link->sl_alloc_ts = 0;
261 memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt));
262 link->sl_mkvalid_ts = 0;
263
264 link->sl_alloc_th = THREAD_NULL;
265 /* leave the sl_alloc_task in place for debugging */
266
267 link->sl_free_ts = mach_absolute_time();
268 #endif
269 }
270
271 #ifdef KEEP_WAITQ_LINK_STATS
272 static __inline__ void
273 wql_do_alloc_stats(struct lt_elem *elem)
274 {
275 if (elem) {
276 struct waitq_link *link = (struct waitq_link *)elem;
277 memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
278 waitq_grab_backtrace(link->sl_alloc_bt, 0);
279 link->sl_alloc_th = current_thread();
280 link->sl_alloc_task = current_task();
281
282 assert(link->sl_alloc_ts == 0);
283 link->sl_alloc_ts = mach_absolute_time();
284
285 memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
286 link->sl_invalidate_ts = 0;
287 }
288 }
289
290 static __inline__ void
291 wql_do_invalidate_stats(struct lt_elem *elem)
292 {
293 struct waitq_link *link = (struct waitq_link *)elem;
294
295 if (!elem) {
296 return;
297 }
298
299 assert(link->sl_mkvalid_ts > 0);
300
301 memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
302 link->sl_invalidate_ts = mach_absolute_time();
303 waitq_grab_backtrace(link->sl_invalidate_bt, 0);
304 }
305
306 static __inline__ void
307 wql_do_mkvalid_stats(struct lt_elem *elem)
308 {
309 struct waitq_link *link = (struct waitq_link *)elem;
310
311 if (!elem) {
312 return;
313 }
314
315 memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt));
316 link->sl_mkvalid_ts = mach_absolute_time();
317 waitq_grab_backtrace(link->sl_mkvalid_bt, 0);
318 }
319 #else
320 #define wql_do_alloc_stats(e)
321 #define wql_do_invalidate_stats(e)
322 #define wql_do_mkvalid_stats(e)
323 #endif /* KEEP_WAITQ_LINK_STATS */
324
325 static void
326 wql_init(void)
327 {
328 uint32_t tablesz = 0, max_links = 0;
329
330 if (PE_parse_boot_argn("wql_tsize", &tablesz, sizeof(tablesz)) != TRUE) {
331 tablesz = (uint32_t)g_lt_max_tbl_size;
332 }
333
334 tablesz = P2ROUNDUP(tablesz, PAGE_SIZE);
335 max_links = tablesz / sizeof(struct waitq_link);
336 assert(max_links > 0 && tablesz > 0);
337
338 /* we have a restricted index range */
339 if (max_links > (LT_IDX_MAX + 1)) {
340 max_links = LT_IDX_MAX + 1;
341 }
342
343 wqinfo("init linktable with max:%d elements (%d bytes)",
344 max_links, tablesz);
345 ltable_init(&g_wqlinktable, "wqslab.wql", max_links,
346 sizeof(struct waitq_link), wql_poison);
347 }
348
349 static void
350 wql_ensure_free_space(void)
351 {
352 if (g_wqlinktable.nelem - g_wqlinktable.used_elem < g_min_free_table_elem) {
353 /*
354 * we don't hold locks on these values, so check for underflow
355 */
356 if (g_wqlinktable.used_elem <= g_wqlinktable.nelem) {
357 wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d",
358 g_wqlinktable.nelem, g_wqlinktable.used_elem,
359 g_min_free_table_elem);
360 ltable_grow(&g_wqlinktable, g_min_free_table_elem);
361 }
362 }
363 }
364
365 static struct waitq_link *
366 wql_alloc_link(int type)
367 {
368 struct lt_elem *elem;
369
370 elem = ltable_alloc_elem(&g_wqlinktable, type, 1, 0);
371 wql_do_alloc_stats(elem);
372 return (struct waitq_link *)elem;
373 }
374
375 static void
376 wql_realloc_link(struct waitq_link *link, int type)
377 {
378 ltable_realloc_elem(&g_wqlinktable, &link->wqte, type);
379 #ifdef KEEP_WAITQ_LINK_STATS
380 memset(link->sl_alloc_bt, 0, sizeof(link->sl_alloc_bt));
381 link->sl_alloc_ts = 0;
382 wql_do_alloc_stats(&link->wqte);
383
384 memset(link->sl_invalidate_bt, 0, sizeof(link->sl_invalidate_bt));
385 link->sl_invalidate_ts = 0;
386 #endif
387 }
388
389 static void
390 wql_invalidate(struct waitq_link *link)
391 {
392 lt_elem_invalidate(&link->wqte);
393 wql_do_invalidate_stats(&link->wqte);
394 }
395
396 static struct waitq_link *
397 wql_get_link(uint64_t setid)
398 {
399 struct lt_elem *elem;
400
401 elem = ltable_get_elem(&g_wqlinktable, setid);
402 return (struct waitq_link *)elem;
403 }
404
405 static void
406 wql_put_link(struct waitq_link *link)
407 {
408 if (!link) {
409 return;
410 }
411 ltable_put_elem(&g_wqlinktable, (struct lt_elem *)link);
412 }
413
414 static struct waitq_link *
415 wql_get_reserved(uint64_t setid, int type)
416 {
417 struct lt_elem *elem;
418
419 elem = lt_elem_list_first(&g_wqlinktable, setid);
420 if (!elem) {
421 return NULL;
422 }
423 ltable_realloc_elem(&g_wqlinktable, elem, type);
424 return (struct waitq_link *)elem;
425 }
426
427
428 static inline int waitq_maybe_remove_link(struct waitq *waitq,
429 uint64_t setid,
430 struct waitq_link *parent,
431 struct waitq_link *left,
432 struct waitq_link *right);
433
434 enum {
435 LINK_WALK_ONE_LEVEL = 0,
436 LINK_WALK_FULL_DAG = 1,
437 LINK_WALK_FULL_DAG_UNLOCKED = 2,
438 };
439
440 typedef int (*wql_callback_func)(struct waitq *waitq, void *ctx,
441 struct waitq_link *link);
442
443 /**
444 * walk_waitq_links: walk all table elements (of type 'link_type') pointed to by 'setid'
445 *
446 * Conditions:
447 * waitq is locked (or NULL)
448 * 'setid' is managed by 'waitq'
449 * this could be direct (waitq->waitq_set_id == setid)
450 * OR indirect (setid is the left/right ID in a LINK chain,
451 * whose root is waitq->waitq_set_id)
452 *
453 * Notes:
454 * This function uses recursion to walk the set of table elements
455 * pointed to by 'setid'. For each element encountered, 'cb' will be
456 * called. If non-zero, the return value of this callback function can
457 * early-out of the table walk.
458 *
459 * For each link element encountered, the function takes a reference to
460 * it. The reference is dropped only after the callback and any recursion
461 * has completed.
462 *
463 * The assumed table/link/tree structure:
464 * 'setid'
465 * / \
466 * / \
467 * L(LINK) R(LINK)
468 * /\ /\
469 * / \ / \
470 * / \ Rl(*) Rr(*)
471 * Ll(*) Lr(*) /\ /\
472 * /\ /\ ... ... ... ...
473 * ... ... ... ...
474 * \
475 *      WQS(wqset_q.waitq_set_id == Sx)
476 *      [waitq set is a member of setid 'Sx']
477 *
478 * 'Sx'
479 * / \
480 * / \
481 * L(LINK) R(LINK)
482 * /\ /\
483 * ... ... ... ...
484 *
485 * The basic algorithm is as follows:
486 * *) take a reference to the table object pointed to by 'setid'
487 * *) if appropriate, call 'cb' (potentially early-out on non-zero return)
488 * *) if the link object points to a waitq set, and the walk type
489 * is 'FULL_DAG' (full directed-acyclic-graph), then try to lock
490 * the associated waitq set object and recursively walk all sets to
491 * which that set belongs. This is a DFS of the tree structure.
492 * *) recurse down the left side of the tree (following the
493 *       'left_setid' pointer in the link object)
494 * *) recurse down the right side of the tree (following the
495 *       'right_setid' pointer in the link object)
496 */
497 static __attribute__((noinline))
498 int
499 walk_waitq_links(int walk_type, struct waitq *waitq,
500 uint64_t setid, int link_type,
501 void *ctx, wql_callback_func cb)
502 {
503 struct waitq_link *link;
504 uint64_t nextid;
505 int wqltype;
506
507 link = wql_get_link(setid);
508
509 /* invalid link */
510 if (!link) {
511 return WQ_ITERATE_CONTINUE;
512 }
513
514 setid = nextid = 0;
515 wqltype = wql_type(link);
516 if (wqltype == WQL_LINK) {
517 setid = link->wql_link.left_setid;
518 nextid = link->wql_link.right_setid;
519 }
520
521 /*
522 * Make the callback only on specified link_type (or all links)
523 * Note that after the callback, the link object may be
524 * invalid. The only valid thing we can do is put our
525 * reference to it (which may put it back on the free list)
526 */
527 if (link_type == WQL_ALL || link_type == wqltype) {
528 /* allow the callback to early-out */
529 int ret = cb(waitq, ctx, link);
530 if (ret != WQ_ITERATE_CONTINUE) {
531 wql_put_link(link);
532 return ret;
533 }
534 }
535
536 if (wqltype == WQL_WQS &&
537 (walk_type == LINK_WALK_FULL_DAG ||
538 walk_type == LINK_WALK_FULL_DAG_UNLOCKED)) {
539 /*
540 * Recurse down any sets to which this wait queue set was
541 * added. We do this just before we put our reference to
542 * the link object (which may free it).
543 */
544 struct waitq_set *wqset = link->wql_wqs.wql_set;
545 int ret = WQ_ITERATE_CONTINUE;
546 int should_unlock = 0;
547 uint64_t wqset_setid = 0;
548
549 if (waitq_set_is_valid(wqset) && walk_type == LINK_WALK_FULL_DAG) {
550 assert(!waitq_irq_safe(&wqset->wqset_q));
551 waitq_set_lock(wqset);
552 should_unlock = 1;
553 }
554
555 /*
556 * verify the linked waitq set as it could have been
557 * invalidated before we grabbed the lock!
558 */
559 if (wqset->wqset_id != link->wql_setid.id) {
560 /* This is the bottom of the tree: just get out */
561 if (should_unlock) {
562 waitq_set_unlock(wqset);
563 }
564 wql_put_link(link);
565 return WQ_ITERATE_CONTINUE;
566 }
567
568 wqset_setid = wqset->wqset_q.waitq_set_id;
569
570 if (wqset_setid > 0) {
571 ret = walk_waitq_links(walk_type, &wqset->wqset_q,
572 wqset_setid, link_type, ctx, cb);
573 }
574 if (should_unlock) {
575 waitq_set_unlock(wqset);
576 }
577 if (ret != WQ_ITERATE_CONTINUE) {
578 wql_put_link(link);
579 return ret;
580 }
581 }
582
583 wql_put_link(link);
584
585 /* recurse down left side of the tree */
586 if (setid) {
587 int ret = walk_waitq_links(walk_type, waitq, setid, link_type, ctx, cb);
588 if (ret != WQ_ITERATE_CONTINUE) {
589 return ret;
590 }
591 }
592
593 /* recurse down right side of the tree */
594 if (nextid) {
595 return walk_waitq_links(walk_type, waitq, nextid, link_type, ctx, cb);
596 }
597
598 return WQ_ITERATE_CONTINUE;
599 }
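
#if 0
/*
 * Illustrative (hypothetical) use of the walk_waitq_links() callback
 * pattern: count every wait queue set directly reachable from a waitq's
 * waitq_set_id. 'count_wqs_cb' and 'count_linked_sets' exist only for this
 * sketch; the callback returns WQ_ITERATE_CONTINUE so the walk visits the
 * whole link tree rather than early-outing.
 */
static int
count_wqs_cb(struct waitq *waitq, void *ctx, struct waitq_link *link)
{
	(void)waitq;
	if (wql_type(link) == WQL_WQS) {
		*(int *)ctx += 1;
	}
	return WQ_ITERATE_CONTINUE;
}

static int
count_linked_sets(struct waitq *waitq)	/* caller holds the waitq lock */
{
	int nsets = 0;
	(void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
	    WQL_ALL, (void *)&nsets, count_wqs_cb);
	return nsets;
}
#endif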
600
601 /* ----------------------------------------------------------------------
602 *
603 * Prepost Link Table Implementation
604 *
605 * ---------------------------------------------------------------------- */
606 static struct link_table g_prepost_table;
607
608 enum wq_prepost_type {
609 WQP_FREE = LT_FREE,
610 WQP_WQ = LT_ELEM,
611 WQP_POST = LT_LINK,
612 };
613
614 struct wq_prepost {
615 struct lt_elem wqte;
616
617 union {
618 /* wqt_type == WQP_WQ (LT_ELEM) */
619 struct {
620 struct waitq *wqp_wq_ptr;
621 } wqp_wq;
622 /* wqt_type == WQP_POST (LT_LINK) */
623 struct {
624 uint64_t wqp_next_id;
625 uint64_t wqp_wq_id;
626 } wqp_post;
627 };
628 #ifdef KEEP_WAITQ_PREPOST_STATS
629 thread_t wqp_alloc_th;
630 task_t wqp_alloc_task;
631 uintptr_t wqp_alloc_bt[NWAITQ_BTFRAMES];
632 #endif
633 };
634 #if !defined(KEEP_WAITQ_PREPOST_STATS)
635 static_assert((sizeof(struct wq_prepost) & (sizeof(struct wq_prepost) - 1)) == 0,
636 "wq_prepost struct must be a power of two!");
637 #endif
638
639 #define wqp_refcnt(wqp) \
640 (lt_bits_refcnt((wqp)->wqte.lt_bits))
641
642 #define wqp_type(wqp) \
643 (lt_bits_type((wqp)->wqte.lt_bits))
644
645 #define wqp_set_valid(wqp) \
646 lt_elem_mkvalid(&(wqp)->wqte)
647
648 #define wqp_is_valid(wqp) \
649 lt_bits_valid((wqp)->wqte.lt_bits)
650
651 #define wqp_prepostid wqte.lt_id
652
653 #define WQP_WQ_POISON (0x0bad0badffffffffull)
654 #define WQP_POST_POISON (0xf00df00df00df00d)
655
656 static void
657 wqp_poison(struct link_table *table, struct lt_elem *elem)
658 {
659 struct wq_prepost *wqp = (struct wq_prepost *)elem;
660 (void)table;
661
662 switch (wqp_type(wqp)) {
663 case WQP_WQ:
664 break;
665 case WQP_POST:
666 wqp->wqp_post.wqp_next_id = WQP_POST_POISON;
667 wqp->wqp_post.wqp_wq_id = WQP_POST_POISON;
668 break;
669 default:
670 break;
671 }
672 }
673
674 #ifdef KEEP_WAITQ_PREPOST_STATS
675 static __inline__ void
676 wqp_do_alloc_stats(struct lt_elem *elem)
677 {
678 if (!elem) {
679 return;
680 }
681
682 struct wq_prepost *wqp = (struct wq_prepost *)elem;
683 uintptr_t alloc_bt[sizeof(wqp->wqp_alloc_bt)];
684
685 waitq_grab_backtrace(alloc_bt, NWAITQ_BTFRAMES);
686
687 /* be sure to take stats for _all_ allocated objects */
688 for (;;) {
689 memcpy(wqp->wqp_alloc_bt, alloc_bt, sizeof(alloc_bt));
690 wqp->wqp_alloc_th = current_thread();
691 wqp->wqp_alloc_task = current_task();
692 wqp = (struct wq_prepost *)lt_elem_list_next(&g_prepost_table, &wqp->wqte);
693 if (!wqp) {
694 break;
695 }
696 }
697 }
698 #else
699 #define wqp_do_alloc_stats(e)
700 #endif /* KEEP_WAITQ_PREPOST_STATS */
701
702 static void
703 wqp_init(void)
704 {
705 uint32_t tablesz = 0, max_wqp = 0;
706
707 if (PE_parse_boot_argn("wqp_tsize", &tablesz, sizeof(tablesz)) != TRUE) {
708 tablesz = (uint32_t)g_lt_max_tbl_size;
709 }
710
711 tablesz = P2ROUNDUP(tablesz, PAGE_SIZE);
712 max_wqp = tablesz / sizeof(struct wq_prepost);
713 assert(max_wqp > 0 && tablesz > 0);
714
715 /* we have a restricted index range */
716 if (max_wqp > (LT_IDX_MAX + 1)) {
717 max_wqp = LT_IDX_MAX + 1;
718 }
719
720 wqinfo("init prepost table with max:%d elements (%d bytes)",
721 max_wqp, tablesz);
722 ltable_init(&g_prepost_table, "wqslab.prepost", max_wqp,
723 sizeof(struct wq_prepost), wqp_poison);
724 }
725
726 /*
727 * Refill the per-CPU cache.
728 */
729 static void
730 wq_prepost_refill_cpu_cache(uint32_t nalloc)
731 {
732 struct lt_elem *new_head, *old_head;
733 struct wqp_cache *cache;
734
735 /* require preemption enabled to allocate elements */
736 if (get_preemption_level() != 0) {
737 return;
738 }
739
740 new_head = ltable_alloc_elem(&g_prepost_table,
741 LT_RESERVED, nalloc, 1);
742 if (new_head == NULL) {
743 return;
744 }
745
746 disable_preemption();
747 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
748
749 /* check once more before putting these elements on the list */
750 if (cache->avail >= WQP_CACHE_MAX) {
751 lt_elem_list_release(&g_prepost_table, new_head, LT_RESERVED);
752 enable_preemption();
753 return;
754 }
755
756 cache->avail += nalloc;
757 if (cache->head == 0 || cache->head == LT_IDX_MAX) {
758 cache->head = new_head->lt_id.id;
759 goto out;
760 }
761
762 old_head = lt_elem_list_first(&g_prepost_table, cache->head);
763 (void)lt_elem_list_link(&g_prepost_table, new_head, old_head);
764 cache->head = new_head->lt_id.id;
765
766 out:
767 enable_preemption();
768 return;
769 }
770
771 static void
772 wq_prepost_ensure_free_space(void)
773 {
774 uint32_t free_elem;
775 uint32_t min_free;
776 struct wqp_cache *cache;
777
778 if (g_min_free_cache == 0) {
779 g_min_free_cache = (WQP_CACHE_MAX * ml_get_max_cpus());
780 }
781
782 /*
783 * Ensure that we always have a pool of per-CPU prepost elements
784 */
785 disable_preemption();
786 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
787 free_elem = cache->avail;
788 enable_preemption();
789
790 if (free_elem < (WQP_CACHE_MAX / 3)) {
791 wq_prepost_refill_cpu_cache(WQP_CACHE_MAX - free_elem);
792 }
793
794 /*
795 * Now ensure that we have a sufficient amount of free table space
796 */
797 free_elem = g_prepost_table.nelem - g_prepost_table.used_elem;
798 min_free = g_min_free_table_elem + g_min_free_cache;
799 if (free_elem < min_free) {
800 /*
801 * we don't hold locks on these values, so check for underflow
802 */
803 if (g_prepost_table.used_elem <= g_prepost_table.nelem) {
804 wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d+%d",
805 g_prepost_table.nelem, g_prepost_table.used_elem,
806 g_min_free_table_elem, g_min_free_cache);
807 ltable_grow(&g_prepost_table, min_free);
808 }
809 }
810 }
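
/*
 * Illustrative example (WQP_CACHE_MAX is defined elsewhere; the numbers
 * below assume a hypothetical value of 64): a CPU's cache is refilled once
 * fewer than 64 / 3 = 21 reserved elements remain, and the refill requests
 * 64 - avail new elements so the cache is topped back up to the maximum.
 */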
811
812 static struct wq_prepost *
813 wq_prepost_alloc(int type, int nelem)
814 {
815 struct lt_elem *elem;
816 struct wq_prepost *wqp;
817 struct wqp_cache *cache;
818
819 if (type != LT_RESERVED) {
820 goto do_alloc;
821 }
822 if (nelem == 0) {
823 return NULL;
824 }
825
826 /*
827 * First try to grab the elements from the per-CPU cache if we are
828 * allocating RESERVED elements
829 */
830 disable_preemption();
831 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
832 if (nelem <= (int)cache->avail) {
833 struct lt_elem *first, *next = NULL;
834 int nalloc = nelem;
835
836 cache->avail -= nelem;
837
838 /* grab the first element */
839 first = lt_elem_list_first(&g_prepost_table, cache->head);
840
841 /* find the last element and re-adjust the cache head */
842 for (elem = first; elem != NULL && nalloc > 0; elem = next) {
843 next = lt_elem_list_next(&g_prepost_table, elem);
844 if (--nalloc == 0) {
845 /* terminate the allocated list */
846 elem->lt_next_idx = LT_IDX_MAX;
847 break;
848 }
849 }
850 assert(nalloc == 0);
851 if (!next) {
852 cache->head = LT_IDX_MAX;
853 } else {
854 cache->head = next->lt_id.id;
855 }
856 /* assert that we don't have mismatched bookkeeping */
857 assert(!(cache->head == LT_IDX_MAX && cache->avail > 0));
858 enable_preemption();
859 elem = first;
860 goto out;
861 }
862 enable_preemption();
863
864 do_alloc:
865 /* fall-back to standard table allocation */
866 elem = ltable_alloc_elem(&g_prepost_table, type, nelem, 0);
867 if (!elem) {
868 return NULL;
869 }
870
871 out:
872 wqp = (struct wq_prepost *)elem;
873 wqp_do_alloc_stats(elem);
874 return wqp;
875 }
876
877 static void
878 wq_prepost_invalidate(struct wq_prepost *wqp)
879 {
880 lt_elem_invalidate(&wqp->wqte);
881 }
882
883 static struct wq_prepost *
884 wq_prepost_get(uint64_t wqp_id)
885 {
886 struct lt_elem *elem;
887
888 elem = ltable_get_elem(&g_prepost_table, wqp_id);
889 return (struct wq_prepost *)elem;
890 }
891
892 static void
893 wq_prepost_put(struct wq_prepost *wqp)
894 {
895 ltable_put_elem(&g_prepost_table, (struct lt_elem *)wqp);
896 }
897
898 static int
899 wq_prepost_rlink(struct wq_prepost *parent, struct wq_prepost *child)
900 {
901 return lt_elem_list_link(&g_prepost_table, &parent->wqte, &child->wqte);
902 }
903
904 static struct wq_prepost *
905 wq_prepost_get_rnext(struct wq_prepost *head)
906 {
907 struct lt_elem *elem;
908 struct wq_prepost *wqp;
909 uint64_t id;
910
911 elem = lt_elem_list_next(&g_prepost_table, &head->wqte);
912 if (!elem) {
913 return NULL;
914 }
915 id = elem->lt_id.id;
916 elem = ltable_get_elem(&g_prepost_table, id);
917
918 if (!elem) {
919 return NULL;
920 }
921 wqp = (struct wq_prepost *)elem;
922 if (elem->lt_id.id != id ||
923 wqp_type(wqp) != WQP_POST ||
924 wqp->wqp_post.wqp_next_id != head->wqp_prepostid.id) {
925 ltable_put_elem(&g_prepost_table, elem);
926 return NULL;
927 }
928
929 return wqp;
930 }
931
932 static void
933 wq_prepost_reset_rnext(struct wq_prepost *wqp)
934 {
935 (void)lt_elem_list_break(&g_prepost_table, &wqp->wqte);
936 }
937
938
939 /**
940 * remove 'wqp' from the prepost list on 'wqset'
941 *
942 * Conditions:
943 * wqset is locked
944 * caller holds a reference on wqp (and is responsible to release it)
945 *
946 * Result:
947 * wqp is invalidated, wqset is potentially updated with a new
948 * prepost ID, and the next element of the prepost list may be
949 * consumed as well (if the list contained only 2 objects)
950 */
951 static int
952 wq_prepost_remove(struct waitq_set *wqset,
953 struct wq_prepost *wqp)
954 {
955 int more_posts = 1;
956 uint64_t next_id = wqp->wqp_post.wqp_next_id;
957 uint64_t wqp_id = wqp->wqp_prepostid.id;
958 struct wq_prepost *prev_wqp, *next_wqp;
959
960 assert(wqp_type(wqp) == WQP_POST);
961 assert(wqset->wqset_q.waitq_prepost == 1);
962
963 if (next_id == wqp_id) {
964 /* the list is singular and becoming empty */
965 wqset->wqset_prepost_id = 0;
966 more_posts = 0;
967 goto out;
968 }
969
970 prev_wqp = wq_prepost_get_rnext(wqp);
971 assert(prev_wqp != NULL);
972 assert(prev_wqp->wqp_post.wqp_next_id == wqp_id);
973 assert(prev_wqp->wqp_prepostid.id != wqp_id);
974 assert(wqp_type(prev_wqp) == WQP_POST);
975
976 if (prev_wqp->wqp_prepostid.id == next_id) {
977 /*
978 * There are two items in the list, and we're removing one. We
979 * only need to keep the WQP_WQ pointer from 'prev_wqp'
980 */
981 wqset->wqset_prepost_id = prev_wqp->wqp_post.wqp_wq_id;
982 wq_prepost_invalidate(prev_wqp);
983 wq_prepost_put(prev_wqp);
984 more_posts = 0;
985 goto out;
986 }
987
988 /* prev->next = next */
989 prev_wqp->wqp_post.wqp_next_id = next_id;
990
991 /* next->prev = prev */
992 next_wqp = wq_prepost_get(next_id);
993 assert(next_wqp != NULL);
994 assert(next_wqp != wqp);
995 assert(next_wqp != prev_wqp);
996 assert(wqp_type(next_wqp) == WQP_POST);
997
998 wq_prepost_reset_rnext(next_wqp);
999 wq_prepost_rlink(next_wqp, prev_wqp);
1000
1001 /* If we remove the head of the list, update the wqset */
1002 if (wqp_id == wqset->wqset_prepost_id) {
1003 wqset->wqset_prepost_id = next_id;
1004 }
1005
1006 wq_prepost_put(prev_wqp);
1007 wq_prepost_put(next_wqp);
1008
1009 out:
1010 wq_prepost_reset_rnext(wqp);
1011 wq_prepost_invalidate(wqp);
1012 return more_posts;
1013 }
1014
1015 static struct wq_prepost *
1016 wq_prepost_rfirst(uint64_t id)
1017 {
1018 struct lt_elem *elem;
1019 elem = lt_elem_list_first(&g_prepost_table, id);
1020 wqp_do_alloc_stats(elem);
1021 return (struct wq_prepost *)(void *)elem;
1022 }
1023
1024 static struct wq_prepost *
1025 wq_prepost_rpop(uint64_t *id, int type)
1026 {
1027 struct lt_elem *elem;
1028 elem = lt_elem_list_pop(&g_prepost_table, id, type);
1029 wqp_do_alloc_stats(elem);
1030 return (struct wq_prepost *)(void *)elem;
1031 }
1032
1033 static void
1034 wq_prepost_release_rlist(struct wq_prepost *wqp)
1035 {
1036 int nelem = 0;
1037 struct wqp_cache *cache;
1038 struct lt_elem *elem;
1039
1040 if (!wqp) {
1041 return;
1042 }
1043
1044 elem = &wqp->wqte;
1045
1046 /*
1047 * These are reserved elements: release them back to the per-cpu pool
1048 * if our cache is running low.
1049 */
1050 disable_preemption();
1051 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
1052 if (cache->avail < WQP_CACHE_MAX) {
1053 struct lt_elem *tmp = NULL;
1054 if (cache->head != LT_IDX_MAX) {
1055 tmp = lt_elem_list_first(&g_prepost_table, cache->head);
1056 }
1057 nelem = lt_elem_list_link(&g_prepost_table, elem, tmp);
1058 cache->head = elem->lt_id.id;
1059 cache->avail += nelem;
1060 enable_preemption();
1061 return;
1062 }
1063 enable_preemption();
1064
1065 /* release these elements back to the main table */
1066 nelem = lt_elem_list_release(&g_prepost_table, elem, LT_RESERVED);
1067
1068 #if CONFIG_WAITQ_STATS
1069 g_prepost_table.nreserved_releases += 1;
1070 OSDecrementAtomic64(&g_prepost_table.nreservations);
1071 #endif
1072 }
1073
1074 typedef int (*wqp_callback_func)(struct waitq_set *wqset,
1075 void *ctx,
1076 struct wq_prepost *wqp,
1077 struct waitq *waitq);
1078
1079 /**
1080 * iterate over a chain of preposts associated with a waitq set.
1081 *
1082 * Conditions:
1083 * wqset is locked
1084 *
1085 * Notes:
1086 * This loop performs automatic prepost chain management / culling, and
1087 * may reset or adjust the waitq set's prepost ID pointer. If you don't
1088 * want this extra processing, you can use wq_prepost_iterate().
1089 */
1090 static int
1091 wq_prepost_foreach_locked(struct waitq_set *wqset,
1092 void *ctx, wqp_callback_func cb)
1093 {
1094 int ret = WQ_ITERATE_SUCCESS;
1095 struct wq_prepost *wqp, *tmp_wqp;
1096
1097 assert(cb != NULL);
1098
1099 if (!wqset || !waitq_set_maybe_preposted(wqset)) {
1100 return WQ_ITERATE_SUCCESS;
1101 }
1102
1103 restart:
1104 wqp = wq_prepost_get(wqset->wqset_prepost_id);
1105 if (!wqp) {
1106 /*
1107 * The prepost object is no longer valid, reset the waitq
1108 * set's prepost id.
1109 */
1110 wqset->wqset_prepost_id = 0;
1111 return WQ_ITERATE_SUCCESS;
1112 }
1113
1114 if (wqp_type(wqp) == WQP_WQ) {
1115 uint64_t __assert_only wqp_id = wqp->wqp_prepostid.id;
1116
1117 ret = cb(wqset, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr);
1118
1119 switch (ret) {
1120 case WQ_ITERATE_INVALIDATE_CONTINUE:
1121 /* the caller wants to remove the only prepost here */
1122 assert(wqp_id == wqset->wqset_prepost_id);
1123 wqset->wqset_prepost_id = 0;
1124 /* fall through */
1125 case WQ_ITERATE_CONTINUE:
1126 wq_prepost_put(wqp);
1127 ret = WQ_ITERATE_SUCCESS;
1128 break;
1129 case WQ_ITERATE_RESTART:
1130 wq_prepost_put(wqp);
1131 /* fall through */
1132 case WQ_ITERATE_DROPPED:
1133 goto restart;
1134 default:
1135 wq_prepost_put(wqp);
1136 break;
1137 }
1138 return ret;
1139 }
1140
1141 assert(wqp->wqp_prepostid.id == wqset->wqset_prepost_id);
1142 assert(wqp_type(wqp) == WQP_POST);
1143
1144 /*
1145 * At this point we know we have a list of POST objects.
1146 * Grab a handle to the last element in the list and start
1147 * the iteration.
1148 */
1149 tmp_wqp = wq_prepost_get_rnext(wqp);
1150 assert(tmp_wqp != NULL && wqp_type(tmp_wqp) == WQP_POST);
1151
1152 uint64_t last_id = tmp_wqp->wqp_prepostid.id;
1153 wq_prepost_put(tmp_wqp);
1154
1155 ret = WQ_ITERATE_SUCCESS;
1156 for (;;) {
1157 uint64_t wqp_id, first_id, next_id;
1158
1159 wqp_id = wqp->wqp_prepostid.id;
1160 first_id = wqset->wqset_prepost_id;
1161 next_id = wqp->wqp_post.wqp_next_id;
1162
1163 /* grab the WQP_WQ object this _POST points to */
1164 tmp_wqp = wq_prepost_get(wqp->wqp_post.wqp_wq_id);
1165 if (!tmp_wqp) {
1166 /*
1167 * This WQP_POST object points to an invalid
1168 * WQP_WQ object - remove the POST object from
1169 * the list.
1170 */
1171 if (wq_prepost_remove(wqset, wqp) == 0) {
1172 wq_prepost_put(wqp);
1173 goto restart;
1174 }
1175 goto next_prepost;
1176 }
1177 assert(wqp_type(tmp_wqp) == WQP_WQ);
1178 /*
1179 * make the callback: note that this could remove 'wqp' or
1180 * drop the lock on our waitq set. We need to re-validate
1181 * our state when this function returns.
1182 */
1183 ret = cb(wqset, ctx, wqp, tmp_wqp->wqp_wq.wqp_wq_ptr);
1184 wq_prepost_put(tmp_wqp);
1185
1186 switch (ret) {
1187 case WQ_ITERATE_CONTINUE:
1188 /* continue iteration */
1189 break;
1190 case WQ_ITERATE_INVALIDATE_CONTINUE:
1191 assert(next_id == wqp->wqp_post.wqp_next_id);
1192 if (wq_prepost_remove(wqset, wqp) == 0) {
1193 wq_prepost_put(wqp);
1194 goto restart;
1195 }
1196 goto next_prepost;
1197 case WQ_ITERATE_RESTART:
1198 wq_prepost_put(wqp);
1199 /* fall-through */
1200 case WQ_ITERATE_DROPPED:
1201 /* the callback dropped the ref to wqp: just restart */
1202 goto restart;
1203 default:
1204 /* break out of the iteration for some other reason */
1205 goto finish_prepost_foreach;
1206 }
1207
1208 /*
1209 * the set lock may have been dropped during callback,
1210 * if something looks different, restart the prepost iteration
1211 */
1212 if (!wqp_is_valid(wqp) ||
1213 (wqp->wqp_post.wqp_next_id != next_id) ||
1214 wqset->wqset_prepost_id != first_id) {
1215 wq_prepost_put(wqp);
1216 goto restart;
1217 }
1218
1219 next_prepost:
1220 /* this was the last object in the list */
1221 if (wqp_id == last_id) {
1222 break;
1223 }
1224
1225 /* get the next object */
1226 tmp_wqp = wq_prepost_get(next_id);
1227 if (!tmp_wqp) {
1228 /*
1229 * At this point we've already checked our state
1230 * after the callback (which may have dropped the set
1231 * lock). If we find an invalid member of the list
1232 * then something is wrong.
1233 */
1234 panic("Invalid WQP_POST member 0x%llx in waitq set "
1235 "0x%llx prepost list (first:%llx, "
1236 "wqp:%p)",
1237 next_id, wqset->wqset_id, first_id, wqp);
1238 }
1239 wq_prepost_put(wqp);
1240 wqp = tmp_wqp;
1241
1242 assert(wqp_type(wqp) == WQP_POST);
1243 }
1244
1245 finish_prepost_foreach:
1246 wq_prepost_put(wqp);
1247 if (ret == WQ_ITERATE_CONTINUE) {
1248 ret = WQ_ITERATE_SUCCESS;
1249 }
1250
1251 return ret;
1252 }
1253
1254 /**
1255 * Perform a simple loop over a chain of prepost objects
1256 *
1257 * Conditions:
1258 * If 'prepost_id' is associated with a waitq (set) then that object must
1259 * be locked before calling this function.
1260 * Callback function, 'cb', must be able to handle a NULL wqset pointer
1261 * and a NULL waitq pointer!
1262 *
1263 * Notes:
1264 * This prepost chain iteration will _not_ automatically adjust any chain
1265 * element or linkage. This is the responsibility of the caller! If you
1266 * want automatic prepost chain management (at a cost of extra CPU time),
1267 * you can use: wq_prepost_foreach_locked().
1268 */
1269 static int
1270 wq_prepost_iterate(uint64_t prepost_id,
1271 void *ctx, wqp_callback_func cb)
1272 {
1273 int ret;
1274 struct wq_prepost *wqp;
1275
1276 if (!prepost_id) {
1277 return WQ_ITERATE_SUCCESS;
1278 }
1279
1280 wqp = wq_prepost_get(prepost_id);
1281 if (!wqp) {
1282 return WQ_ITERATE_SUCCESS;
1283 }
1284
1285 if (wqp_type(wqp) == WQP_WQ) {
1286 ret = WQ_ITERATE_SUCCESS;
1287 if (cb) {
1288 ret = cb(NULL, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr);
1289 }
1290
1291 if (ret != WQ_ITERATE_DROPPED) {
1292 wq_prepost_put(wqp);
1293 }
1294 return ret;
1295 }
1296
1297 assert(wqp->wqp_prepostid.id == prepost_id);
1298 assert(wqp_type(wqp) == WQP_POST);
1299
1300 /* at this point we know we have a list of POST objects */
1301 uint64_t next_id;
1302
1303 ret = WQ_ITERATE_CONTINUE;
1304 do {
1305 struct wq_prepost *tmp_wqp;
1306 struct waitq *wq = NULL;
1307
1308 next_id = wqp->wqp_post.wqp_next_id;
1309
1310 /* grab the WQP_WQ object this _POST points to */
1311 tmp_wqp = wq_prepost_get(wqp->wqp_post.wqp_wq_id);
1312 if (tmp_wqp) {
1313 assert(wqp_type(tmp_wqp) == WQP_WQ);
1314 wq = tmp_wqp->wqp_wq.wqp_wq_ptr;
1315 }
1316
1317 if (cb) {
1318 ret = cb(NULL, ctx, wqp, wq);
1319 }
1320 if (tmp_wqp) {
1321 wq_prepost_put(tmp_wqp);
1322 }
1323
1324 if (ret != WQ_ITERATE_CONTINUE) {
1325 break;
1326 }
1327
1328 tmp_wqp = wq_prepost_get(next_id);
1329 if (!tmp_wqp) {
1330 /*
1331 * the chain is broken: nothing we can do here besides
1332 * bail from the iteration.
1333 */
1334 ret = WQ_ITERATE_ABORTED;
1335 break;
1336 }
1337
1338 wq_prepost_put(wqp);
1339 wqp = tmp_wqp;
1340
1341 assert(wqp_type(wqp) == WQP_POST);
1342 } while (next_id != prepost_id);
1343
1344 if (ret != WQ_ITERATE_DROPPED) {
1345 wq_prepost_put(wqp);
1346 }
1347
1348 if (ret == WQ_ITERATE_CONTINUE) {
1349 ret = WQ_ITERATE_SUCCESS;
1350 }
1351 return ret;
1352 }
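
#if 0
/*
 * Illustrative (hypothetical) wq_prepost_iterate() callback: count the live
 * entries on a prepost chain without trimming it. As noted above, the
 * callback must tolerate a NULL wqset and a NULL waitq.
 * 'count_prepost_cb' exists only for this sketch.
 */
static int
count_prepost_cb(struct waitq_set *wqset, void *ctx,
    struct wq_prepost *wqp, struct waitq *waitq)
{
	(void)wqset;
	(void)wqp;
	if (waitq != NULL) {
		*(int *)ctx += 1;
	}
	return WQ_ITERATE_CONTINUE;
}
/* usage: int n = 0; wq_prepost_iterate(prepost_id, &n, count_prepost_cb); */
#endif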
1353
1354
1355 struct _is_posted_ctx {
1356 struct waitq *posting_wq;
1357 int did_prepost;
1358 };
1359
1360 static int
1361 wq_is_preposted_on_set_cb(struct waitq_set *wqset, void *ctx,
1362 struct wq_prepost *wqp, struct waitq *waitq)
1363 {
1364 struct _is_posted_ctx *pctx = (struct _is_posted_ctx *)ctx;
1365
1366 (void)wqset;
1367 (void)wqp;
1368
1369 /*
1370 * Don't early-out, run through the _entire_ list:
1371 * This keeps the number of invalid elements in the list to a minimum.
1372 */
1373 if (pctx->posting_wq == waitq) {
1374 pctx->did_prepost = 1;
1375 }
1376
1377 return WQ_ITERATE_CONTINUE;
1378 }
1379
1380
1381 /**
1382 * checks if 'waitq' has already preposted on 'wqset'
1383 *
1384 * Parameters:
1385 * waitq The waitq that's preposting
1386 * wqset The set onto which waitq may be preposted
1387 *
1388 * Conditions:
1389 * both waitq and wqset are locked
1390 *
1391 * Returns non-zero if 'waitq' has already preposted to 'wqset'
1392 */
1393 static int
1394 wq_is_preposted_on_set(struct waitq *waitq, struct waitq_set *wqset)
1395 {
1396 int ret;
1397 struct _is_posted_ctx pctx;
1398
1399 /*
1400 * If the set's only prepost matches the waitq's prepost ID,
1401 * then it obviously already preposted to the set.
1402 */
1403 if (waitq->waitq_prepost_id != 0 &&
1404 wqset->wqset_prepost_id == waitq->waitq_prepost_id) {
1405 return 1;
1406 }
1407
1408 /* use full prepost iteration: always trim the list */
1409 pctx.posting_wq = waitq;
1410 pctx.did_prepost = 0;
1411 ret = wq_prepost_foreach_locked(wqset, (void *)&pctx,
1412 wq_is_preposted_on_set_cb);
1413 return pctx.did_prepost;
1414 }
1415
1416 static struct wq_prepost *
1417 wq_get_prepost_obj(uint64_t *reserved, int type)
1418 {
1419 struct wq_prepost *wqp = NULL;
1420 /*
1421 * don't fail just because the caller doesn't have enough
1422 * reservations; we've kept a low-water mark on the prepost table,
1423 * so there should be some available for us.
1424 */
1425 if (reserved && *reserved) {
1426 wqp = wq_prepost_rpop(reserved, type);
1427 assert(wqp->wqte.lt_id.idx < g_prepost_table.nelem);
1428 } else {
1429 /*
1430 * TODO: if in interrupt context, grab from a special
1431 * region / reserved list!
1432 */
1433 wqp = wq_prepost_alloc(type, 1);
1434 }
1435
1436 if (wqp == NULL) {
1437 panic("Couldn't allocate prepost object!");
1438 }
1439 return wqp;
1440 }
1441
1442
1443 /**
1444 * prepost a waitq onto a waitq set
1445 *
1446 * Parameters:
1447 * wqset The set onto which waitq will be preposted
1448 * waitq The waitq that's preposting
1449 * reserved List (lt_elem_list_ style) of pre-allocated prepost elements
1450 * Could be NULL
1451 *
1452 * Conditions:
1453 * both wqset and waitq are locked
1454 *
1455 * Notes:
1456 * If reserved is NULL, this may block on prepost table growth.
1457 */
1458 static void
1459 wq_prepost_do_post_locked(struct waitq_set *wqset,
1460 struct waitq *waitq,
1461 uint64_t *reserved)
1462 {
1463 struct wq_prepost *wqp_post, *wqp_head, *wqp_tail;
1464
1465 assert(waitq_held(waitq) && waitq_held(&wqset->wqset_q));
1466
1467 /*
1468 * nothing to do if it's already preposted:
1469 * note that this also culls any invalid prepost objects
1470 */
1471 if (wq_is_preposted_on_set(waitq, wqset)) {
1472 return;
1473 }
1474
1475 assert(waitqs_is_linked(wqset));
1476
1477 /*
1478 * This function is called because an event is being posted to 'waitq'.
1479 * We need a prepost object associated with this queue. Allocate one
1480 * now if the waitq isn't already associated with one.
1481 */
1482 if (waitq->waitq_prepost_id == 0) {
1483 struct wq_prepost *wqp;
1484 wqp = wq_get_prepost_obj(reserved, WQP_WQ);
1485 wqp->wqp_wq.wqp_wq_ptr = waitq;
1486 wqp_set_valid(wqp);
1487 waitq->waitq_prepost_id = wqp->wqp_prepostid.id;
1488 wq_prepost_put(wqp);
1489 }
1490
1491 #if CONFIG_LTABLE_STATS
1492 g_prepost_table.npreposts += 1;
1493 #endif
1494
1495 wqdbg_v("preposting waitq %p (0x%llx) to set 0x%llx",
1496 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq),
1497 waitq->waitq_prepost_id, wqset->wqset_id);
1498
1499 if (wqset->wqset_prepost_id == 0) {
1500 /* the set has no previous preposts */
1501 wqset->wqset_prepost_id = waitq->waitq_prepost_id;
1502 return;
1503 }
1504
1505 wqp_head = wq_prepost_get(wqset->wqset_prepost_id);
1506 if (!wqp_head) {
1507 /* the previous prepost has become invalid */
1508 wqset->wqset_prepost_id = waitq->waitq_prepost_id;
1509 return;
1510 }
1511
1512 assert(wqp_head->wqp_prepostid.id == wqset->wqset_prepost_id);
1513
1514 /*
1515 * If we get here, we're going to need at least one new wq_prepost
1516 * object. If the previous wqset_prepost_id points to a WQP_WQ, we
1517 * actually need to allocate 2 wq_prepost objects because the WQP_WQ
1518 * is tied to the waitq and shared across all sets.
1519 */
1520 wqp_post = wq_get_prepost_obj(reserved, WQP_POST);
1521
1522 wqp_post->wqp_post.wqp_wq_id = waitq->waitq_prepost_id;
1523 wqdbg_v("POST 0x%llx :: WQ 0x%llx", wqp_post->wqp_prepostid.id,
1524 waitq->waitq_prepost_id);
1525
1526 if (wqp_type(wqp_head) == WQP_WQ) {
1527 /*
1528 * We must replace the wqset_prepost_id with a pointer
1529 * to two new WQP_POST objects
1530 */
1531 uint64_t wqp_id = wqp_head->wqp_prepostid.id;
1532 wqdbg_v("set 0x%llx previous had 1 WQ prepost (0x%llx): "
1533 "replacing with two POST preposts",
1534 wqset->wqset_id, wqp_id);
1535
1536 /* drop the old reference */
1537 wq_prepost_put(wqp_head);
1538
1539 /* grab another new object (the 2nd of two) */
1540 wqp_head = wq_get_prepost_obj(reserved, WQP_POST);
1541
1542 /* point this one to the original WQP_WQ object */
1543 wqp_head->wqp_post.wqp_wq_id = wqp_id;
1544 wqdbg_v("POST 0x%llx :: WQ 0x%llx",
1545 wqp_head->wqp_prepostid.id, wqp_id);
1546
1547 /* link it to the new wqp_post object allocated earlier */
1548 wqp_head->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id;
1549 /* make the list doubly-linked and circular */
1550 wq_prepost_rlink(wqp_head, wqp_post);
1551
1552 /*
1553 * Finish setting up the new prepost: point it back to the
1554 * POST object we allocated to replace the original wqset
1555 * WQ prepost object
1556 */
1557 wqp_post->wqp_post.wqp_next_id = wqp_head->wqp_prepostid.id;
1558 wq_prepost_rlink(wqp_post, wqp_head);
1559
1560 /* mark objects valid, and reset the wqset prepost list head */
1561 wqp_set_valid(wqp_head);
1562 wqp_set_valid(wqp_post);
1563 wqset->wqset_prepost_id = wqp_head->wqp_prepostid.id;
1564
1565 /* release both references */
1566 wq_prepost_put(wqp_head);
1567 wq_prepost_put(wqp_post);
1568
1569 wqdbg_v("set 0x%llx: 0x%llx/0x%llx -> 0x%llx/0x%llx -> 0x%llx",
1570 wqset->wqset_id, wqset->wqset_prepost_id,
1571 wqp_head->wqp_prepostid.id, wqp_head->wqp_post.wqp_next_id,
1572 wqp_post->wqp_prepostid.id,
1573 wqp_post->wqp_post.wqp_next_id);
1574 return;
1575 }
1576
1577 assert(wqp_type(wqp_head) == WQP_POST);
1578
1579 /*
1580 * Add the new prepost to the end of the prepost list
1581 */
1582 wqp_tail = wq_prepost_get_rnext(wqp_head);
1583 assert(wqp_tail != NULL);
1584 assert(wqp_tail->wqp_post.wqp_next_id == wqset->wqset_prepost_id);
1585
1586 /*
1587 * link the head to the new tail
1588 * NOTE: this needs to happen first in case wqp_tail == wqp_head
1589 */
1590 wq_prepost_reset_rnext(wqp_head);
1591 wq_prepost_rlink(wqp_head, wqp_post);
1592
1593 /* point the new object to the list head, and list tail */
1594 wqp_post->wqp_post.wqp_next_id = wqp_head->wqp_prepostid.id;
1595 wq_prepost_rlink(wqp_post, wqp_tail);
1596
1597 /* point the last item in the waitq set's list to the new object */
1598 wqp_tail->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id;
1599
1600 wqp_set_valid(wqp_post);
1601
1602 wq_prepost_put(wqp_head);
1603 wq_prepost_put(wqp_tail);
1604 wq_prepost_put(wqp_post);
1605
1606 wqdbg_v("set 0x%llx (wqp:0x%llx) last_prepost:0x%llx, "
1607 "new_prepost:0x%llx->0x%llx", wqset->wqset_id,
1608 wqset->wqset_prepost_id, wqp_head->wqp_prepostid.id,
1609 wqp_post->wqp_prepostid.id, wqp_post->wqp_post.wqp_next_id);
1610
1611 return;
1612 }
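
/*
 * Illustrative sketch of the resulting structure: after waitq A and then
 * waitq B prepost to the same set, the code above leaves:
 *
 *   wqset->wqset_prepost_id
 *          |
 *          v
 *        POST_A --wqp_next_id--> POST_B --wqp_next_id--> (back to POST_A)
 *          |                       |
 *     wqp_wq_id                wqp_wq_id
 *          v                       v
 *       WQP_WQ(A) --> waitq A   WQP_WQ(B) --> waitq B
 *
 * The POST ring is circular via wqp_next_id, with reverse links maintained
 * by wq_prepost_rlink(); each WQP_WQ element is shared by every set the
 * waitq has preposted to.
 */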
1613
1614
1615 /* ----------------------------------------------------------------------
1616 *
1617 * Stats collection / reporting
1618 *
1619 * ---------------------------------------------------------------------- */
1620 #if CONFIG_LTABLE_STATS && CONFIG_WAITQ_STATS
1621 static void
1622 wq_table_stats(struct link_table *table, struct wq_table_stats *stats)
1623 {
1624 stats->version = WAITQ_STATS_VERSION;
1625 stats->table_elements = table->nelem;
1626 stats->table_used_elems = table->used_elem;
1627 stats->table_elem_sz = table->elem_sz;
1628 stats->table_slabs = table->nslabs;
1629 stats->table_slab_sz = table->slab_sz;
1630
1631 stats->table_num_allocs = table->nallocs;
1632 stats->table_num_preposts = table->npreposts;
1633 stats->table_num_reservations = table->nreservations;
1634
1635 stats->table_max_used = table->max_used;
1636 stats->table_avg_used = table->avg_used;
1637 stats->table_max_reservations = table->max_reservations;
1638 stats->table_avg_reservations = table->avg_reservations;
1639 }
1640
1641 void
1642 waitq_link_stats(struct wq_table_stats *stats)
1643 {
1644 if (!stats) {
1645 return;
1646 }
1647 wq_table_stats(&g_wqlinktable, stats);
1648 }
1649
1650 void
1651 waitq_prepost_stats(struct wq_table_stats *stats)
1652 {
1653 wq_table_stats(&g_prepost_table, stats);
1654 }
1655 #endif
1656
1657
1658 /* ----------------------------------------------------------------------
1659 *
1660 * Global Wait Queues
1661 *
1662 * ---------------------------------------------------------------------- */
1663
1664 static struct waitq g_boot_waitq;
1665 static struct waitq *global_waitqs = &g_boot_waitq;
1666 static uint32_t g_num_waitqs = 1;
1667
1668 /*
1669 * Keep only the low _EVENT_MASK_BITS bits of the event (zero the upper bits).
1670 */
1671 #define _CAST_TO_EVENT_MASK(event) ((uintptr_t)(event) & ((1ul << _EVENT_MASK_BITS) - 1ul))
1672
1673 static __inline__ uint32_t
1674 waitq_hash(char *key, size_t length)
1675 {
1676 uint32_t hash = os_hash_jenkins(key, length);
1677
1678 hash &= (g_num_waitqs - 1);
1679 return hash;
1680 }
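
/*
 * Note: waitq_bootstrap() forces g_num_waitqs to a power of two, so the mask
 * above is equivalent to 'hash % g_num_waitqs'. For example, with 1024
 * global waitqs the mask is 0x3ff and only the low 10 bits of the Jenkins
 * hash pick the bucket.
 */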
1681
1682 /* return a global waitq pointer corresponding to the given event */
1683 struct waitq *
1684 _global_eventq(char *event, size_t event_length)
1685 {
1686 return &global_waitqs[waitq_hash(event, event_length)];
1687 }
1688
1689 /* return an indexed global waitq pointer */
1690 struct waitq *
1691 global_waitq(int index)
1692 {
1693 return &global_waitqs[index % g_num_waitqs];
1694 }
1695
1696
1697 #if CONFIG_LTABLE_STATS || CONFIG_WAITQ_STATS
1698 /* this global is for lldb */
1699 const uint32_t g_nwaitq_btframes = NWAITQ_BTFRAMES;
1700
1701 static __inline__ void
1702 waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip)
1703 {
1704 uintptr_t buf[NWAITQ_BTFRAMES + skip];
1705 if (skip < 0) {
1706 skip = 0;
1707 }
1708 memset(buf, 0, (NWAITQ_BTFRAMES + skip) * sizeof(uintptr_t));
1709 backtrace(buf, g_nwaitq_btframes + skip);
1710 memcpy(&bt[0], &buf[skip], NWAITQ_BTFRAMES * sizeof(uintptr_t));
1711 }
1712 #else /* no stats */
1713 #define waitq_grab_backtrace(...)
1714 #endif
1715
1716 #if CONFIG_WAITQ_STATS
1717
1718 struct wq_stats g_boot_stats;
1719 struct wq_stats *g_waitq_stats = &g_boot_stats;
1720
1721 static __inline__ struct wq_stats *
1722 waitq_global_stats(struct waitq *waitq)
1723 {
1724 struct wq_stats *wqs;
1725 uint32_t idx;
1726
1727 if (!waitq_is_global(waitq)) {
1728 return NULL;
1729 }
1730
1731 idx = (uint32_t)(((uintptr_t)waitq - (uintptr_t)global_waitqs) / sizeof(*waitq));
1732 assert(idx < g_num_waitqs);
1733 wqs = &g_waitq_stats[idx];
1734 return wqs;
1735 }
1736
1737 static __inline__ void
1738 waitq_stats_count_wait(struct waitq *waitq)
1739 {
1740 struct wq_stats *wqs = waitq_global_stats(waitq);
1741 if (wqs != NULL) {
1742 wqs->waits++;
1743 waitq_grab_backtrace(wqs->last_wait, 2);
1744 }
1745 }
1746
1747 static __inline__ void
1748 waitq_stats_count_wakeup(struct waitq *waitq)
1749 {
1750 struct wq_stats *wqs = waitq_global_stats(waitq);
1751 if (wqs != NULL) {
1752 wqs->wakeups++;
1753 waitq_grab_backtrace(wqs->last_wakeup, 2);
1754 }
1755 }
1756
1757 static __inline__ void
1758 waitq_stats_count_clear_wakeup(struct waitq *waitq)
1759 {
1760 struct wq_stats *wqs = waitq_global_stats(waitq);
1761 if (wqs != NULL) {
1762 wqs->wakeups++;
1763 wqs->clears++;
1764 waitq_grab_backtrace(wqs->last_wakeup, 2);
1765 }
1766 }
1767
1768 static __inline__ void
1769 waitq_stats_count_fail(struct waitq *waitq)
1770 {
1771 struct wq_stats *wqs = waitq_global_stats(waitq);
1772 if (wqs != NULL) {
1773 wqs->failed_wakeups++;
1774 waitq_grab_backtrace(wqs->last_failed_wakeup, 2);
1775 }
1776 }
1777 #else /* !CONFIG_WAITQ_STATS */
1778 #define waitq_stats_count_wait(q) do { } while (0)
1779 #define waitq_stats_count_wakeup(q) do { } while (0)
1780 #define waitq_stats_count_clear_wakeup(q) do { } while (0)
1781 #define waitq_stats_count_fail(q) do { } while (0)
1782 #endif
1783
1784 int
1785 waitq_is_valid(struct waitq *waitq)
1786 {
1787 return (waitq != NULL) && waitq->waitq_isvalid;
1788 }
1789
1790 int
1791 waitq_set_is_valid(struct waitq_set *wqset)
1792 {
1793 return (wqset != NULL) && wqset->wqset_q.waitq_isvalid && waitqs_is_set(wqset);
1794 }
1795
1796 int
1797 waitq_is_global(struct waitq *waitq)
1798 {
1799 if (waitq >= global_waitqs && waitq < global_waitqs + g_num_waitqs) {
1800 return 1;
1801 }
1802 return 0;
1803 }
1804
1805 int
1806 waitq_irq_safe(struct waitq *waitq)
1807 {
1808 /* global wait queues have this bit set on initialization */
1809 return waitq->waitq_irq;
1810 }
1811
1812 struct waitq *
1813 waitq_get_safeq(struct waitq *waitq)
1814 {
1815 struct waitq *safeq;
1816
1817 /* Check if it's a port waitq */
1818 if (waitq_is_port_queue(waitq)) {
1819 assert(!waitq_irq_safe(waitq));
1820 safeq = ipc_port_rcv_turnstile_waitq(waitq);
1821 } else {
1822 safeq = global_eventq(waitq);
1823 }
1824 return safeq;
1825 }
1826
1827 static uint32_t
1828 waitq_hash_size(void)
1829 {
1830 uint32_t hsize, queues;
1831
1832 if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize))) {
1833 return hsize;
1834 }
1835
1836 queues = thread_max / 5;
1837 hsize = P2ROUNDUP(queues * sizeof(struct waitq), PAGE_SIZE);
1838
1839 return hsize;
1840 }
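
/*
 * Illustrative sizing example (hypothetical numbers): if thread_max were
 * 2500 and sizeof(struct waitq) were 64 bytes, then queues = 2500 / 5 = 500
 * and hsize = P2ROUNDUP(500 * 64, 4096) = P2ROUNDUP(32000, 4096) = 32768,
 * i.e. eight 4 KiB pages of hash buckets (before the power-of-two trim in
 * waitq_bootstrap()).
 */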
1841
1842 /*
1843 * Since the priority ordered waitq uses basepri as the
1844 * ordering key assert that this value fits in a uint8_t.
1845 */
1846 static_assert(MAXPRI <= UINT8_MAX);
1847
1848 static inline void
1849 waitq_thread_insert(struct waitq *wq,
1850 thread_t thread, boolean_t fifo)
1851 {
1852 if (waitq_is_turnstile_queue(wq)) {
1853 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1854 (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (THREAD_ADDED_TO_TURNSTILE_WAITQ))) | DBG_FUNC_NONE,
1855 VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)),
1856 thread_tid(thread),
1857 thread->base_pri, 0, 0);
1858
1859 turnstile_stats_update(0, TSU_TURNSTILE_BLOCK_COUNT, NULL);
1860
1861 /*
1862 * For turnstile queues (which use priority queues),
1863 * insert the thread in the heap based on its current
1864 * base_pri. Note that the priority queue implementation
1865 * is currently not stable, so it does not maintain FIFO order for
1866 * threads at the same base_pri. Also, if the base_pri
1867 * of the thread changes while it's blocked in the waitq,
1868 * the thread position should be updated in the priority
1869 * queue by calling priority queue increase/decrease
1870 * operations.
1871 */
1872 priority_queue_entry_init(&(thread->wait_prioq_links));
1873 priority_queue_insert(&wq->waitq_prio_queue,
1874 &thread->wait_prioq_links, thread->base_pri,
1875 PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
1876 } else {
1877 turnstile_stats_update(0, TSU_REGULAR_WAITQ_BLOCK_COUNT, NULL);
1878 if (fifo) {
1879 enqueue_tail(&wq->waitq_queue, &thread->wait_links);
1880 } else {
1881 enqueue_head(&wq->waitq_queue, &thread->wait_links);
1882 }
1883 }
1884 }
1885
1886 static inline void
1887 waitq_thread_remove(struct waitq *wq,
1888 thread_t thread)
1889 {
1890 if (waitq_is_turnstile_queue(wq)) {
1891 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1892 (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (THREAD_REMOVED_FROM_TURNSTILE_WAITQ))) | DBG_FUNC_NONE,
1893 VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)),
1894 thread_tid(thread),
1895 0, 0, 0);
1896 priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links,
1897 PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
1898 } else {
1899 remqueue(&(thread->wait_links));
1900 }
1901 }
1902
1903 void
1904 waitq_bootstrap(void)
1905 {
1906 kern_return_t kret;
1907 uint32_t whsize, qsz, tmp32;
1908
1909 g_min_free_table_elem = DEFAULT_MIN_FREE_TABLE_ELEM;
1910 if (PE_parse_boot_argn("wqt_min_free", &tmp32, sizeof(tmp32)) == TRUE) {
1911 g_min_free_table_elem = tmp32;
1912 }
1913 wqdbg("Minimum free table elements: %d", g_min_free_table_elem);
1914
1915 lck_grp_init(&waitq_lck_grp, "waitq", LCK_GRP_ATTR_NULL);
1916
1917 /*
1918 * Determine the amount of memory we're willing to reserve for
1919 * the waitqueue hash table
1920 */
1921 whsize = waitq_hash_size();
1922
1923 /* Determine the number of waitqueues we can fit. */
1924 qsz = sizeof(struct waitq);
1925 whsize = ROUNDDOWN(whsize, qsz);
1926 g_num_waitqs = whsize / qsz;
1927
1928 /*
1929 * The hash algorithm requires that this be a power of 2, so we
1930 * just mask off all the low-order bits.
1931 */
1932 for (uint32_t i = 0; i < 31; i++) {
1933 uint32_t bit = (1 << i);
1934 if ((g_num_waitqs & bit) == g_num_waitqs) {
1935 break;
1936 }
1937 g_num_waitqs &= ~bit;
1938 }
1939 assert(g_num_waitqs > 0);
1940
1941 /* Now determine how much memory we really need. */
1942 whsize = P2ROUNDUP(g_num_waitqs * qsz, PAGE_SIZE);
1943
1944 wqdbg("allocating %d global queues (%d bytes)", g_num_waitqs, whsize);
1945 kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&global_waitqs,
1946 whsize, 0, KMA_KOBJECT | KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ);
1947 if (kret != KERN_SUCCESS || global_waitqs == NULL) {
1948 panic("kernel_memory_allocate() failed to alloc global_waitqs"
1949 ", error: %d, whsize: 0x%x", kret, whsize);
1950 }
1951
1952 #if CONFIG_WAITQ_STATS
1953 whsize = P2ROUNDUP(g_num_waitqs * sizeof(struct wq_stats), PAGE_SIZE);
1954 kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&g_waitq_stats,
1955 whsize, 0, KMA_KOBJECT | KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ);
1956 if (kret != KERN_SUCCESS || g_waitq_stats == NULL) {
1957 panic("kernel_memory_allocate() failed to alloc g_waitq_stats"
1958 ", error: %d, whsize: 0x%x", kret, whsize);
1959 }
1960 memset(g_waitq_stats, 0, whsize);
1961 #endif
1962
1963 for (uint32_t i = 0; i < g_num_waitqs; i++) {
1964 waitq_init(&global_waitqs[i], SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
1965 }
1966
1967 waitq_set_zone = zinit(sizeof(struct waitq_set),
1968 WAITQ_SET_MAX * sizeof(struct waitq_set),
1969 sizeof(struct waitq_set),
1970 "waitq sets");
1971 zone_change(waitq_set_zone, Z_NOENCRYPT, TRUE);
1972
1973 /* initialize the global waitq link table */
1974 wql_init();
1975
1976 /* initialize the global waitq prepost table */
1977 wqp_init();
1978 }
1979
1980
1981 /* ----------------------------------------------------------------------
1982 *
1983 * Wait Queue Implementation
1984 *
1985 * ---------------------------------------------------------------------- */
1986
1987 /*
1988 * Double the standard lock timeout, because wait queues tend
1989 * to iterate over a number of threads - locking each. If there is
1990 * a problem with a thread lock, it normally times out at the wait
1991 * queue level first, hiding the real problem.
1992 */
1993 /* For x86, the hardware timeout is in TSC units. */
1994 #if defined(__i386__) || defined(__x86_64__)
1995 #define hwLockTimeOut LockTimeOutTSC
1996 #else
1997 #define hwLockTimeOut LockTimeOut
1998 #endif
1999
2000 void
2001 waitq_lock(struct waitq *wq)
2002 {
2003 if (__improbable(waitq_lock_to(wq,
2004 hwLockTimeOut * 2) == 0)) {
2005 boolean_t wql_acquired = FALSE;
2006
2007 while (machine_timeout_suspended()) {
2008 mp_enable_preemption();
2009 wql_acquired = waitq_lock_to(wq,
2010 hwLockTimeOut * 2);
2011 if (wql_acquired) {
2012 break;
2013 }
2014 }
2015 if (wql_acquired == FALSE) {
2016 panic("waitq deadlock - waitq=%p, cpu=%d\n",
2017 wq, cpu_number());
2018 }
2019 }
2020 #if defined(__x86_64__)
2021 pltrace(FALSE);
2022 #endif
2023 assert(waitq_held(wq));
2024 }
2025
2026 void
2027 waitq_unlock(struct waitq *wq)
2028 {
2029 assert(waitq_held(wq));
2030 #if defined(__x86_64__)
2031 pltrace(TRUE);
2032 #endif
2033 waitq_lock_unlock(wq);
2034 }
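/*
 * Usage sketch (illustrative only, not compiled): callers holding an
 * IRQ-safe waitq (SYNC_POLICY_DISABLE_IRQ) must keep interrupts disabled
 * around the hold, mirroring the splsched()/splx() pattern used by the
 * internal routines below. 'wq' is a placeholder waitq pointer.
 */
#if 0
spl_t s = splsched();   /* needed only for waitq_irq_safe() queues */
waitq_lock(wq);
/* ... examine or wake waiters ... */
waitq_unlock(wq);
splx(s);
#endif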
2035
2036
2037 /**
2038 * clear the thread-related waitq state
2039 *
2040 * Conditions:
2041 * 'thread' is locked
2042 */
2043 static inline void
2044 thread_clear_waitq_state(thread_t thread)
2045 {
2046 thread->waitq = NULL;
2047 thread->wait_event = NO_EVENT64;
2048 thread->at_safe_point = FALSE;
2049 }
2050
2051
2052 typedef thread_t (*waitq_select_cb)(void *ctx, struct waitq *waitq,
2053 int is_global, thread_t thread);
2054
2055 struct waitq_select_args {
2056 /* input parameters */
2057 struct waitq *posted_waitq;
2058 struct waitq *waitq;
2059 event64_t event;
2060 waitq_select_cb select_cb;
2061 void *select_ctx;
2062
2063 uint64_t *reserved_preposts;
2064
2065 /* output parameters */
2066 queue_t threadq;
2067 int max_threads;
2068 int *nthreads;
2069 spl_t *spl;
2070 };
2071
2072 static void do_waitq_select_n_locked(struct waitq_select_args *args);
2073
2074 /**
2075 * callback invoked once for every waitq set to which a waitq belongs
2076 *
2077 * Conditions:
2078 * ctx->posted_waitq is locked
2079 * 'link' points to a valid waitq set
2080 *
2081 * Notes:
2082 * Takes the waitq set lock on the set pointed to by 'link'
2083 * Calls do_waitq_select_n_locked() which could recurse back into
2084 * this function if the waitq set is a member of other sets.
2085 * If no threads were selected, it preposts the input waitq
2086 * onto the waitq set pointed to by 'link'.
2087 */
2088 static int
2089 waitq_select_walk_cb(struct waitq *waitq, void *ctx,
2090 struct waitq_link *link)
2091 {
2092 int ret = WQ_ITERATE_CONTINUE;
2093 struct waitq_select_args args = *((struct waitq_select_args *)ctx);
2094 struct waitq_set *wqset;
2095
2096 (void)waitq;
2097 assert(wql_type(link) == WQL_WQS);
2098
2099 wqset = link->wql_wqs.wql_set;
2100 args.waitq = &wqset->wqset_q;
2101
2102 assert(!waitq_irq_safe(waitq));
2103 assert(!waitq_irq_safe(&wqset->wqset_q));
2104
2105 waitq_set_lock(wqset);
2106 /*
2107 * verify that the link wasn't invalidated just before
2108 * we were able to take the lock.
2109 */
2110 if (wqset->wqset_id != link->wql_setid.id) {
2111 goto out_unlock;
2112 }
2113
2114 assert(waitqs_is_linked(wqset));
2115
2116 /*
2117 * Find any threads waiting on this wait queue set,
2118 * and recurse into any waitq set to which this set belongs.
2119 */
2120 do_waitq_select_n_locked(&args);
2121
2122 if (*(args.nthreads) > 0 ||
2123 (args.threadq && !queue_empty(args.threadq))) {
2124 /* at least 1 thread was selected and returned: don't prepost */
2125 if (args.max_threads > 0 &&
2126 *(args.nthreads) >= args.max_threads) {
2127 /* break out of the setid walk */
2128 ret = WQ_ITERATE_FOUND;
2129 }
2130 goto out_unlock;
2131 } else {
2132 /*
2133 * No thread selected: prepost 'waitq' to 'wqset'
2134 * if wqset can handle preposts and the event is set to 0.
2135 * We also make sure to not post waitq sets to other sets.
2136 *
2137 * If the set doesn't support preposts, but does support
2138 * prepost callout/hook interaction, invoke the predefined
2139 * callout function and pass the set's 'prepost_hook.' This
2140 * could potentially release another thread to handle events.
2141 */
2142 if (args.event == NO_EVENT64) {
2143 if (waitq_set_can_prepost(wqset)) {
2144 wq_prepost_do_post_locked(
2145 wqset, waitq, args.reserved_preposts);
2146 } else if (waitq_set_has_prepost_hook(wqset)) {
2147 waitq_set__CALLING_PREPOST_HOOK__(
2148 wqset->wqset_prepost_hook, waitq, 0);
2149 }
2150 }
2151 }
2152
2153 out_unlock:
2154 waitq_set_unlock(wqset);
2155 return ret;
2156 }
2157
2158 /**
2159 * Routine to iterate over the waitq for non-priority ordered waitqs
2160 *
2161 * Conditions:
2162 * args->waitq (and args->posted_waitq) is locked
2163 *
2164 * Notes:
2165 * Uses the optional select callback function to refine the selection
2166 * of one or more threads from a waitq. The select callback is invoked
2167 * once for every thread that is found to be waiting on the input args->waitq.
2168 *
2169 * If one or more threads are selected, this may disable interrupts.
2170 * The previous interrupt state is returned in args->spl and should
2171 * be used in a call to splx() if threads are returned to the caller.
2172 */
2173 static thread_t
2174 waitq_queue_iterate_locked(struct waitq *safeq, struct waitq *waitq,
2175 spl_t spl, struct waitq_select_args *args,
2176 uint32_t *remaining_eventmask)
2177 {
2178 int max_threads = args->max_threads;
2179 int *nthreads = args->nthreads;
2180 thread_t thread = THREAD_NULL;
2181 thread_t first_thread = THREAD_NULL;
2182
2183 qe_foreach_element_safe(thread, &safeq->waitq_queue, wait_links) {
2184 thread_t t = THREAD_NULL;
2185 assert_thread_magic(thread);
2186
2187 /*
2188 * For non-priority ordered waitqs, we allow multiple events to be
2189 * mux'ed into the same waitq. Also safeqs may contain threads from
2190 * multiple waitqs. Only pick threads that match the
2191 * requested wait event.
2192 */
2193 if (thread->waitq == waitq && thread->wait_event == args->event) {
2194 t = thread;
2195 if (first_thread == THREAD_NULL) {
2196 first_thread = thread;
2197 }
2198
2199 /* allow the caller to further refine the selection */
2200 if (args->select_cb) {
2201 t = args->select_cb(args->select_ctx, waitq,
2202 waitq_is_global(waitq), thread);
2203 }
2204 if (t != THREAD_NULL) {
2205 *nthreads += 1;
2206 if (args->threadq) {
2207 /* if output queue, add locked thread to it */
2208 if (*nthreads == 1) {
2209 *(args->spl) = (safeq != waitq) ? spl : splsched();
2210 }
2211 thread_lock(t);
2212 thread_clear_waitq_state(t);
2213 re_queue_tail(args->threadq, &t->wait_links);
2214 }
2215 /* only enqueue up to 'max' threads */
2216 if (*nthreads >= max_threads && max_threads > 0) {
2217 break;
2218 }
2219 }
2220 }
2221 /* thread wasn't selected, so track its event */
2222 if (t == THREAD_NULL) {
2223 *remaining_eventmask |= (thread->waitq != safeq) ?
2224 _CAST_TO_EVENT_MASK(thread->waitq) : _CAST_TO_EVENT_MASK(thread->wait_event);
2225 }
2226 }
2227
2228 return first_thread;
2229 }
2230
2231 /**
2232 * Routine to iterate and remove threads from priority ordered waitqs
2233 *
2234 * Conditions:
2235 * args->waitq (and args->posted_waitq) is locked
2236 *
2237 * Notes:
2238 * The priority ordered waitqs only support maximum priority element removal.
2239 *
2240 * Also, the implementation makes sure that all threads in a priority ordered
2241 * waitq are waiting on the same wait event. This is not necessarily true for
2242 * non-priority ordered waitqs. If one or more threads are selected, this may
2243 * disable interrupts. The previous interrupt state is returned in args->spl
2244 * and should be used in a call to splx() if threads are returned to the caller.
2245 *
2246 * In the future, we could support priority ordered waitqs with multiple wait
2247 * events in the same queue. The way to implement that would be to keep removing
2248 * elements from the waitq and if the event does not match the requested one,
2249 * add it to a local list. This local list of elements needs to be re-inserted
2250 * into the priority queue at the end and the select_cb return value &
2251 * remaining_eventmask would need to be handled appropriately. The implementation
2252 * is not very efficient but would work functionally.
2253 */
2254 static thread_t
2255 waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *waitq,
2256 spl_t spl, struct waitq_select_args *args,
2257 uint32_t *remaining_eventmask)
2258 {
2259 int max_threads = args->max_threads;
2260 int *nthreads = args->nthreads;
2261 thread_t first_thread = THREAD_NULL;
2262 thread_t thread = THREAD_NULL;
2263
2264 /*
2265 * The waitq select routines need to handle two cases:
2266 * Case 1: Peek at maximum priority thread in the waitq (remove_op = 0)
2267 * Get the maximum priority thread from the waitq without removing it.
2268 * In that case args->threadq == NULL and max_threads == 1.
2269 * Case 2: Remove 'n' highest priority threads from waitq (remove_op = 1)
2270 * Get max_threads (if available) while removing them from the waitq.
2271 * In that case args->threadq != NULL and max_threads is one of {-1, 1}.
2272 *
2273 * The only possible values for remaining_eventmask for the priority queue
2274 * waitq are either 0 (for the remove all threads case) or the original
2275 * safeq->waitq_eventmask (for the lookup/remove one thread cases).
2276 */
2277 *remaining_eventmask = safeq->waitq_eventmask;
2278 boolean_t remove_op = !!(args->threadq);
2279
2280 while ((max_threads <= 0) || (*nthreads < max_threads)) {
2281 if (priority_queue_empty(&(safeq->waitq_prio_queue))) {
2282 *remaining_eventmask = 0;
2283 break;
2284 }
2285
2286 if (remove_op) {
2287 thread = priority_queue_remove_max(&safeq->waitq_prio_queue,
2288 struct thread, wait_prioq_links,
2289 PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
2290 } else {
2291 /* For the peek operation, the only valid value for max_threads is 1 */
2292 assert(max_threads == 1);
2293 thread = priority_queue_max(&safeq->waitq_prio_queue,
2294 struct thread, wait_prioq_links);
2295 }
2296 /*
2297 * Ensure the wait event matches since priority ordered waitqs do not
2298 * support multiple events in the same waitq.
2299 */
2300 assert((thread->waitq == waitq) && (thread->wait_event == args->event));
2301
2302 if (args->select_cb) {
2303 /*
2304 * Call the select_cb passed into the waitq_select args. The callback
2305 * updates the select_ctx with information about the highest priority
2306 * thread which is eventually used by the caller.
2307 */
2308 thread_t __assert_only ret_thread = args->select_cb(args->select_ctx, waitq,
2309 waitq_is_global(waitq), thread);
2310 if (!remove_op) {
2311 /* For the peek operation, the thread should not be selected for addition */
2312 assert(ret_thread == THREAD_NULL);
2313 } else {
2314 /*
2315 * For the remove operation, the select routine should always return a valid
2316 * thread for priority waitqs. Since all threads in a prioq are equally
2317 * eligible, it should match the thread removed from the prioq. If this
2318 * invariant changes, the implementation would need to handle the
2319 * remaining_eventmask here correctly.
2320 */
2321 assert(ret_thread == thread);
2322 }
2323 }
2324
2325 if (first_thread == THREAD_NULL) {
2326 first_thread = thread;
2327 }
2328
2329 /* For the peek operation, break out early */
2330 if (!remove_op) {
2331 break;
2332 }
2333
2334 /* Add the thread to the result thread list */
2335 *nthreads += 1;
2336 if (*nthreads == 1) {
2337 *(args->spl) = (safeq != waitq) ? spl : splsched();
2338 }
2339 thread_lock(thread);
2340 thread_clear_waitq_state(thread);
2341 enqueue_tail(args->threadq, &(thread->wait_links));
2342 }
2343
2344 return first_thread;
2345 }
2346
2347 /**
2348 * generic thread selection from a waitq (and sets to which the waitq belongs)
2349 *
2350 * Conditions:
2351 * args->waitq (and args->posted_waitq) is locked
2352 *
2353 * Notes:
2354 * Uses the optional select callback function to refine the selection
2355 * of one or more threads from a waitq and any set to which the waitq
2356 * belongs. The select callback is invoked once for every thread that
2357 * is found to be waiting on the input args->waitq.
2358 *
2359 * If one or more threads are selected, this may disable interrupts.
2360 * The previous interrupt state is returned in args->spl and should
2361 * be used in a call to splx() if threads are returned to the caller.
2362 */
2363 static void
2364 do_waitq_select_n_locked(struct waitq_select_args *args)
2365 {
2366 struct waitq *waitq = args->waitq;
2367 int max_threads = args->max_threads;
2368 thread_t first_thread = THREAD_NULL;
2369 struct waitq *safeq;
2370 uint32_t remaining_eventmask = 0;
2371 uint32_t eventmask;
2372 int *nthreads = args->nthreads;
2373 spl_t spl = 0;
2374
2375 assert(max_threads != 0);
2376
2377 if (!waitq_irq_safe(waitq)) {
2378 /* JMM - add flag to waitq to avoid global lookup if no waiters */
2379 eventmask = _CAST_TO_EVENT_MASK(waitq);
2380 safeq = waitq_get_safeq(waitq);
2381 if (*nthreads == 0) {
2382 spl = splsched();
2383 }
2384 waitq_lock(safeq);
2385 } else {
2386 eventmask = _CAST_TO_EVENT_MASK(args->event);
2387 safeq = waitq;
2388 }
2389
2390 /*
2391 * If the safeq doesn't have an eventmask (not global) or the event
2392 * we're looking for IS set in its eventmask, then scan the threads
2393 * in that queue for ones that match the original <waitq,event> pair.
2394 */
2395 if (!waitq_is_global(safeq) ||
2396 (safeq->waitq_eventmask & eventmask) == eventmask) {
2397 if (waitq_is_turnstile_queue(safeq)) {
2398 first_thread = waitq_prioq_iterate_locked(safeq, waitq,
2399 spl, args,
2400 &remaining_eventmask);
2401 } else {
2402 first_thread = waitq_queue_iterate_locked(safeq, waitq,
2403 spl, args,
2404 &remaining_eventmask);
2405 }
2406
2407 /*
2408 * Update the eventmask of global queues we just scanned:
2409 * - If we selected all the threads in the queue, we can clear its
2410 * eventmask.
2411 *
2412 * - If we didn't find enough threads to fill our needs, then we can
2413 * assume we looked at every thread in the queue and the mask we
2414 * computed is complete - so reset it.
2415 */
2416 if (waitq_is_global(safeq)) {
2417 if (waitq_empty(safeq)) {
2418 safeq->waitq_eventmask = 0;
2419 } else if (max_threads < 0 || *nthreads < max_threads) {
2420 safeq->waitq_eventmask = remaining_eventmask;
2421 }
2422 }
2423 }
2424
2425 /*
2426 * Grab the first thread in the queue if no other thread was selected.
2427 * We can guarantee that no one has manipulated this thread because
2428 * it's waiting on the given waitq, and we have that waitq locked.
2429 */
2430 if (*nthreads == 0 && first_thread != THREAD_NULL && args->threadq) {
2431 /* we know this is the first (and only) thread */
2432 ++(*nthreads);
2433 *(args->spl) = (safeq != waitq) ? spl : splsched();
2434 thread_lock(first_thread);
2435 thread_clear_waitq_state(first_thread);
2436 waitq_thread_remove(safeq, first_thread);
2437 enqueue_tail(args->threadq, &(first_thread->wait_links));
2438
2439 /* update the eventmask on [now] empty global queues */
2440 if (waitq_is_global(safeq) && waitq_empty(safeq)) {
2441 safeq->waitq_eventmask = 0;
2442 }
2443 }
2444
2445 /* unlock the safe queue if we locked one above */
2446 if (safeq != waitq) {
2447 waitq_unlock(safeq);
2448 if (*nthreads == 0) {
2449 splx(spl);
2450 }
2451 }
2452
2453 if (max_threads > 0 && *nthreads >= max_threads) {
2454 return;
2455 }
2456
2457 /*
2458 * wait queues that are not in any sets
2459 * are the bottom of the recursion
2460 */
2461 if (!waitq->waitq_set_id) {
2462 return;
2463 }
2464
2465 /* check to see if the set ID for this wait queue is valid */
2466 struct waitq_link *link = wql_get_link(waitq->waitq_set_id);
2467 if (!link) {
2468 /* the waitq set to which this waitq belonged has been invalidated */
2469 waitq->waitq_set_id = 0;
2470 return;
2471 }
2472
2473 wql_put_link(link);
2474
2475 /*
2476 * If this waitq is a member of any wait queue sets, we need to look
2477 * for waiting thread(s) in any of those sets, and prepost all sets that
2478 * don't have active waiters.
2479 *
2480 * Note that we do a local walk of this waitq's links - we manually
2481 * recurse down wait queue sets with non-zero wqset_q.waitq_set_id
2482 */
2483 (void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
2484 WQL_WQS, (void *)args, waitq_select_walk_cb);
2485 }
2486
2487 /**
2488 * main entry point for thread selection from a waitq
2489 *
2490 * Conditions:
2491 * waitq is locked
2492 *
2493 * Returns:
2494 * The number of threads waiting on 'waitq' for 'event' which have
2495 * been placed onto the input 'threadq'
2496 *
2497 * Notes:
2498 * The 'select_cb' function is invoked for every thread found waiting on
2499 * 'waitq' for 'event'. The thread is _not_ locked upon callback
2500 * invocation. This parameter may be NULL.
2501 *
2502 * If one or more threads are returned in 'threadq' then the caller is
2503 * responsible to call splx() using the returned 'spl' value. Each
2504 * returned thread is locked.
2505 */
2506 static __inline__ int
2507 waitq_select_n_locked(struct waitq *waitq,
2508 event64_t event,
2509 waitq_select_cb select_cb,
2510 void *select_ctx,
2511 uint64_t *reserved_preposts,
2512 queue_t threadq,
2513 int max_threads, spl_t *spl)
2514 {
2515 int nthreads = 0;
2516
2517 struct waitq_select_args args = {
2518 .posted_waitq = waitq,
2519 .waitq = waitq,
2520 .event = event,
2521 .select_cb = select_cb,
2522 .select_ctx = select_ctx,
2523 .reserved_preposts = reserved_preposts,
2524 .threadq = threadq,
2525 .max_threads = max_threads,
2526 .nthreads = &nthreads,
2527 .spl = spl,
2528 };
2529
2530 do_waitq_select_n_locked(&args);
2531 return nthreads;
2532 }
2533
2534 /**
2535 * select from a waitq a single thread waiting for a given event
2536 *
2537 * Conditions:
2538 * 'waitq' is locked
2539 *
2540 * Returns:
2541 * A locked thread that's been removed from the waitq, but has not
2542 * yet been put on a run queue. Caller is responsible to call splx
2543 * with the '*spl' value.
2544 */
2545 static thread_t
2546 waitq_select_one_locked(struct waitq *waitq, event64_t event,
2547 uint64_t *reserved_preposts,
2548 int priority, spl_t *spl)
2549 {
2550 (void)priority;
2551 int nthreads;
2552 queue_head_t threadq;
2553
2554 queue_init(&threadq);
2555
2556 nthreads = waitq_select_n_locked(waitq, event, NULL, NULL,
2557 reserved_preposts, &threadq, 1, spl);
2558
2559 /* if we selected a thread, return it (still locked) */
2560 if (!queue_empty(&threadq)) {
2561 thread_t t;
2562 queue_entry_t qe = dequeue_head(&threadq);
2563 t = qe_element(qe, struct thread, wait_links);
2564 assert(queue_empty(&threadq)); /* there should be 1 entry */
2565 /* t has been locked and removed from all queues */
2566 return t;
2567 }
2568
2569 return THREAD_NULL;
2570 }
2571
2572 struct find_max_pri_ctx {
2573 integer_t max_sched_pri;
2574 integer_t max_base_pri;
2575 thread_t highest_thread;
2576 };
2577
2578 /**
2579 * callback function that finds the max priority thread
2580 *
2581 * Conditions:
2582 * 'waitq' is locked
2583 * 'thread' is not locked
2584 */
2585 static thread_t
2586 waitq_find_max_pri_cb(void *ctx_in,
2587 __unused struct waitq *waitq,
2588 __unused int is_global,
2589 thread_t thread)
2590 {
2591 struct find_max_pri_ctx *ctx = (struct find_max_pri_ctx *)ctx_in;
2592
2593 /*
2594 * thread is not locked, use pri as a hint only
2595 * wake up the highest base pri, and find the highest sched pri at that base pri
2596 */
2597 integer_t sched_pri = *(volatile int16_t *)&thread->sched_pri;
2598 integer_t base_pri = *(volatile int16_t *)&thread->base_pri;
2599
2600 if (ctx->highest_thread == THREAD_NULL ||
2601 (base_pri > ctx->max_base_pri) ||
2602 (base_pri == ctx->max_base_pri && sched_pri > ctx->max_sched_pri)) {
2603 /* don't select the thread, just update ctx */
2604
2605 ctx->max_sched_pri = sched_pri;
2606 ctx->max_base_pri = base_pri;
2607 ctx->highest_thread = thread;
2608 }
2609
2610 return THREAD_NULL;
2611 }
2612
2613 /**
2614 * select from a waitq the highest priority thread waiting for a given event
2615 *
2616 * Conditions:
2617 * 'waitq' is locked
2618 *
2619 * Returns:
2620 * A locked thread that's been removed from the waitq, but has not
2621 * yet been put on a run queue. Caller is responsible to call splx
2622 * with the '*spl' value.
2623 */
2624 static thread_t
2625 waitq_select_max_locked(struct waitq *waitq, event64_t event,
2626 uint64_t *reserved_preposts,
2627 spl_t *spl)
2628 {
2629 __assert_only int nthreads;
2630 assert(!waitq->waitq_set_id); /* doesn't support recursive sets */
2631
2632 struct find_max_pri_ctx ctx = {
2633 .max_sched_pri = 0,
2634 .max_base_pri = 0,
2635 .highest_thread = THREAD_NULL,
2636 };
2637
2638 /*
2639 * Scan the waitq to find the highest priority thread.
2640 * This doesn't remove any thread from the queue
2641 */
2642 nthreads = waitq_select_n_locked(waitq, event,
2643 waitq_find_max_pri_cb,
2644 &ctx, reserved_preposts, NULL, 1, spl);
2645
2646 assert(nthreads == 0);
2647
2648 if (ctx.highest_thread != THREAD_NULL) {
2649 __assert_only kern_return_t ret;
2650
2651 /* Remove only the thread we just found */
2652 ret = waitq_select_thread_locked(waitq, event, ctx.highest_thread, spl);
2653
2654 assert(ret == KERN_SUCCESS);
2655 return ctx.highest_thread;
2656 }
2657
2658 return THREAD_NULL;
2659 }
2660
2661
2662 struct select_thread_ctx {
2663 thread_t thread;
2664 event64_t event;
2665 spl_t *spl;
2666 };
2667
2668 /**
2669 * link walk callback invoked once for each set to which a waitq belongs
2670 *
2671 * Conditions:
2672 * initial waitq is locked
2673 * ctx->thread is unlocked
2674 *
2675 * Notes:
2676 * This may disable interrupts and early-out of the full DAG link walk by
2677 * returning KERN_ALREADY_IN_SET. In this case, the returned thread has
2678 * been removed from the waitq, its waitq state has been reset, and the
2679 * caller is responsible to call splx() with the returned interrupt state
2680 * in ctx->spl.
2681 */
2682 static int
2683 waitq_select_thread_cb(struct waitq *waitq, void *ctx,
2684 struct waitq_link *link)
2685 {
2686 struct select_thread_ctx *stctx = (struct select_thread_ctx *)ctx;
2687 struct waitq_set *wqset;
2688 struct waitq *wqsetq;
2689 struct waitq *safeq;
2690 spl_t s;
2691
2692 (void)waitq;
2693
2694 thread_t thread = stctx->thread;
2695 event64_t event = stctx->event;
2696
2697 if (wql_type(link) != WQL_WQS) {
2698 return WQ_ITERATE_CONTINUE;
2699 }
2700
2701 wqset = link->wql_wqs.wql_set;
2702 wqsetq = &wqset->wqset_q;
2703
2704 assert(!waitq_irq_safe(waitq));
2705 assert(!waitq_irq_safe(wqsetq));
2706
2707 waitq_set_lock(wqset);
2708
2709 s = splsched();
2710
2711 /* find and lock the interrupt-safe waitq the thread is thought to be on */
2712 safeq = waitq_get_safeq(wqsetq);
2713 waitq_lock(safeq);
2714
2715 thread_lock(thread);
2716
2717 if ((thread->waitq == wqsetq) && (thread->wait_event == event)) {
2718 waitq_thread_remove(wqsetq, thread);
2719 if (waitq_empty(safeq)) {
2720 safeq->waitq_eventmask = 0;
2721 }
2722 thread_clear_waitq_state(thread);
2723 waitq_unlock(safeq);
2724 waitq_set_unlock(wqset);
2725 /*
2726 * thread still locked,
2727 * return non-zero to break out of WQS walk
2728 */
2729 *(stctx->spl) = s;
2730 return WQ_ITERATE_FOUND;
2731 }
2732
2733 thread_unlock(thread);
2734 waitq_set_unlock(wqset);
2735 waitq_unlock(safeq);
2736 splx(s);
2737
2738 return WQ_ITERATE_CONTINUE;
2739 }
2740
2741 /**
2742 * returns KERN_SUCCESS and locks 'thread' if-and-only-if 'thread' is waiting
2743 * on 'waitq' (or any set to which waitq belongs) for 'event'
2744 *
2745 * Conditions:
2746 * 'waitq' is locked
2747 * 'thread' is unlocked
2748 */
2749 static kern_return_t
2750 waitq_select_thread_locked(struct waitq *waitq,
2751 event64_t event,
2752 thread_t thread, spl_t *spl)
2753 {
2754 struct waitq *safeq;
2755 struct waitq_link *link;
2756 struct select_thread_ctx ctx;
2757 kern_return_t kr;
2758 spl_t s;
2759
2760 s = splsched();
2761
2762 /* Find and lock the interrupt-disabled queue the thread is actually on */
2763 if (!waitq_irq_safe(waitq)) {
2764 safeq = waitq_get_safeq(waitq);
2765 waitq_lock(safeq);
2766 } else {
2767 safeq = waitq;
2768 }
2769
2770 thread_lock(thread);
2771
2772 if ((thread->waitq == waitq) && (thread->wait_event == event)) {
2773 waitq_thread_remove(safeq, thread);
2774 if (waitq_empty(safeq)) {
2775 safeq->waitq_eventmask = 0;
2776 }
2777 thread_clear_waitq_state(thread);
2778 *spl = s;
2779 /* thread still locked */
2780 return KERN_SUCCESS;
2781 }
2782
2783 thread_unlock(thread);
2784
2785 if (safeq != waitq) {
2786 waitq_unlock(safeq);
2787 }
2788
2789 splx(s);
2790
2791 if (!waitq->waitq_set_id) {
2792 return KERN_NOT_WAITING;
2793 }
2794
2795 /* check to see if the set ID for this wait queue is valid */
2796 link = wql_get_link(waitq->waitq_set_id);
2797 if (!link) {
2798 /* the waitq set to which this waitq belonged has been invalidated */
2799 waitq->waitq_set_id = 0;
2800 return KERN_NOT_WAITING;
2801 }
2802
2803 /*
2804 * The thread may be waiting on a wait queue set to which
2805 * the input 'waitq' belongs. Go look for the thread in
2806 * all wait queue sets. If it's there, we'll remove it
2807 * because it's equivalent to waiting directly on the input waitq.
2808 */
2809 ctx.thread = thread;
2810 ctx.event = event;
2811 ctx.spl = spl;
2812 kr = walk_waitq_links(LINK_WALK_FULL_DAG, waitq, waitq->waitq_set_id,
2813 WQL_WQS, (void *)&ctx, waitq_select_thread_cb);
2814
2815 wql_put_link(link);
2816
2817 /* we found a thread, return success */
2818 if (kr == WQ_ITERATE_FOUND) {
2819 return KERN_SUCCESS;
2820 }
2821
2822 return KERN_NOT_WAITING;
2823 }
2824
2825 static int
2826 prepost_exists_cb(struct waitq_set __unused *wqset,
2827 void __unused *ctx,
2828 struct wq_prepost __unused *wqp,
2829 struct waitq __unused *waitq)
2830 {
2831 /* if we get here, then we know that there is a valid prepost object! */
2832 return WQ_ITERATE_FOUND;
2833 }
2834
2835 /**
2836 * declare a thread's intent to wait on 'waitq' for 'wait_event'
2837 *
2838 * Conditions:
2839 * 'waitq' is locked
2840 */
2841 wait_result_t
2842 waitq_assert_wait64_locked(struct waitq *waitq,
2843 event64_t wait_event,
2844 wait_interrupt_t interruptible,
2845 wait_timeout_urgency_t urgency,
2846 uint64_t deadline,
2847 uint64_t leeway,
2848 thread_t thread)
2849 {
2850 wait_result_t wait_result;
2851 int realtime = 0;
2852 struct waitq *safeq;
2853 uintptr_t eventmask;
2854 spl_t s;
2855
2856
2857 /*
2858 * Warning: Do _not_ place debugging print statements here.
2859 * The waitq is locked!
2860 */
2861 assert(!thread->started || thread == current_thread());
2862
2863 if (thread->waitq != NULL) {
2864 panic("thread already waiting on %p", thread->waitq);
2865 }
2866
2867 if (waitq_is_set(waitq)) {
2868 struct waitq_set *wqset = (struct waitq_set *)waitq;
2869 /*
2870 * early-out if the thread is waiting on a wait queue set
2871 * that has already been pre-posted.
2872 */
2873 if (wait_event == NO_EVENT64 && waitq_set_maybe_preposted(wqset)) {
2874 int ret;
2875 /*
2876 * Run through the list of potential preposts. Because
2877 * this is a hot path, we short-circuit the iteration
2878 * if we find just one prepost object.
2879 */
2880 ret = wq_prepost_foreach_locked(wqset, NULL,
2881 prepost_exists_cb);
2882 if (ret == WQ_ITERATE_FOUND) {
2883 s = splsched();
2884 thread_lock(thread);
2885 thread->wait_result = THREAD_AWAKENED;
2886 thread_unlock(thread);
2887 splx(s);
2888 return THREAD_AWAKENED;
2889 }
2890 }
2891 }
2892
2893 s = splsched();
2894
2895 /*
2896 * If already dealing with an irq safe wait queue, we are all set.
2897 * Otherwise, determine a global queue to use and lock it.
2898 */
2899 if (!waitq_irq_safe(waitq)) {
2900 safeq = waitq_get_safeq(waitq);
2901 eventmask = _CAST_TO_EVENT_MASK(waitq);
2902 waitq_lock(safeq);
2903 } else {
2904 safeq = waitq;
2905 eventmask = _CAST_TO_EVENT_MASK(wait_event);
2906 }
2907
2908 /* lock the thread now that we have the irq-safe waitq locked */
2909 thread_lock(thread);
2910
2911 /*
2912 * Realtime threads get priority for wait queue placements.
2913 * This allows wait_queue_wakeup_one to prefer a waiting
2914 * realtime thread, similar in principle to performing
2915 * a wait_queue_wakeup_all and allowing scheduler prioritization
2916 * to run the realtime thread, but without causing the
2917 * lock contention of that scenario.
2918 */
2919 if (thread->sched_pri >= BASEPRI_REALTIME) {
2920 realtime = 1;
2921 }
2922
2923 /*
2924 * This is the extent to which we currently take scheduling attributes
2925 * into account. If the thread is vm privileged, we stick it at
2926 * the front of the queue. Later, these queues will honor the policy
2927 * value set at waitq_init time.
2928 */
2929 wait_result = thread_mark_wait_locked(thread, interruptible);
2930 /* thread->wait_result has been set */
2931 if (wait_result == THREAD_WAITING) {
2932 if (!safeq->waitq_fifo
2933 || (thread->options & TH_OPT_VMPRIV) || realtime) {
2934 waitq_thread_insert(safeq, thread, false);
2935 } else {
2936 waitq_thread_insert(safeq, thread, true);
2937 }
2938
2939 /* mark the event and real waitq, even if enqueued on a global safeq */
2940 thread->wait_event = wait_event;
2941 thread->waitq = waitq;
2942
2943 if (deadline != 0) {
2944 boolean_t act;
2945
2946 act = timer_call_enter_with_leeway(&thread->wait_timer,
2947 NULL,
2948 deadline, leeway,
2949 urgency, FALSE);
2950 if (!act) {
2951 thread->wait_timer_active++;
2952 }
2953 thread->wait_timer_is_set = TRUE;
2954 }
2955
2956 if (waitq_is_global(safeq)) {
2957 safeq->waitq_eventmask |= eventmask;
2958 }
2959
2960 waitq_stats_count_wait(waitq);
2961 }
2962
2963 /* unlock the thread */
2964 thread_unlock(thread);
2965
2966 /* update the inheritor's thread priority if the waitq is embedded in turnstile */
2967 if (waitq_is_turnstile_queue(safeq) && wait_result == THREAD_WAITING) {
2968 turnstile_recompute_priority_locked(waitq_to_turnstile(safeq));
2969 turnstile_update_inheritor_locked(waitq_to_turnstile(safeq));
2970 }
2971
2972 /* unlock the safeq if we locked it here */
2973 if (safeq != waitq) {
2974 waitq_unlock(safeq);
2975 }
2976
2977 splx(s);
2978
2979 return wait_result;
2980 }
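/*
 * Usage sketch (illustrative only, not compiled): the canonical wait
 * pattern built on the routine above, assuming a non-IRQ-safe waitq.
 * 'wq' and 'event_ptr' are placeholders; a deadline of 0 means no
 * timeout, so the urgency and leeway arguments are ignored.
 */
#if 0
wait_result_t wr;

waitq_lock(wq);
wr = waitq_assert_wait64_locked(wq, CAST_EVENT64_T(event_ptr),
    THREAD_UNINT, 0 /* urgency */, 0 /* no deadline */, 0 /* leeway */,
    current_thread());
waitq_unlock(wq);

if (wr == THREAD_WAITING) {
    wr = thread_block(THREAD_CONTINUE_NULL);
}
#endif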
2981
2982 /**
2983 * remove 'thread' from its current blocking state on 'waitq'
2984 *
2985 * Conditions:
2986 * 'thread' is locked
2987 *
2988 * Notes:
2989 * This function is primarily used by clear_wait_internal in
2990 * sched_prim.c from the thread timer wakeup path
2991 * (i.e. the thread was waiting on 'waitq' with a timeout that expired)
2992 */
2993 int
2994 waitq_pull_thread_locked(struct waitq *waitq, thread_t thread)
2995 {
2996 struct waitq *safeq;
2997
2998 assert_thread_magic(thread);
2999 assert(thread->waitq == waitq);
3000
3001 /* Find the interrupt-disabled queue the thread is waiting on */
3002 if (!waitq_irq_safe(waitq)) {
3003 safeq = waitq_get_safeq(waitq);
3004 } else {
3005 safeq = waitq;
3006 }
3007
3008 /* thread is already locked so have to try for the waitq lock */
3009 if (!waitq_lock_try(safeq)) {
3010 return 0;
3011 }
3012
3013 waitq_thread_remove(safeq, thread);
3014 thread_clear_waitq_state(thread);
3015 waitq_stats_count_clear_wakeup(waitq);
3016
3017 /* clear the global event mask if this was the last thread there! */
3018 if (waitq_is_global(safeq) && waitq_empty(safeq)) {
3019 safeq->waitq_eventmask = 0;
3020 /* JMM - also mark no-waiters on waitq (if not the same as the safeq) */
3021 }
3022
3023 waitq_unlock(safeq);
3024
3025 return 1;
3026 }
3027
3028
3029 static __inline__
3030 void
3031 maybe_adjust_thread_pri(thread_t thread,
3032 int priority,
3033 __kdebug_only struct waitq *waitq)
3034 {
3035 /*
3036 * If the caller is requesting the waitq subsystem to promote the
3037 * priority of the awoken thread, then boost the thread's priority to
3038 * the default WAITQ_BOOST_PRIORITY (if it's not already equal or
3039 * higher priority). This boost must be removed via a call to
3040 * waitq_clear_promotion_locked before the thread waits again.
3041 *
3042 * WAITQ_PROMOTE_PRIORITY is -2.
3043 * Anything above 0 represents a mutex promotion.
3044 * The default 'no action' value is -1.
3045 * TODO: define this in a header
3046 */
3047 if (priority == WAITQ_PROMOTE_PRIORITY) {
3048 uintptr_t trace_waitq = 0;
3049 if (__improbable(kdebug_enable)) {
3050 trace_waitq = VM_KERNEL_UNSLIDE_OR_PERM(waitq);
3051 }
3052
3053 sched_thread_promote_reason(thread, TH_SFLAG_WAITQ_PROMOTED, trace_waitq);
3054 } else if (priority > 0) {
3055 /* Mutex subsystem wants to see this thread before we 'go' it */
3056 lck_mtx_wakeup_adjust_pri(thread, priority);
3057 }
3058 }
3059
3060 /*
3061 * Clear a potential thread priority promotion from a waitq wakeup
3062 * with WAITQ_PROMOTE_PRIORITY.
3063 *
3064 * This must be called on the thread which was woken up with TH_SFLAG_WAITQ_PROMOTED.
3065 */
3066 void
3067 waitq_clear_promotion_locked(struct waitq *waitq, thread_t thread)
3068 {
3069 spl_t s;
3070
3071 assert(waitq_held(waitq));
3072 assert(thread != THREAD_NULL);
3073 assert(thread == current_thread());
3074
3075 /* This flag is only cleared by the thread itself, so safe to check outside lock */
3076 if ((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) != TH_SFLAG_WAITQ_PROMOTED) {
3077 return;
3078 }
3079
3080 if (!waitq_irq_safe(waitq)) {
3081 s = splsched();
3082 }
3083 thread_lock(thread);
3084
3085 sched_thread_unpromote_reason(thread, TH_SFLAG_WAITQ_PROMOTED, 0);
3086
3087 thread_unlock(thread);
3088 if (!waitq_irq_safe(waitq)) {
3089 splx(s);
3090 }
3091 }
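/*
 * Pairing sketch (illustrative only, not compiled): a wakeup issued with
 * WAITQ_PROMOTE_PRIORITY boosts the awoken thread, and that thread is
 * expected to strip the boost via waitq_clear_promotion_locked before it
 * waits again. 'wq' and 'event_ptr' are placeholder names.
 */
#if 0
/* waker side: boost whoever we wake */
waitq_lock(wq);
waitq_wakeup64_one_locked(wq, CAST_EVENT64_T(event_ptr), THREAD_AWAKENED,
    NULL, WAITQ_PROMOTE_PRIORITY, WAITQ_UNLOCK);

/* awoken thread, before its next wait */
waitq_lock(wq);
waitq_clear_promotion_locked(wq, current_thread());
waitq_unlock(wq);
#endif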
3092
3093 /**
3094 * wakeup all threads waiting on 'waitq' for 'wake_event'
3095 *
3096 * Conditions:
3097 * 'waitq' is locked
3098 *
3099 * Notes:
3100 * May temporarily disable and re-enable interrupts
3101 * and re-adjust thread priority of each awoken thread.
3102 *
3103 * If the input 'lock_state' == WAITQ_UNLOCK then the waitq will have
3104 * been unlocked before calling thread_go() on any returned threads, and
3105 * is guaranteed to be unlocked upon function return.
3106 */
3107 kern_return_t
3108 waitq_wakeup64_all_locked(struct waitq *waitq,
3109 event64_t wake_event,
3110 wait_result_t result,
3111 uint64_t *reserved_preposts,
3112 int priority,
3113 waitq_lock_state_t lock_state)
3114 {
3115 kern_return_t ret;
3116 thread_t thread;
3117 spl_t th_spl;
3118 int nthreads;
3119 queue_head_t wakeup_queue;
3120
3121 assert(waitq_held(waitq));
3122 queue_init(&wakeup_queue);
3123
3124 nthreads = waitq_select_n_locked(waitq, wake_event, NULL, NULL,
3125 reserved_preposts,
3126 &wakeup_queue, -1, &th_spl);
3127
3128 /* set each thread running */
3129 ret = KERN_NOT_WAITING;
3130
3131 #if CONFIG_WAITQ_STATS
3132 qe_foreach_element(thread, &wakeup_queue, wait_links)
3133 waitq_stats_count_wakeup(waitq);
3134 #endif
3135 if (lock_state == WAITQ_UNLOCK) {
3136 waitq_unlock(waitq);
3137 }
3138
3139 qe_foreach_element_safe(thread, &wakeup_queue, wait_links) {
3140 assert_thread_magic(thread);
3141 remqueue(&thread->wait_links);
3142 maybe_adjust_thread_pri(thread, priority, waitq);
3143 ret = thread_go(thread, result);
3144 assert(ret == KERN_SUCCESS);
3145 thread_unlock(thread);
3146 }
3147 if (nthreads > 0) {
3148 splx(th_spl);
3149 } else {
3150 waitq_stats_count_fail(waitq);
3151 }
3152
3153 return ret;
3154 }
3155
3156 /**
3157 * wakeup one thread waiting on 'waitq' for 'wake_event'
3158 *
3159 * Conditions:
3160 * 'waitq' is locked
3161 *
3162 * Notes:
3163 * May temporarily disable and re-enable interrupts.
3164 */
3165 kern_return_t
3166 waitq_wakeup64_one_locked(struct waitq *waitq,
3167 event64_t wake_event,
3168 wait_result_t result,
3169 uint64_t *reserved_preposts,
3170 int priority,
3171 waitq_lock_state_t lock_state)
3172 {
3173 thread_t thread;
3174 spl_t th_spl;
3175
3176 assert(waitq_held(waitq));
3177
3178 if (priority == WAITQ_SELECT_MAX_PRI) {
3179 thread = waitq_select_max_locked(waitq, wake_event,
3180 reserved_preposts,
3181 &th_spl);
3182 } else {
3183 thread = waitq_select_one_locked(waitq, wake_event,
3184 reserved_preposts,
3185 priority, &th_spl);
3186 }
3187
3188
3189 if (thread != THREAD_NULL) {
3190 waitq_stats_count_wakeup(waitq);
3191 } else {
3192 waitq_stats_count_fail(waitq);
3193 }
3194
3195 if (lock_state == WAITQ_UNLOCK) {
3196 waitq_unlock(waitq);
3197 }
3198
3199 if (thread != THREAD_NULL) {
3200 maybe_adjust_thread_pri(thread, priority, waitq);
3201 kern_return_t ret = thread_go(thread, result);
3202 assert(ret == KERN_SUCCESS);
3203 thread_unlock(thread);
3204 splx(th_spl);
3205 return ret;
3206 }
3207
3208 return KERN_NOT_WAITING;
3209 }
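/*
 * Selection sketch (illustrative only, not compiled): passing
 * WAITQ_SELECT_MAX_PRI as the priority argument picks the highest
 * priority waiter via waitq_select_max_locked instead of the FIFO/LIFO
 * policy order. Note that path asserts the waitq is not a member of any
 * set. 'wq' and 'event_ptr' are placeholders.
 */
#if 0
waitq_lock(wq);
kern_return_t kr = waitq_wakeup64_one_locked(wq, CAST_EVENT64_T(event_ptr),
    THREAD_AWAKENED, NULL, WAITQ_SELECT_MAX_PRI, WAITQ_UNLOCK);
/* kr == KERN_NOT_WAITING if nothing was waiting on the event */
#endif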
3210
3211 /**
3212 * wakeup one thread waiting on 'waitq' for 'wake_event'
3213 *
3214 * Conditions:
3215 * 'waitq' is locked
3216 *
3217 * Returns:
3218 * A locked, runnable thread.
3219 * If return value is non-NULL, interrupts have also
3220 * been disabled, and the caller is responsible to call
3221 * splx() with the returned '*spl' value.
3222 */
3223 thread_t
3224 waitq_wakeup64_identify_locked(struct waitq *waitq,
3225 event64_t wake_event,
3226 wait_result_t result,
3227 spl_t *spl,
3228 uint64_t *reserved_preposts,
3229 int priority,
3230 waitq_lock_state_t lock_state)
3231 {
3232 thread_t thread;
3233
3234 assert(waitq_held(waitq));
3235
3236 if (priority == WAITQ_SELECT_MAX_PRI) {
3237 thread = waitq_select_max_locked(waitq, wake_event,
3238 reserved_preposts,
3239 spl);
3240 } else {
3241 thread = waitq_select_one_locked(waitq, wake_event,
3242 reserved_preposts,
3243 priority, spl);
3244 }
3245
3246 if (thread != THREAD_NULL) {
3247 waitq_stats_count_wakeup(waitq);
3248 } else {
3249 waitq_stats_count_fail(waitq);
3250 }
3251
3252 if (lock_state == WAITQ_UNLOCK) {
3253 waitq_unlock(waitq);
3254 }
3255
3256 if (thread != THREAD_NULL) {
3257 kern_return_t __assert_only ret;
3258 ret = thread_go(thread, result);
3259 assert(ret == KERN_SUCCESS);
3260 }
3261
3262 return thread; /* locked if not NULL (caller responsible for spl) */
3263 }
3264
3265 /**
3266 * wakeup a specific thread iff it's waiting on 'waitq' for 'wake_event'
3267 *
3268 * Conditions:
3269 * 'waitq' is locked
3270 * 'thread' is unlocked
3271 *
3272 * Notes:
3273 * May temporarily disable and re-enable interrupts
3274 *
3275 * If the input lock_state == WAITQ_UNLOCK then the waitq will have been
3276 * unlocked before calling thread_go() if 'thread' is to be awoken, and
3277 * is guaranteed to be unlocked upon function return.
3278 */
3279 kern_return_t
3280 waitq_wakeup64_thread_locked(struct waitq *waitq,
3281 event64_t wake_event,
3282 thread_t thread,
3283 wait_result_t result,
3284 waitq_lock_state_t lock_state)
3285 {
3286 kern_return_t ret;
3287 spl_t th_spl;
3288
3289 assert(waitq_held(waitq));
3290 assert_thread_magic(thread);
3291
3292 /*
3293 * See if the thread was still waiting there. If so, it got
3294 * dequeued and returned locked.
3295 */
3296 ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl);
3297
3298 if (ret == KERN_SUCCESS) {
3299 waitq_stats_count_wakeup(waitq);
3300 } else {
3301 waitq_stats_count_fail(waitq);
3302 }
3303
3304 if (lock_state == WAITQ_UNLOCK) {
3305 waitq_unlock(waitq);
3306 }
3307
3308 if (ret != KERN_SUCCESS) {
3309 return KERN_NOT_WAITING;
3310 }
3311
3312 ret = thread_go(thread, result);
3313 assert(ret == KERN_SUCCESS);
3314 thread_unlock(thread);
3315 splx(th_spl);
3316
3317 return ret;
3318 }
3319
3320
3321
3322 /* ----------------------------------------------------------------------
3323 *
3324 * In-Kernel API
3325 *
3326 * ---------------------------------------------------------------------- */
3327
3328 /**
3329 * initialize a waitq object
3330 */
3331 kern_return_t
3332 waitq_init(struct waitq *waitq, int policy)
3333 {
3334 assert(waitq != NULL);
3335
3336 /* only FIFO and LIFO for now */
3337 if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0) {
3338 return KERN_INVALID_ARGUMENT;
3339 }
3340
3341 waitq->waitq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
3342 waitq->waitq_irq = !!(policy & SYNC_POLICY_DISABLE_IRQ);
3343 waitq->waitq_prepost = 0;
3344 waitq->waitq_type = WQT_QUEUE;
3345 waitq->waitq_turnstile_or_port = !!(policy & SYNC_POLICY_TURNSTILE);
3346 waitq->waitq_eventmask = 0;
3347
3348 waitq->waitq_set_id = 0;
3349 waitq->waitq_prepost_id = 0;
3350
3351 waitq_lock_init(waitq);
3352 if (waitq_is_turnstile_queue(waitq)) {
3353 /* For turnstile, initialize it as a priority queue */
3354 priority_queue_init(&waitq->waitq_prio_queue,
3355 PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
3356 assert(waitq->waitq_fifo == 0);
3357 } else {
3358 queue_init(&waitq->waitq_queue);
3359 }
3360
3361 waitq->waitq_isvalid = 1;
3362 return KERN_SUCCESS;
3363 }
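/*
 * Usage sketch (illustrative only, not compiled): initializing and
 * tearing down an embedded waitq with the same policy bits the bootstrap
 * code applies to the global queues. 'my_wq' is a placeholder.
 */
#if 0
struct waitq my_wq;

kern_return_t kr = waitq_init(&my_wq, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
assert(kr == KERN_SUCCESS);
/* ... assert waits / issue wakeups against &my_wq ... */
waitq_deinit(&my_wq);
#endif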
3364
3365 struct wq_unlink_ctx {
3366 struct waitq *unlink_wq;
3367 struct waitq_set *unlink_wqset;
3368 };
3369
3370 static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx,
3371 struct wq_prepost *wqp, struct waitq *waitq);
3372
3373 /**
3374 * walk_waitq_links callback to invalidate 'link' parameter
3375 *
3376 * Conditions:
3377 * Called from walk_waitq_links.
3378 * Note that unlike other callbacks, this one makes no assumptions about
3379 * the 'waitq' parameter, specifically it does not have to be locked or
3380 * even valid.
3381 */
3382 static int
3383 waitq_unlink_all_cb(struct waitq *waitq, void *ctx,
3384 struct waitq_link *link)
3385 {
3386 (void)waitq;
3387 (void)ctx;
3388 if (wql_type(link) == WQL_LINK && wql_is_valid(link)) {
3389 wql_invalidate(link);
3390 }
3391
3392 if (wql_type(link) == WQL_WQS) {
3393 struct waitq_set *wqset;
3394 struct wq_unlink_ctx ulctx;
3395
3396 /*
3397 * When destroying the waitq, take the time to clear out any
3398 * preposts it may have made. This could potentially save time
3399 * on the IPC send path which would otherwise have to iterate
3400 * over lots of dead port preposts.
3401 */
3402 if (waitq->waitq_prepost_id == 0) {
3403 goto out;
3404 }
3405
3406 wqset = link->wql_wqs.wql_set;
3407 assert(wqset != NULL);
3408 assert(!waitq_irq_safe(&wqset->wqset_q));
3409
3410 waitq_set_lock(wqset);
3411
3412 if (!waitq_set_is_valid(wqset)) {
3413 /* someone raced us to teardown */
3414 goto out_unlock;
3415 }
3416 if (!waitq_set_maybe_preposted(wqset)) {
3417 goto out_unlock;
3418 }
3419
3420 ulctx.unlink_wq = waitq;
3421 ulctx.unlink_wqset = wqset;
3422 (void)wq_prepost_iterate(wqset->wqset_prepost_id, &ulctx,
3423 waitq_unlink_prepost_cb);
3424 out_unlock:
3425 waitq_set_unlock(wqset);
3426 }
3427
3428 out:
3429 return WQ_ITERATE_CONTINUE;
3430 }
3431
3432
3433 /**
3434 * cleanup any link/prepost table resources associated with a waitq
3435 */
3436 void
3437 waitq_deinit(struct waitq *waitq)
3438 {
3439 spl_t s;
3440
3441 if (!waitq || !waitq_is_queue(waitq)) {
3442 return;
3443 }
3444
3445 if (waitq_irq_safe(waitq)) {
3446 s = splsched();
3447 }
3448 waitq_lock(waitq);
3449 if (!waitq_valid(waitq)) {
3450 waitq_unlock(waitq);
3451 if (waitq_irq_safe(waitq)) {
3452 splx(s);
3453 }
3454 return;
3455 }
3456
3457 waitq->waitq_isvalid = 0;
3458
3459 if (!waitq_irq_safe(waitq)) {
3460 waitq_unlink_all_unlock(waitq);
3461 /* waitq unlocked and set links deallocated */
3462 } else {
3463 waitq_unlock(waitq);
3464 splx(s);
3465 }
3466
3467 assert(waitq_empty(waitq));
3468 }
3469
3470 void
3471 waitq_invalidate_locked(struct waitq *waitq)
3472 {
3473 assert(waitq_held(waitq));
3474 assert(waitq_is_valid(waitq));
3475 waitq->waitq_isvalid = 0;
3476 }
3477
3478 /**
3479 * invalidate the given wq_prepost object
3480 *
3481 * Conditions:
3482 * Called from wq_prepost_iterate (_not_ from wq_prepost_foreach_locked!)
3483 */
3484 static int
3485 wqset_clear_prepost_chain_cb(struct waitq_set __unused *wqset,
3486 void __unused *ctx,
3487 struct wq_prepost *wqp,
3488 struct waitq __unused *waitq)
3489 {
3490 if (wqp_type(wqp) == WQP_POST) {
3491 wq_prepost_invalidate(wqp);
3492 }
3493 return WQ_ITERATE_CONTINUE;
3494 }
3495
3496
3497 /**
3498 * allocate and initialize a waitq set object
3499 *
3500 * Conditions:
3501 * may block
3502 *
3503 * Returns:
3504 * allocated / initialized waitq_set object.
3505 * the waitq_set object returned does not have
3506 * a waitq_link associated.
3507 *
3508 * NULL on failure
3509 */
3510 struct waitq_set *
3511 waitq_set_alloc(int policy, void *prepost_hook)
3512 {
3513 struct waitq_set *wqset;
3514
3515 wqset = (struct waitq_set *)zalloc(waitq_set_zone);
3516 if (!wqset) {
3517 panic("Can't allocate a new waitq set from zone %p", waitq_set_zone);
3518 }
3519
3520 kern_return_t ret;
3521 ret = waitq_set_init(wqset, policy, NULL, prepost_hook);
3522 if (ret != KERN_SUCCESS) {
3523 zfree(waitq_set_zone, wqset);
3524 wqset = NULL;
3525 }
3526
3527 return wqset;
3528 }
3529
3530 /**
3531 * initialize a waitq set object
3532 *
3533 * if no 'reserved_link' object is passed
3534 * the waitq_link will be lazily allocated
3535 * on demand through waitq_set_lazy_init_link.
3536 */
3537 kern_return_t
3538 waitq_set_init(struct waitq_set *wqset,
3539 int policy, uint64_t *reserved_link,
3540 void *prepost_hook)
3541 {
3542 struct waitq_link *link;
3543 kern_return_t ret;
3544
3545 memset(wqset, 0, sizeof(*wqset));
3546
3547 ret = waitq_init(&wqset->wqset_q, policy);
3548 if (ret != KERN_SUCCESS) {
3549 return ret;
3550 }
3551
3552 wqset->wqset_q.waitq_type = WQT_SET;
3553 if (policy & SYNC_POLICY_PREPOST) {
3554 wqset->wqset_q.waitq_prepost = 1;
3555 wqset->wqset_prepost_id = 0;
3556 assert(prepost_hook == NULL);
3557 } else {
3558 wqset->wqset_q.waitq_prepost = 0;
3559 wqset->wqset_prepost_hook = prepost_hook;
3560 }
3561
3562 if (reserved_link && *reserved_link != 0) {
3563 link = wql_get_reserved(*reserved_link, WQL_WQS);
3564
3565 if (!link) {
3566 panic("Can't allocate link object for waitq set: %p", wqset);
3567 }
3568
3569 /* always consume the caller's reference */
3570 *reserved_link = 0;
3571
3572 link->wql_wqs.wql_set = wqset;
3573 wql_mkvalid(link);
3574
3575 wqset->wqset_id = link->wql_setid.id;
3576 wql_put_link(link);
3577 } else {
3578 /*
3579 * Lazy allocate the link only when an actual id is needed.
3580 */
3581 wqset->wqset_id = WQSET_NOT_LINKED;
3582 }
3583
3584 return KERN_SUCCESS;
3585 }
3586
3587 #if DEVELOPMENT || DEBUG
3588
3589 int
3590 sysctl_helper_waitq_set_nelem(void)
3591 {
3592 return ltable_nelem(&g_wqlinktable);
3593 }
3594
3595 #endif
3596
3597 /**
3598 * initialize a waitq set link.
3599 *
3600 * Conditions:
3601 * may block
3602 * locks and unlocks the waitq set lock
3603 *
3604 */
3605 void
3606 waitq_set_lazy_init_link(struct waitq_set *wqset)
3607 {
3608 struct waitq_link *link;
3609
3610 assert(get_preemption_level() == 0 && waitq_wait_possible(current_thread()));
3611
3612 waitq_set_lock(wqset);
3613 if (!waitq_set_should_lazy_init_link(wqset)) {
3614 waitq_set_unlock(wqset);
3615 return;
3616 }
3617
3618 assert(wqset->wqset_id == WQSET_NOT_LINKED);
3619 waitq_set_unlock(wqset);
3620
3621 link = wql_alloc_link(WQL_WQS);
3622 if (!link) {
3623 panic("Can't allocate link object for waitq set: %p", wqset);
3624 }
3625
3626 link->wql_wqs.wql_set = wqset;
3627
3628 waitq_set_lock(wqset);
3629 if (waitq_set_should_lazy_init_link(wqset)) {
3630 wql_mkvalid(link);
3631 wqset->wqset_id = link->wql_setid.id;
3632 }
3633
3634 assert(wqset->wqset_id != 0);
3635 assert(wqset->wqset_id != WQSET_NOT_LINKED);
3636
3637 waitq_set_unlock(wqset);
3638
3639 wql_put_link(link);
3640
3641 return;
3642 }
3643
3644 /**
3645 * checks if a waitq set needs to be linked.
3646 *
3647 */
3648 boolean_t
3649 waitq_set_should_lazy_init_link(struct waitq_set *wqset)
3650 {
3651 if (waitqs_is_linked(wqset) || wqset->wqset_id == 0) {
3652 return FALSE;
3653 }
3654 return TRUE;
3655 }
3656
3657 /**
3658 * clear out / release any resources associated with a waitq set
3659 *
3660 * Conditions:
3661 * may block
3662 * Note:
3663 * This will render the waitq set invalid, and it must
3664 * be re-initialized with waitq_set_init before it can be used again
3665 */
3666 void
3667 waitq_set_deinit(struct waitq_set *wqset)
3668 {
3669 struct waitq_link *link = NULL;
3670 uint64_t set_id, prepost_id;
3671
3672 if (!waitqs_is_set(wqset)) {
3673 panic("trying to de-initialize an invalid wqset @%p", wqset);
3674 }
3675
3676 assert(!waitq_irq_safe(&wqset->wqset_q));
3677
3678 waitq_set_lock(wqset);
3679
3680 set_id = wqset->wqset_id;
3681
3682 if (waitqs_is_linked(wqset) || set_id == 0) {
3683 /* grab the set's link object */
3684 link = wql_get_link(set_id);
3685 if (link) {
3686 wql_invalidate(link);
3687 }
3688 /* someone raced us to deinit */
3689 if (!link || wqset->wqset_id != set_id || set_id != link->wql_setid.id) {
3690 if (link) {
3691 wql_put_link(link);
3692 }
3693 waitq_set_unlock(wqset);
3694 return;
3695 }
3696
3697 /* the link should be a valid link object at this point */
3698 assert(link != NULL && wql_type(link) == WQL_WQS);
3699
3700 wqset->wqset_id = 0;
3701 }
3702
3703 /*
3704 * This set may have a lot of preposts, or may have been a member of
3705 * many other sets. To minimize spinlock hold times, we clear out the
3706 * waitq set data structure under the lock-hold, but don't clear any
3707 * table objects. We keep handles to the prepost and set linkage
3708 * objects and free those outside the critical section.
3709 */
3710 prepost_id = 0;
3711 if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id) {
3712 assert(link != NULL);
3713 prepost_id = wqset->wqset_prepost_id;
3714 }
3715 /* else { TODO: notify kqueue subsystem? } */
3716 wqset->wqset_prepost_id = 0;
3717
3718 wqset->wqset_q.waitq_fifo = 0;
3719 wqset->wqset_q.waitq_prepost = 0;
3720 wqset->wqset_q.waitq_isvalid = 0;
3721
3722 /* don't clear the 'waitq_irq' bit: it's used in locking! */
3723 wqset->wqset_q.waitq_eventmask = 0;
3724
3725 waitq_unlink_all_unlock(&wqset->wqset_q);
3726 /* wqset->wqset_q unlocked and set links deallocated */
3727
3728
3729 if (link) {
3730 /*
3731 * walk_waitq_links may race with us for access to the waitq set.
3732 * If walk_waitq_links has a reference to the set, then we should wait
3733 * until the link's refcount goes to 1 (our reference) before we exit
3734 * this function. That way we ensure that the waitq set memory will
3735 * remain valid even though it's been cleared out.
3736 */
3737 while (wql_refcnt(link) > 1) {
3738 delay(1);
3739 }
3740 wql_put_link(link);
3741 }
3742
3743 /* drop / unlink all the prepost table objects */
3744 /* JMM - can this happen before the delay? */
3745 if (prepost_id) {
3746 (void)wq_prepost_iterate(prepost_id, NULL,
3747 wqset_clear_prepost_chain_cb);
3748 }
3749 }
3750
3751 /**
3752 * de-initialize and free an allocated waitq set object
3753 *
3754 * Conditions:
3755 * may block
3756 */
3757 kern_return_t
3758 waitq_set_free(struct waitq_set *wqset)
3759 {
3760 waitq_set_deinit(wqset);
3761
3762 memset(wqset, 0, sizeof(*wqset));
3763 zfree(waitq_set_zone, wqset);
3764
3765 return KERN_SUCCESS;
3766 }
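/*
 * Usage sketch (illustrative only, not compiled): allocating a
 * prepost-capable waitq set and releasing it. With SYNC_POLICY_PREPOST
 * the prepost_hook must be NULL (see waitq_set_init above).
 */
#if 0
struct waitq_set *wqs;

wqs = waitq_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, NULL);
/* ... link member waitqs and wait on &wqs->wqset_q ... */
waitq_set_free(wqs);
#endif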
3767
3768 #if DEVELOPMENT || DEBUG
3769 #if CONFIG_WAITQ_DEBUG
3770 /**
3771 * return the set ID of 'wqset'
3772 */
3773 uint64_t
3774 wqset_id(struct waitq_set *wqset)
3775 {
3776 if (!wqset) {
3777 return 0;
3778 }
3779
3780 assert(waitqs_is_set(wqset));
3781
3782 if (!waitqs_is_linked(wqset)) {
3783 waitq_set_lazy_init_link(wqset);
3784 }
3785
3786 return wqset->wqset_id;
3787 }
3788
3789 /**
3790 * returns a pointer to the waitq object embedded in 'wqset'
3791 */
3792 struct waitq *
3793 wqset_waitq(struct waitq_set *wqset)
3794 {
3795 if (!wqset) {
3796 return NULL;
3797 }
3798
3799 assert(waitqs_is_set(wqset));
3800
3801 return &wqset->wqset_q;
3802 }
3803 #endif /* CONFIG_WAITQ_DEBUG */
3804 #endif /* DEVELOPMENT || DEBUG */
3805
3806
3807 /**
3808 * clear all preposts originating from 'waitq'
3809 *
3810 * Conditions:
3811 * 'waitq' locked
3812 * may (rarely) spin waiting for another on-core thread to
3813 * release the last reference to the waitq's prepost link object
3814 *
3815 * NOTE:
3816 * If this function needs to spin, it will drop the waitq lock!
3817 * The return value of the function indicates whether or not this
3818 * happened: 1 == lock was dropped, 0 == lock held
3819 */
3820 int
3821 waitq_clear_prepost_locked(struct waitq *waitq)
3822 {
3823 struct wq_prepost *wqp;
3824 int dropped_lock = 0;
3825
3826 assert(!waitq_irq_safe(waitq));
3827
3828 if (waitq->waitq_prepost_id == 0) {
3829 return 0;
3830 }
3831
3832 wqp = wq_prepost_get(waitq->waitq_prepost_id);
3833 waitq->waitq_prepost_id = 0;
3834 if (wqp) {
3835 uint64_t wqp_id = wqp->wqp_prepostid.id;
3836 wqdbg_v("invalidate prepost 0x%llx (refcnt:%d)",
3837 wqp->wqp_prepostid.id, wqp_refcnt(wqp));
3838 wq_prepost_invalidate(wqp);
3839 while (wqp_refcnt(wqp) > 1) {
3840 /*
3841 * Some other thread must have raced us to grab a link
3842 * object reference before we invalidated it. This
3843 * means that they are probably trying to access the
3844 * waitq to which the prepost object points. We need
3845 * to wait here until the other thread drops their
3846 * reference. We know that no one else can get a
3847 * reference (the object has been invalidated), and
3848 * that prepost references are short-lived (dropped on
3849 * a call to wq_prepost_put). We also know that no one
3850 * blocks while holding a reference therefore the
3851 * other reference holder must be on-core. We'll just
3852 * sit and wait for the other reference to be dropped.
3853 */
3854 disable_preemption();
3855
3856 waitq_unlock(waitq);
3857 dropped_lock = 1;
3858 /*
3859 * don't yield here, just spin and assume the other
3860 * consumer is already on core...
3861 */
3862 delay(1);
3863
3864 waitq_lock(waitq);
3865
3866 enable_preemption();
3867 }
3868 if (wqp_refcnt(wqp) > 0 && wqp->wqp_prepostid.id == wqp_id) {
3869 wq_prepost_put(wqp);
3870 }
3871 }
3872
3873 return dropped_lock;
3874 }
3875
3876 /**
3877 * clear all preposts originating from 'waitq'
3878 *
3879 * Conditions:
3880 * 'waitq' is not locked
3881 * may disable and re-enable interrupts
3882 */
3883 void
3884 waitq_clear_prepost(struct waitq *waitq)
3885 {
3886 assert(waitq_valid(waitq));
3887 assert(!waitq_irq_safe(waitq));
3888
3889 waitq_lock(waitq);
3890 /* it doesn't matter to us if the lock is dropped here */
3891 (void)waitq_clear_prepost_locked(waitq);
3892 waitq_unlock(waitq);
3893 }
3894
3895 /**
3896 * return the waitq's prepost object ID (allocating if necessary)
3897 *
3898 * Conditions:
3899 * 'waitq' is unlocked
3900 */
3901 uint64_t
3902 waitq_get_prepost_id(struct waitq *waitq)
3903 {
3904 struct wq_prepost *wqp;
3905 uint64_t wqp_id = 0;
3906
3907 if (!waitq_valid(waitq)) {
3908 return 0;
3909 }
3910
3911 assert(!waitq_irq_safe(waitq));
3912
3913 waitq_lock(waitq);
3914
3915 if (!waitq_valid(waitq)) {
3916 goto out_unlock;
3917 }
3918
3919 if (waitq->waitq_prepost_id) {
3920 wqp_id = waitq->waitq_prepost_id;
3921 goto out_unlock;
3922 }
3923
3924 /* don't hold a spinlock while allocating a prepost object */
3925 waitq_unlock(waitq);
3926
3927 wqp = wq_prepost_alloc(WQP_WQ, 1);
3928 if (!wqp) {
3929 return 0;
3930 }
3931
3932 /* re-acquire the waitq lock */
3933 waitq_lock(waitq);
3934
3935 if (!waitq_valid(waitq)) {
3936 wq_prepost_put(wqp);
3937 wqp_id = 0;
3938 goto out_unlock;
3939 }
3940
3941 if (waitq->waitq_prepost_id) {
3942 /* we were beat by someone else */
3943 wq_prepost_put(wqp);
3944 wqp_id = waitq->waitq_prepost_id;
3945 goto out_unlock;
3946 }
3947
3948 wqp->wqp_wq.wqp_wq_ptr = waitq;
3949
3950 wqp_set_valid(wqp);
3951 wqp_id = wqp->wqp_prepostid.id;
3952 waitq->waitq_prepost_id = wqp_id;
3953
3954 wq_prepost_put(wqp);
3955
3956 out_unlock:
3957 waitq_unlock(waitq);
3958
3959 return wqp_id;
3960 }
3961
3962
3963 static int
3964 waitq_inset_cb(struct waitq *waitq, void *ctx, struct waitq_link *link)
3965 {
3966 uint64_t setid = *(uint64_t *)ctx;
3967 int wqltype = wql_type(link);
3968 (void)waitq;
3969 if (wqltype == WQL_WQS && link->wql_setid.id == setid) {
3970 wqdbg_v(" waitq already in set 0x%llx", setid);
3971 return WQ_ITERATE_FOUND;
3972 } else if (wqltype == WQL_LINK) {
3973 /*
3974 * break out early if we see a link that points to the setid
3975 * in question. This saves us a step in the
3976 * iteration/recursion
3977 */
3978 wqdbg_v(" waitq already in set 0x%llx (WQL_LINK)", setid);
3979 if (link->wql_link.left_setid == setid ||
3980 link->wql_link.right_setid == setid) {
3981 return WQ_ITERATE_FOUND;
3982 }
3983 }
3984
3985 return WQ_ITERATE_CONTINUE;
3986 }
3987
3988 /**
3989 * determine if 'waitq' is a member of 'wqset'
3990 *
3991 * Conditions:
3992 * neither 'waitq' nor 'wqset' is locked
3993 * may disable and re-enable interrupts while locking 'waitq'
3994 */
3995 boolean_t
3996 waitq_member(struct waitq *waitq, struct waitq_set *wqset)
3997 {
3998 kern_return_t kr = WQ_ITERATE_SUCCESS;
3999 uint64_t setid;
4000
4001 if (!waitq_valid(waitq)) {
4002 panic("Invalid waitq: %p", waitq);
4003 }
4004 assert(!waitq_irq_safe(waitq));
4005
4006 if (!waitqs_is_set(wqset)) {
4007 return FALSE;
4008 }
4009
4010 waitq_lock(waitq);
4011
4012 if (!waitqs_is_linked(wqset)) {
4013 goto out_unlock;
4014 }
4015
4016 setid = wqset->wqset_id;
4017
4018 /* fast path: most waitqs are members of only 1 set */
4019 if (waitq->waitq_set_id == setid) {
4020 waitq_unlock(waitq);
4021 return TRUE;
4022 }
4023
4024 /* walk the link table and look for the Set ID of wqset */
4025 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
4026 WQL_ALL, (void *)&setid, waitq_inset_cb);
4027
4028 out_unlock:
4029 waitq_unlock(waitq);
4030 return kr == WQ_ITERATE_FOUND;
4031 }
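
/*
 * A minimal sketch of using waitq_member() to skip a redundant link.
 * The helper name is hypothetical; the sketch assumes a valid,
 * non-IRQ-safe waitq and a valid waitq set, and that the caller may
 * block (no reserved link is passed).
 */
#if 0 /* illustrative sketch only -- not compiled */
static kern_return_t
example_link_if_needed(struct waitq *wq, struct waitq_set *wqset)
{
        if (waitq_member(wq, wqset)) {
                return KERN_SUCCESS; /* already a member: nothing to do */
        }
        /* may block on link table growth; also lazily links 'wqset' */
        return waitq_link(wq, wqset, WAITQ_SHOULD_LOCK, NULL);
}
#endif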
4032
4033 /**
4034 * Returns true if the given waitq is a member of at least 1 set
4035 */
4036 boolean_t
4037 waitq_in_set(struct waitq *waitq)
4038 {
4039 struct waitq_link *link;
4040 boolean_t inset = FALSE;
4041
4042 if (waitq_irq_safe(waitq)) {
4043 return FALSE;
4044 }
4045
4046 waitq_lock(waitq);
4047
4048 if (!waitq->waitq_set_id) {
4049 goto out_unlock;
4050 }
4051
4052 link = wql_get_link(waitq->waitq_set_id);
4053 if (link) {
4054 /* if we get here, the waitq is in _at_least_one_ set */
4055 inset = TRUE;
4056 wql_put_link(link);
4057 } else {
4058 /* we can just optimize this for next time */
4059 waitq->waitq_set_id = 0;
4060 }
4061
4062 out_unlock:
4063 waitq_unlock(waitq);
4064 return inset;
4065 }
4066
4067
4068 /**
4069 * pre-allocate a waitq link structure from the link table
4070 *
4071 * Conditions:
4072 * 'waitq' is not locked
4073 * may (rarely) block if link table needs to grow
4074 */
4075 uint64_t
4076 waitq_link_reserve(struct waitq *waitq)
4077 {
4078 struct waitq_link *link;
4079 uint64_t reserved_id = 0;
4080
4081 assert(get_preemption_level() == 0 && waitq_wait_possible(current_thread()));
4082
4083 /*
4084 * We've asserted that the caller can block, so we enforce a
4085 * minimum-free table element policy here.
4086 */
4087 wql_ensure_free_space();
4088
4089 (void)waitq;
4090 link = wql_alloc_link(LT_RESERVED);
4091 if (!link) {
4092 return 0;
4093 }
4094
4095 reserved_id = link->wql_setid.id;
4096
4097 return reserved_id;
4098 }
4099
4100 /**
4101 * release a pre-allocated waitq link structure
4102 */
4103 void
4104 waitq_link_release(uint64_t id)
4105 {
4106 struct waitq_link *link;
4107
4108 if (id == 0) {
4109 return;
4110 }
4111
4112 link = wql_get_reserved(id, WQL_LINK);
4113 if (!link) {
4114 return;
4115 }
4116
4117 /*
4118 * if we successfully got a link object, then we know
4119 * it's not been marked valid, and can be released with
4120 * a standard wql_put_link() which should free the element.
4121 */
4122 wql_put_link(link);
4123 #if CONFIG_LTABLE_STATS
4124 g_wqlinktable.nreserved_releases += 1;
4125 #endif
4126 }
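
/*
 * A minimal sketch of pairing waitq_link_reserve() with
 * waitq_link_release(). The reservation may block (link table growth),
 * so it is taken before any spinlocks are held; a reservation that is
 * never consumed by waitq_link() must be handed back or the table
 * element is leaked. The helper name is hypothetical.
 */
#if 0 /* illustrative sketch only -- not compiled */
static void
example_reserve_and_release(struct waitq *wq)
{
        uint64_t reserved_link;

        /* may block: call only from a context that can wait */
        reserved_link = waitq_link_reserve(wq);
        if (reserved_link == 0) {
                return; /* table allocation failed */
        }

        /* ... decide that no link is needed after all ... */

        /* return the unused reservation to the link table */
        waitq_link_release(reserved_link);
}
#endif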
4127
4128 /**
4129 * link 'waitq' to the set identified by 'setid' using the 'link' structure
4130 *
4131 * Conditions:
4132 * 'waitq' is locked
4133 * caller should have a reference to the 'link' object
4134 */
4135 static kern_return_t
4136 waitq_link_internal(struct waitq *waitq,
4137 uint64_t setid, struct waitq_link *link)
4138 {
4139 struct waitq_link *qlink;
4140 kern_return_t kr;
4141
4142 assert(waitq_held(waitq));
4143 assert(setid != 0);
4144 assert(setid != WQSET_NOT_LINKED);
4145
4146 /*
4147 * If the waitq_set_id field is empty, then this waitq is not
4148 * a member of any other set. All we have to do is update the
4149 * field.
4150 */
4151 if (!waitq->waitq_set_id) {
4152 waitq->waitq_set_id = setid;
4153 return KERN_SUCCESS;
4154 }
4155
4156 qlink = wql_get_link(waitq->waitq_set_id);
4157 if (!qlink) {
4158 /*
4159 * The set to which this wait queue belonged has been
4160 * destroyed / invalidated. We can re-use the waitq_set_id field.
4161 */
4162 waitq->waitq_set_id = setid;
4163 return KERN_SUCCESS;
4164 }
4165 wql_put_link(qlink);
4166
4167 /*
4168 * Check to see if it's already a member of the set.
4169 *
4170 * TODO: check for cycles!
4171 */
4172 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
4173 WQL_ALL, (void *)&setid, waitq_inset_cb);
4174 if (kr == WQ_ITERATE_FOUND) {
4175 return KERN_ALREADY_IN_SET;
4176 }
4177
4178 /*
4179 * This wait queue is a member of at least one set already,
4180 * and _not_ a member of the given set. Use our previously
4181 * allocated link object, and hook it up to the wait queue.
4182 * Note that it's possible that one or more of the wait queue sets to
4183 * which the wait queue belongs was invalidated before we allocated
4184 * this link object. That's OK because the next time we use that
4185 * object we'll just ignore it.
4186 */
4187 link->wql_link.left_setid = setid;
4188 link->wql_link.right_setid = waitq->waitq_set_id;
4189 wql_mkvalid(link);
4190
4191 waitq->waitq_set_id = link->wql_setid.id;
4192
4193 return KERN_SUCCESS;
4194 }
4195
4196 /**
4197 * link 'waitq' to 'wqset'
4198 *
4199 * Conditions:
4200 * if 'lock_state' contains WAITQ_SHOULD_LOCK, 'waitq' must be unlocked.
4201 * Otherwise, 'waitq' must be locked.
4202 *
4203 * may (rarely) block on link table allocation if the table has to grow,
4204 * and no 'reserved_link' object is passed.
4205 *
4206 * may block and acquire wqset lock if the wqset passed has no link.
4207 *
4208 * Notes:
4209 * The caller can guarantee that this function will never block by
4210 * - pre-allocating a link table object and passing its ID in 'reserved_link'
4211 * - and pre-allocating the waitq set link by calling waitq_set_lazy_init_link.
4212 * It is not possible to provide a reserved_link without having also linked
4213 * the wqset.
4214 */
4215 kern_return_t
4216 waitq_link(struct waitq *waitq, struct waitq_set *wqset,
4217 waitq_lock_state_t lock_state, uint64_t *reserved_link)
4218 {
4219 kern_return_t kr;
4220 struct waitq_link *link;
4221 int should_lock = (lock_state == WAITQ_SHOULD_LOCK);
4222
4223 if (!waitq_valid(waitq) || waitq_irq_safe(waitq)) {
4224 panic("Invalid waitq: %p", waitq);
4225 }
4226
4227 if (!waitqs_is_set(wqset)) {
4228 return KERN_INVALID_ARGUMENT;
4229 }
4230
4231 if (!reserved_link || *reserved_link == 0) {
4232 if (!waitqs_is_linked(wqset)) {
4233 waitq_set_lazy_init_link(wqset);
4234 }
4235 }
4236
4237 wqdbg_v("Link waitq %p to wqset 0x%llx",
4238 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id);
4239
4240 /*
4241 * We _might_ need a new link object here, so we'll grab outside
4242 * the lock because the alloc call _might_ block.
4243 *
4244 * If the caller reserved a link beforehand, then wql_get_link
4245 * is guaranteed not to block because the caller holds an extra
4246 * reference to the link which, in turn, holds a reference to the
4247 * link table.
4248 */
4249 if (reserved_link && *reserved_link != 0) {
4250 link = wql_get_reserved(*reserved_link, WQL_LINK);
4251 /* always consume the caller's reference */
4252 *reserved_link = 0;
4253 } else {
4254 link = wql_alloc_link(WQL_LINK);
4255 }
4256 if (!link) {
4257 return KERN_NO_SPACE;
4258 }
4259
4260 if (should_lock) {
4261 waitq_lock(waitq);
4262 }
4263
4264 kr = waitq_link_internal(waitq, wqset->wqset_id, link);
4265
4266 if (should_lock) {
4267 waitq_unlock(waitq);
4268 }
4269
4270 wql_put_link(link);
4271
4272 return kr;
4273 }
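
/*
 * A minimal sketch of the never-block pattern described in the Notes
 * above: link the set and reserve a link object up front, while
 * blocking is still permitted, then commit the link later from a
 * context that must not block. The helper names are hypothetical, and
 * the commit step only avoids blocking when '*reserved_link' is
 * non-zero.
 */
#if 0 /* illustrative sketch only -- not compiled */
static uint64_t
example_prepare_link(struct waitq *wq, struct waitq_set *wqset)
{
        /* both steps may block, so perform them up front */
        if (!waitqs_is_linked(wqset)) {
                waitq_set_lazy_init_link(wqset);
        }
        return waitq_link_reserve(wq);
}

static kern_return_t
example_commit_link(struct waitq *wq, struct waitq_set *wqset,
    uint64_t *reserved_link)
{
        kern_return_t kr;

        /* does not block: the link object was reserved above */
        kr = waitq_link(wq, wqset, WAITQ_SHOULD_LOCK, reserved_link);

        /* waitq_link() zeroes a consumed reservation; this is a no-op then */
        waitq_link_release(*reserved_link);
        return kr;
}
#endif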
4274
4275 /**
4276 * helper: unlink 'waitq' from waitq set identified by 'setid'
4277 * this function also prunes invalid objects from the tree
4278 *
4279 * Conditions:
4280 * MUST be called from walk_waitq_links link table walk
4281 * 'waitq' is locked
4282 *
4283 * Notes:
4284 * This is a helper function which compresses the link table by culling
4285 * unused or unnecessary links. See comments below for different
4286 * scenarios.
4287 */
4288 static inline int
4289 waitq_maybe_remove_link(struct waitq *waitq,
4290 uint64_t setid,
4291 struct waitq_link *parent,
4292 struct waitq_link *left,
4293 struct waitq_link *right)
4294 {
4295 uint64_t *wq_setid = &waitq->waitq_set_id;
4296
4297 /*
4298 * There are two scenarios:
4299 *
4300 * Scenario 1:
4301 * --------------------------------------------------------------------
4302 * waitq->waitq_set_id == parent
4303 *
4304 * parent(LINK)
4305 * / \
4306 * / \
4307 * / \
4308 * L(LINK/WQS_l) R(LINK/WQS_r)
4309 *
4310 * In this scenario, we assert that the original waitq points to the
4311 * parent link we were passed in. If WQS_l (or WQS_r) is the waitq
4312 * set we're looking for, we can set the corresponding parent
4313 * link id (left or right) to 0. To compress the tree, we can reset the
4314 * waitq_set_id of the original waitq to point to the side of the
4315 * parent that is still valid. We then discard the parent link object.
4316 */
4317 if (*wq_setid == parent->wql_setid.id) {
4318 if (!left && !right) {
4319 /* completely invalid children */
4320 wql_invalidate(parent);
4321 wqdbg_v("S1, L+R");
4322 *wq_setid = 0;
4323 return WQ_ITERATE_INVALID;
4324 } else if (!left || left->wql_setid.id == setid) {
4325 /*
4326 * if the left side matches, we know it points either to the
4327 * WQS we're unlinking, or to an invalid object:
4328 * no need to invalidate it
4329 */
4330 *wq_setid = right ? right->wql_setid.id : 0;
4331 wql_invalidate(parent);
4332 wqdbg_v("S1, L");
4333 return left ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4334 } else if (!right || right->wql_setid.id == setid) {
4335 /*
4336 * if the right side matches, we know it points either to the
4337 * WQS we're unlinking, or to an invalid object:
4338 * no need to invalidate it
4339 */
4340 *wq_setid = left ? left->wql_setid.id : 0;
4341 wql_invalidate(parent);
4342 wqdbg_v("S1, R");
4343 return right ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4344 }
4345 }
4346
4347 /*
4348 * the tree walk starts at the top-of-tree and moves down,
4349 * so these are safe asserts.
4350 */
4351 assert(left || right); /* one of them has to be valid at this point */
4352
4353 /*
4354 * Scenario 2:
4355 * --------------------------------------------------------------------
4356 * waitq->waitq_set_id == ... (OR parent)
4357 *
4358 * ...
4359 * |
4360 * parent
4361 * / \
4362 * / \
4363 * L(LINK) R(LINK)
4364 * /\ /\
4365 * / \ / \
4366 * / \ Rl(*) Rr(*)
4367 * Ll(WQS) Lr(WQS)
4368 *
4369 * In this scenario, a leaf node of either the left or right side
4370 * could be the wait queue set we're looking to unlink. We also handle
4371 * the case where one of these links is invalid. If a leaf node is
4372 * invalid or it's the set we're looking for, we can safely remove the
4373 * middle link (left or right) and point the parent link directly to
4374 * the remaining leaf node.
4375 */
4376 if (left && wql_type(left) == WQL_LINK) {
4377 uint64_t Ll, Lr;
4378 struct waitq_link *linkLl, *linkLr;
4379 assert(left->wql_setid.id != setid);
4380 Ll = left->wql_link.left_setid;
4381 Lr = left->wql_link.right_setid;
4382 linkLl = wql_get_link(Ll);
4383 linkLr = wql_get_link(Lr);
4384 if (!linkLl && !linkLr) {
4385 /*
4386 * The left object points to two invalid objects!
4387 * We can invalidate the left w/o touching the parent.
4388 */
4389 wql_invalidate(left);
4390 wqdbg_v("S2, Ll+Lr");
4391 return WQ_ITERATE_INVALID;
4392 } else if (!linkLl || Ll == setid) {
4393 /* Ll is invalid and/or the wait queue set we're looking for */
4394 parent->wql_link.left_setid = Lr;
4395 wql_invalidate(left);
4396 wql_put_link(linkLl);
4397 wql_put_link(linkLr);
4398 wqdbg_v("S2, Ll");
4399 return linkLl ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4400 } else if (!linkLr || Lr == setid) {
4401 /* Lr is invalid and/or the wait queue set we're looking for */
4402 parent->wql_link.left_setid = Ll;
4403 wql_invalidate(left);
4404 wql_put_link(linkLr);
4405 wql_put_link(linkLl);
4406 wqdbg_v("S2, Lr");
4407 return linkLr ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4408 }
4409 wql_put_link(linkLl);
4410 wql_put_link(linkLr);
4411 }
4412
4413 if (right && wql_type(right) == WQL_LINK) {
4414 uint64_t Rl, Rr;
4415 struct waitq_link *linkRl, *linkRr;
4416 assert(right->wql_setid.id != setid);
4417 Rl = right->wql_link.left_setid;
4418 Rr = right->wql_link.right_setid;
4419 linkRl = wql_get_link(Rl);
4420 linkRr = wql_get_link(Rr);
4421 if (!linkRl && !linkRr) {
4422 /*
4423 * The right object points to two invalid objects!
4424 * We can invalidate the right w/o touching the parent.
4425 */
4426 wql_invalidate(right);
4427 wqdbg_v("S2, Rl+Rr");
4428 return WQ_ITERATE_INVALID;
4429 } else if (!linkRl || Rl == setid) {
4430 /* Rl is invalid and/or the wait queue set we're looking for */
4431 parent->wql_link.right_setid = Rr;
4432 wql_invalidate(right);
4433 wql_put_link(linkRl);
4434 wql_put_link(linkRr);
4435 wqdbg_v("S2, Rl");
4436 return linkRl ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4437 } else if (!linkRr || Rr == setid) {
4438 /* Rr is invalid and/or the wait queue set we're looking for */
4439 parent->wql_link.right_setid = Rl;
4440 wql_invalidate(right);
4441 wql_put_link(linkRl);
4442 wql_put_link(linkRr);
4443 wqdbg_v("S2, Rr");
4444 return linkRr ? WQ_ITERATE_UNLINKED : WQ_ITERATE_INVALID;
4445 }
4446 wql_put_link(linkRl);
4447 wql_put_link(linkRr);
4448 }
4449
4450 return WQ_ITERATE_CONTINUE;
4451 }
4452
4453 /**
4454 * link table walk callback that unlinks 'waitq' from 'ctx->setid'
4455 *
4456 * Conditions:
4457 * called from walk_waitq_links
4458 * 'waitq' is locked
4459 *
4460 * Notes:
4461 * uses waitq_maybe_remove_link() to compress the linktable and
4462 * perform the actual unlinking
4463 */
4464 static int
4465 waitq_unlink_cb(struct waitq *waitq, void *ctx,
4466 struct waitq_link *link)
4467 {
4468 uint64_t setid = *((uint64_t *)ctx);
4469 struct waitq_link *right, *left;
4470 int ret = 0;
4471
4472 if (wql_type(link) != WQL_LINK) {
4473 return WQ_ITERATE_CONTINUE;
4474 }
4475
4476 do {
4477 left = wql_get_link(link->wql_link.left_setid);
4478 right = wql_get_link(link->wql_link.right_setid);
4479
4480 ret = waitq_maybe_remove_link(waitq, setid, link, left, right);
4481
4482 wql_put_link(left);
4483 wql_put_link(right);
4484
4485 if (!wql_is_valid(link)) {
4486 return WQ_ITERATE_INVALID;
4487 }
4488 /* A ret value of UNLINKED will break us out of the table walk */
4489 } while (ret == WQ_ITERATE_INVALID);
4490
4491 return ret;
4492 }
4493
4494
4495 /**
4496 * undo/remove a prepost from 'ctx' (waitq) to 'wqset'
4497 *
4498 * Conditions:
4499 * Called from wq_prepost_foreach_locked OR wq_prepost_iterate
4500 * 'wqset' may be NULL
4501 * (ctx)->unlink_wqset is locked
4502 */
4503 static int
4504 waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx,
4505 struct wq_prepost *wqp, struct waitq *waitq)
4506 {
4507 struct wq_unlink_ctx *ulctx = (struct wq_unlink_ctx *)ctx;
4508
4509 if (waitq != ulctx->unlink_wq) {
4510 return WQ_ITERATE_CONTINUE;
4511 }
4512
4513 if (wqp_type(wqp) == WQP_WQ &&
4514 wqp->wqp_prepostid.id == ulctx->unlink_wqset->wqset_prepost_id) {
4515 /* this is the only prepost on this wait queue set */
4516 wqdbg_v("unlink wqp (WQ) 0x%llx", wqp->wqp_prepostid.id);
4517 ulctx->unlink_wqset->wqset_prepost_id = 0;
4518 return WQ_ITERATE_BREAK;
4519 }
4520
4521 assert(wqp_type(wqp) == WQP_POST);
4522
4523 /*
4524 * The prepost object 'wqp' points to a waitq which should no longer
4525 * be preposted to 'ulctx->unlink_wqset'. We can remove the prepost
4526 * object from the list and break out of the iteration. Using the
4527 * context object in this way allows this same callback function to be
4528 * used from both wq_prepost_foreach_locked and wq_prepost_iterate.
4529 */
4530 wq_prepost_remove(ulctx->unlink_wqset, wqp);
4531 return WQ_ITERATE_BREAK;
4532 }
4533
4534 /**
4535 * unlink 'waitq' from 'wqset'
4536 *
4537 * Conditions:
4538 * 'waitq' is locked
4539 * 'wqset' is _not_ locked
4540 * may (rarely) spin in prepost clear and drop/re-acquire 'waitq' lock
4541 * (see waitq_clear_prepost_locked)
4542 */
4543 static kern_return_t
4544 waitq_unlink_locked(struct waitq *waitq,
4545 struct waitq_set *wqset)
4546 {
4547 uint64_t setid;
4548 kern_return_t kr;
4549
4550 assert(!waitq_irq_safe(waitq));
4551
4552 if (waitq->waitq_set_id == 0) {
4553 /*
4554 * TODO:
4555 * it doesn't belong to anyone, and it has a prepost object?
4556 * This is an artifact of not cleaning up after kqueues when
4557 * they prepost into select sets...
4558 */
4559 if (waitq->waitq_prepost_id != 0) {
4560 (void)waitq_clear_prepost_locked(waitq);
4561 }
4562 return KERN_NOT_IN_SET;
4563 }
4564
4565 if (!waitqs_is_linked(wqset)) {
4566 /*
4567 * No link has been allocated for the wqset,
4568 * so no waitq could have been linked to it.
4569 */
4570 return KERN_NOT_IN_SET;
4571 }
4572
4573 setid = wqset->wqset_id;
4574
4575 if (waitq->waitq_set_id == setid) {
4576 waitq->waitq_set_id = 0;
4577 /*
4578 * This was the only set to which the waitq belonged: we can
4579 * safely release the waitq's prepost object. It doesn't
4580 * matter if this function drops and re-acquires the lock
4581 * because we're not manipulating waitq state any more.
4582 */
4583 (void)waitq_clear_prepost_locked(waitq);
4584 return KERN_SUCCESS;
4585 }
4586
4587 /*
4588 * The waitq was a member of more than 1 set, so we need to
4589 * handle potentially compressing the link table, and
4590 * adjusting the waitq->waitq_set_id value.
4591 *
4592 * Note: we can't free the waitq's associated prepost object (if any)
4593 * because it may be in use by the one or more _other_ sets to
4594 * which this queue belongs.
4595 *
4596 * Note: This function only handles a single level of the queue linkage.
4597 * Removing a waitq from a set to which it does not directly
4598 * belong is undefined. For example, if a waitq belonged to set
4599 * A, and set A belonged to set B, you could not remove the waitq
4600 * from set B.
4601 */
4602 kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
4603 WQL_LINK, (void *)&setid, waitq_unlink_cb);
4604
4605 if (kr == WQ_ITERATE_UNLINKED) {
4606 struct wq_unlink_ctx ulctx;
4607
4608 kr = KERN_SUCCESS; /* found it and dis-associated it */
4609
4610 /* don't look for preposts if it's not prepost-enabled */
4611 if (!wqset->wqset_q.waitq_prepost) {
4612 goto out;
4613 }
4614
4615 assert(!waitq_irq_safe(&wqset->wqset_q));
4616
4617 waitq_set_lock(wqset);
4618 /*
4619 * clear out any prepost from waitq into wqset
4620 * TODO: this could be more efficient than a linear search of
4621 * the waitq set's prepost list.
4622 */
4623 ulctx.unlink_wq = waitq;
4624 ulctx.unlink_wqset = wqset;
4625 (void)wq_prepost_iterate(wqset->wqset_prepost_id, (void *)&ulctx,
4626 waitq_unlink_prepost_cb);
4627 waitq_set_unlock(wqset);
4628 } else {
4629 kr = KERN_NOT_IN_SET; /* waitq is _not_ associated with wqset */
4630 }
4631
4632 out:
4633 return kr;
4634 }
4635
4636 /**
4637 * unlink 'waitq' from 'wqset'
4638 *
4639 * Conditions:
4640 * neither 'waitq' nor 'wqset' is locked
4641 * may disable and re-enable interrupts
4642 * may (rarely) spin in prepost clear
4643 * (see waitq_clear_prepost_locked)
4644 */
4645 kern_return_t
4646 waitq_unlink(struct waitq *waitq, struct waitq_set *wqset)
4647 {
4648 kern_return_t kr = KERN_SUCCESS;
4649
4650 assert(waitqs_is_set(wqset));
4651
4652 /*
4653 * we allow the waitq to be invalid because the caller may be trying
4654 * to clear out old/dirty state
4655 */
4656 if (!waitq_valid(waitq)) {
4657 return KERN_INVALID_ARGUMENT;
4658 }
4659
4660 wqdbg_v("unlink waitq %p from set 0x%llx",
4661 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id);
4662
4663 assert(!waitq_irq_safe(waitq));
4664
4665 waitq_lock(waitq);
4666
4667 kr = waitq_unlink_locked(waitq, wqset);
4668
4669 waitq_unlock(waitq);
4670 return kr;
4671 }
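
/*
 * A minimal sketch of unlinking a single membership. The helper name
 * is hypothetical. KERN_NOT_IN_SET is an expected result when the
 * waitq was never linked (or the set was never lazily linked), and
 * KERN_INVALID_ARGUMENT means the waitq was already invalidated.
 */
#if 0 /* illustrative sketch only -- not compiled */
static void
example_unlink(struct waitq *wq, struct waitq_set *wqset)
{
        kern_return_t kr = waitq_unlink(wq, wqset);

        /* only KERN_SUCCESS guarantees a link was actually removed */
        (void)kr;
}
#endif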
4672
4673 /**
4674 * unlink a waitq from a waitq set, but reference the waitq by its prepost ID
4675 *
4676 * Conditions:
4677 * 'wqset' is unlocked
4678 * wqp_id may be valid or invalid
4679 */
4680 void
4681 waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset)
4682 {
4683 struct wq_prepost *wqp;
4684
4685 disable_preemption();
4686 wqp = wq_prepost_get(wqp_id);
4687 if (wqp) {
4688 struct waitq *wq;
4689
4690 wq = wqp->wqp_wq.wqp_wq_ptr;
4691
4692 /*
4693 * lock the waitq, then release our prepost ID reference, then
4694 * unlink the waitq from the wqset: this ensures that we don't
4695 * hold a prepost ID reference during the unlink, but we also
4696 * complete the unlink operation atomically to avoid a race
4697 * with waitq_unlink[_all].
4698 */
4699 assert(!waitq_irq_safe(wq));
4700
4701 waitq_lock(wq);
4702 wq_prepost_put(wqp);
4703
4704 if (!waitq_valid(wq)) {
4705 /* someone already tore down this waitq! */
4706 waitq_unlock(wq);
4707 enable_preemption();
4708 return;
4709 }
4710
4711 /* this _may_ drop the wq lock, but that's OK */
4712 waitq_unlink_locked(wq, wqset);
4713
4714 waitq_unlock(wq);
4715 }
4716 enable_preemption();
4717 return;
4718 }
4719
4720
4721 /**
4722 * reference and lock a waitq by its prepost ID
4723 *
4724 * Conditions:
4725 * wqp_id may be valid or invalid
4726 *
4727 * Returns:
4728 * a locked waitq if wqp_id was valid
4729 * NULL on failure
4730 */
4731 struct waitq *
4732 waitq_lock_by_prepost_id(uint64_t wqp_id)
4733 {
4734 struct waitq *wq = NULL;
4735 struct wq_prepost *wqp;
4736
4737 disable_preemption();
4738 wqp = wq_prepost_get(wqp_id);
4739 if (wqp) {
4740 wq = wqp->wqp_wq.wqp_wq_ptr;
4741
4742 assert(!waitq_irq_safe(wq));
4743
4744 waitq_lock(wq);
4745 wq_prepost_put(wqp);
4746
4747 if (!waitq_valid(wq)) {
4748 /* someone already tore down this waitq! */
4749 waitq_unlock(wq);
4750 enable_preemption();
4751 return NULL;
4752 }
4753 }
4754 enable_preemption();
4755 return wq;
4756 }
4757
4758
4759 /**
4760 * unlink 'waitq' from all sets to which it belongs
4761 *
4762 * Conditions:
4763 * 'waitq' is locked on entry
4764 * returns with waitq lock dropped
4765 *
4766 * Notes:
4767 * may (rarely) spin (see waitq_clear_prepost_locked)
4768 */
4769 kern_return_t
4770 waitq_unlink_all_unlock(struct waitq *waitq)
4771 {
4772 uint64_t old_set_id = 0;
4773 wqdbg_v("unlink waitq %p from all sets",
4774 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq));
4775 assert(!waitq_irq_safe(waitq));
4776
4777 /* it's not a member of any sets */
4778 if (waitq->waitq_set_id == 0) {
4779 waitq_unlock(waitq);
4780 return KERN_SUCCESS;
4781 }
4782
4783 old_set_id = waitq->waitq_set_id;
4784 waitq->waitq_set_id = 0;
4785
4786 /*
4787 * invalidate the prepost entry for this waitq.
4788 * This may drop and re-acquire the waitq lock, but that's OK because
4789 * if it was added to another set and preposted to that set in the
4790 * time we drop the lock, the state will remain consistent.
4791 */
4792 (void)waitq_clear_prepost_locked(waitq);
4793
4794 waitq_unlock(waitq);
4795
4796 if (old_set_id) {
4797 /*
4798 * Walk the link table and invalidate each LINK object that
4799 * used to connect this waitq to one or more sets: this works
4800 * because WQL_LINK objects are private to each wait queue
4801 */
4802 (void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, old_set_id,
4803 WQL_LINK, NULL, waitq_unlink_all_cb);
4804 }
4805
4806 return KERN_SUCCESS;
4807 }
4808
4809 /**
4810 * unlink 'waitq' from all sets to which it belongs
4811 *
4812 * Conditions:
4813 * 'waitq' is not locked
4814 * may disable and re-enable interrupts
4815 * may (rarely) spin
4816 * (see waitq_unlink_all_locked, waitq_clear_prepost_locked)
4817 */
4818 kern_return_t
4819 waitq_unlink_all(struct waitq *waitq)
4820 {
4821 kern_return_t kr = KERN_SUCCESS;
4822
4823 if (!waitq_valid(waitq)) {
4824 panic("Invalid waitq: %p", waitq);
4825 }
4826
4827 assert(!waitq_irq_safe(waitq));
4828 waitq_lock(waitq);
4829 if (!waitq_valid(waitq)) {
4830 waitq_unlock(waitq);
4831 return KERN_SUCCESS;
4832 }
4833
4834 kr = waitq_unlink_all_unlock(waitq);
4835 /* waitq unlocked and set links deallocated */
4836
4837 return kr;
4838 }
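
/*
 * A minimal teardown sketch: before memory containing a waitq is
 * reused, the waitq should be detached from every set it joined so
 * that stale link and prepost table entries are invalidated. The
 * helper name is hypothetical, and waitq_deinit() is assumed from the
 * public waitq interface (it is not defined in this excerpt).
 */
#if 0 /* illustrative sketch only -- not compiled */
static void
example_teardown(struct waitq *wq)
{
        /* drop all set memberships and any prepost state */
        (void)waitq_unlink_all(wq);

        /* then the owner can deinitialize the embedded waitq */
        waitq_deinit(wq);
}
#endif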
4839
4840
4841 /**
4842 * unlink all waitqs from 'wqset'
4843 *
4844 * Conditions:
4845 * 'wqset' is locked on entry
4846 * 'wqset' is unlocked on exit and spl is restored
4847 *
4848 * Note:
4849 * may (rarely) spin/block (see waitq_clear_prepost_locked)
4850 */
4851 kern_return_t
4852 waitq_set_unlink_all_unlock(struct waitq_set *wqset)
4853 {
4854 struct waitq_link *link;
4855 uint64_t prepost_id;
4856
4857 wqdbg_v("unlink all queues from set 0x%llx", wqset->wqset_id);
4858
4859 /*
4860 * This operation does not require interaction with any of the set's
4861 * constituent wait queues. All we have to do is invalidate the SetID
4862 */
4863
4864 if (waitqs_is_linked(wqset)) {
4865 /* invalidate and re-alloc the link object first */
4866 link = wql_get_link(wqset->wqset_id);
4867
4868 /* we may have raced with a waitq_set_deinit: handle this */
4869 if (!link) {
4870 waitq_set_unlock(wqset);
4871 return KERN_SUCCESS;
4872 }
4873
4874 wql_invalidate(link);
4875
4876 /* re-alloc the object to get a new generation ID */
4877 wql_realloc_link(link, WQL_WQS);
4878 link->wql_wqs.wql_set = wqset;
4879
4880 wqset->wqset_id = link->wql_setid.id;
4881 wql_mkvalid(link);
4882 wql_put_link(link);
4883 }
4884
4885 /* clear any preposts attached to this set */
4886 prepost_id = 0;
4887 if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id) {
4888 prepost_id = wqset->wqset_prepost_id;
4889 }
4890 /* else { TODO: notify kqueue subsystem? } */
4891 wqset->wqset_prepost_id = 0;
4892
4893 /*
4894 * clear set linkage and prepost object associated with this set:
4895 * waitq sets may prepost to other sets if, for example, they are
4896 * associated with a kqueue which is in a select set.
4897 *
4898 * This releases all the set link objects
4899 * (links to other sets to which this set was previously added)
4900 */
4901 waitq_unlink_all_unlock(&wqset->wqset_q);
4902 /* wqset->wqset_q unlocked */
4903
4904 /* drop / unlink all the prepost table objects */
4905 if (prepost_id) {
4906 (void)wq_prepost_iterate(prepost_id, NULL,
4907 wqset_clear_prepost_chain_cb);
4908 }
4909
4910 return KERN_SUCCESS;
4911 }
4912
4913 /**
4914 * unlink all waitqs from 'wqset'
4915 *
4916 * Conditions:
4917 * 'wqset' is not locked
4918 * may (rarely) spin/block (see waitq_clear_prepost_locked)
4919 */
4920 kern_return_t
4921 waitq_set_unlink_all(struct waitq_set *wqset)
4922 {
4923 assert(waitqs_is_set(wqset));
4924 assert(!waitq_irq_safe(&wqset->wqset_q));
4925
4926 waitq_set_lock(wqset);
4927 return waitq_set_unlink_all_unlock(wqset);
4928 /* wqset unlocked and set links and preposts deallocated */
4929 }
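
/*
 * A minimal sketch of recycling a waitq_set: waitq_set_unlink_all()
 * detaches every member queue in one shot (by invalidating the old set
 * ID and generating a fresh one) and drops accumulated preposts,
 * leaving the set empty but still usable. The helper name is
 * hypothetical.
 */
#if 0 /* illustrative sketch only -- not compiled */
static void
example_recycle_set(struct waitq_set *wqset)
{
        /* previously linked waitqs are no longer members afterwards */
        (void)waitq_set_unlink_all(wqset);
}
#endif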
4930
4931 static int
4932 waitq_prepost_reserve_cb(struct waitq *waitq, void *ctx,
4933 struct waitq_link *link)
4934 {
4935 uint32_t *num = (uint32_t *)ctx;
4936 (void)waitq;
4937
4938 /*
4939 * In the worst case, we'll have to allocate 2 prepost objects
4940 * per waitq set (if the set was already preposted by another
4941 * waitq).
4942 */
4943 if (wql_type(link) == WQL_WQS) {
4944 /*
4945 * check to see if the associated waitq actually supports
4946 * preposting
4947 */
4948 if (waitq_set_can_prepost(link->wql_wqs.wql_set)) {
4949 *num += 2;
4950 }
4951 }
4952 return WQ_ITERATE_CONTINUE;
4953 }
4954
4955 static int
4956 waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq,
4957 int *did_unlock, struct wq_prepost **wqp)
4958 {
4959 struct wq_prepost *tmp;
4960 struct wqp_cache *cache;
4961
4962 *did_unlock = 0;
4963
4964 /*
4965 * Before we unlock the waitq, check the per-processor prepost object
4966 * cache to see if there's enough there for us. If so, do the
4967 * allocation, keep the lock and save an entire iteration over the set
4968 * linkage!
4969 */
4970 if (waitq) {
4971 disable_preemption();
4972 cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
4973 if (nalloc <= (int)cache->avail) {
4974 goto do_alloc;
4975 }
4976 enable_preemption();
4977
4978 /* unlock the waitq to perform the allocation */
4979 *did_unlock = 1;
4980 waitq_unlock(waitq);
4981 }
4982
4983 do_alloc:
4984 tmp = wq_prepost_alloc(LT_RESERVED, nalloc);
4985 if (!tmp) {
4986 panic("Couldn't reserve %d preposts for waitq @%p (wqp@%p)",
4987 nalloc, waitq, *wqp);
4988 }
4989 if (*wqp) {
4990 /* link the two lists */
4991 int __assert_only rc;
4992 rc = wq_prepost_rlink(tmp, *wqp);
4993 assert(rc == nalloc);
4994 }
4995 *wqp = tmp;
4996
4997 /*
4998 * If the caller can block, then enforce a minimum-free table element
4999 * policy here. This helps ensure that we will have enough prepost
5000 * objects for callers such as selwakeup() that can be called with
5001 * spin locks held.
5002 */
5003 if (get_preemption_level() == 0) {
5004 wq_prepost_ensure_free_space();
5005 }
5006
5007 if (waitq) {
5008 if (*did_unlock == 0) {
5009 /* decrement the preemption count if alloc from cache */
5010 enable_preemption();
5011 } else {
5012 /* otherwise: re-lock the waitq */
5013 waitq_lock(waitq);
5014 }
5015 }
5016
5017 return nalloc;
5018 }
5019
5020 static int
5021 waitq_count_prepost_reservation(struct waitq *waitq, int extra, int keep_locked)
5022 {
5023 int npreposts = 0;
5024
5025 /*
5026 * If the waitq is not currently part of a set, and we're not asked to
5027 * keep the waitq locked, then we'll want to have 3 in reserve
5028 * just-in-case it becomes part of a set while we unlock and reserve.
5029 * We may need up to 1 object for the waitq, and 2 for the set.
5030 */
5031 if (waitq->waitq_set_id == 0) {
5032 npreposts = 3;
5033 } else {
5034 /* this queue has never been preposted before */
5035 if (waitq->waitq_prepost_id == 0) {
5036 npreposts = 3;
5037 }
5038
5039 /*
5040 * Walk the set of table linkages associated with this waitq
5041 * and count the worst-case number of prepost objects that
5042 * may be needed during a wakeup_all. We can walk this without
5043 * locking each set along the way because the table-based IDs
5044 * disconnect us from the set pointers themselves, and the
5045 * table walking is careful to read the setid values only once.
5046 * Locking each set up the chain also doesn't guarantee that
5047 * their membership won't change between the time we unlock
5048 * that set and when we actually go to prepost, so our
5049 * situation is no worse than before and we've alleviated lock
5050 * contention on any sets to which this waitq belongs.
5051 */
5052 (void)walk_waitq_links(LINK_WALK_FULL_DAG_UNLOCKED,
5053 waitq, waitq->waitq_set_id,
5054 WQL_WQS, (void *)&npreposts,
5055 waitq_prepost_reserve_cb);
5056 }
5057
5058 if (extra > 0) {
5059 npreposts += extra;
5060 }
5061
5062 if (npreposts == 0 && !keep_locked) {
5063 /*
5064 * If we get here, we were asked to reserve some prepost
5065 * objects for a waitq that's previously preposted, and is not
5066 * currently a member of any sets. We have also been
5067 * instructed to unlock the waitq when we're done. In this
5068 * case, we pre-allocated enough reserved objects to handle
5069 * the case where the waitq gets added to a single set when
5070 * the lock is released.
5071 */
5072 npreposts = 3;
5073 }
5074
5075 return npreposts;
5076 }
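
/*
 * A worked example of the count above (illustrative, assuming every
 * set supports preposting and 'extra' == 0): a waitq that belongs to
 * 3 sets and has never been preposted reserves 3 + 2*3 = 9 objects --
 * the base of 3 (one object for the waitq itself plus slack for one
 * set) and 2 more for each WQL_WQS link found during the walk. The
 * same waitq with an existing waitq_prepost_id reserves only 2*3 = 6.
 */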
5077
5078
5079 /**
5080 * pre-allocate prepost objects for 'waitq'
5081 *
5082 * Conditions:
5083 * 'waitq' is not locked
5084 *
5085 * Returns:
5086 * panic on error
5087 *
5088 * on success, the returned value is the ID at the head of a singly-linked
5089 * list of pre-allocated prepost objects (0 if nothing was reserved).
5090 *
5091 * Notes:
5092 * If 'lock_state' is WAITQ_KEEP_LOCKED, this function performs the pre-allocation
5093 * atomically and returns 'waitq' locked.
5094 *
5095 * This function attempts to pre-allocate precisely enough prepost
5096 * objects based on the current set membership of 'waitq'. If the
5097 * operation is performed atomically, then the caller
5098 * is guaranteed to have enough pre-allocated prepost objects to avoid
5099 * any (rare) blocking in the wakeup path.
5100 */
5101 uint64_t
5102 waitq_prepost_reserve(struct waitq *waitq, int extra,
5103 waitq_lock_state_t lock_state)
5104 {
5105 uint64_t reserved = 0;
5106 uint64_t prev_setid = 0, prev_prepostid = 0;
5107 struct wq_prepost *wqp = NULL;
5108 int nalloc = 0, npreposts = 0;
5109 int keep_locked = (lock_state == WAITQ_KEEP_LOCKED);
5110 int unlocked = 0;
5111
5112 wqdbg_v("Attempting to reserve prepost linkages for waitq %p (extra:%d)",
5113 (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), extra);
5114
5115 if (waitq == NULL && extra > 0) {
5116 /*
5117 * Simple prepost object allocation:
5118 * we'll add 2 more because the waitq might need an object,
5119 * and the set itself may need a new POST object in addition
5120 * to the number of preposts requested by the caller
5121 */
5122 nalloc = waitq_alloc_prepost_reservation(extra + 2, NULL,
5123 &unlocked, &wqp);
5124 assert(nalloc == extra + 2);
5125 return wqp->wqp_prepostid.id;
5126 }
5127
5128 assert(lock_state == WAITQ_KEEP_LOCKED || lock_state == WAITQ_UNLOCK);
5129
5130 assert(!waitq_irq_safe(waitq));
5131
5132 waitq_lock(waitq);
5133
5134 /* remember the set ID that we started with */
5135 prev_setid = waitq->waitq_set_id;
5136 prev_prepostid = waitq->waitq_prepost_id;
5137
5138 /*
5139 * If the waitq is not part of a set, and we're asked to
5140 * keep the set locked, then we don't have to reserve
5141 * anything!
5142 */
5143 if (prev_setid == 0 && keep_locked) {
5144 goto out;
5145 }
5146
5147 npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked);
5148
5149 /* nothing for us to do! */
5150 if (npreposts == 0) {
5151 if (keep_locked) {
5152 goto out;
5153 }
5154 goto out_unlock;
5155 }
5156
5157 try_alloc:
5158 /* this _may_ unlock and relock the waitq! */
5159 nalloc = waitq_alloc_prepost_reservation(npreposts, waitq,
5160 &unlocked, &wqp);
5161
5162 if (!unlocked) {
5163 /* allocation held the waitq lock: we're done! */
5164 if (keep_locked) {
5165 goto out;
5166 }
5167 goto out_unlock;
5168 }
5169
5170 /*
5171 * Before we return, if the allocation had to unlock the waitq, we
5172 * must check one more time to see if we have enough. If not, we'll
5173 * try to allocate the difference. If the caller requests it, we'll
5174 * also leave the waitq locked so that the use of the pre-allocated
5175 * prepost objects can be guaranteed to be enough if a wakeup_all is
5176 * performed before unlocking the waitq.
5177 */
5178
5179 /*
5180 * If the waitq is no longer associated with a set, or if the waitq's
5181 * set/prepostid has not changed since we first walked its linkage,
5182 * we're done.
5183 */
5184 if ((waitq->waitq_set_id == 0) ||
5185 (waitq->waitq_set_id == prev_setid &&
5186 waitq->waitq_prepost_id == prev_prepostid)) {
5187 if (keep_locked) {
5188 goto out;
5189 }
5190 goto out_unlock;
5191 }
5192
5193 npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked);
5194
5195 if (npreposts > nalloc) {
5196 prev_setid = waitq->waitq_set_id;
5197 prev_prepostid = waitq->waitq_prepost_id;
5198 npreposts = npreposts - nalloc; /* only allocate the diff */
5199 goto try_alloc;
5200 }
5201
5202 if (keep_locked) {
5203 goto out;
5204 }
5205
5206 out_unlock:
5207 waitq_unlock(waitq);
5208 out:
5209 if (wqp) {
5210 reserved = wqp->wqp_prepostid.id;
5211 }
5212
5213 return reserved;
5214 }
5215
5216 /**
5217 * release a linked list of prepost objects allocated via _prepost_reserve
5218 *
5219 * Conditions:
5220 * may (rarely) spin waiting for prepost table growth memcpy
5221 */
5222 void
5223 waitq_prepost_release_reserve(uint64_t id)
5224 {
5225 struct wq_prepost *wqp;
5226
5227 wqdbg_v("releasing reserved preposts starting at: 0x%llx", id);
5228
5229 wqp = wq_prepost_rfirst(id);
5230 if (!wqp) {
5231 return;
5232 }
5233
5234 wq_prepost_release_rlist(wqp);
5235 }
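
/*
 * A minimal sketch of the reserve/use/release cycle for preposts,
 * mirroring the wakeup wrappers later in this file: reserve while
 * blocking is still allowed (WAITQ_KEEP_LOCKED also returns with the
 * waitq locked), perform the locked wakeup, then hand back anything
 * that was not consumed. The helper name is hypothetical and the waitq
 * is assumed to be non-IRQ-safe.
 */
#if 0 /* illustrative sketch only -- not compiled */
static kern_return_t
example_wakeup_with_reservation(struct waitq *wq, event64_t event,
    int priority)
{
        uint64_t reserved_preposts;
        kern_return_t kr;

        /* may allocate/block; returns with 'wq' locked */
        reserved_preposts = waitq_prepost_reserve(wq, 0, WAITQ_KEEP_LOCKED);

        /* unlocks 'wq' and consumes reservations only as needed */
        kr = waitq_wakeup64_all_locked(wq, event, THREAD_AWAKENED,
            &reserved_preposts, priority, WAITQ_UNLOCK);

        /* release unused prepost objects (safe when the id is 0) */
        waitq_prepost_release_reserve(reserved_preposts);
        return kr;
}
#endif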
5236
5237
5238 /**
5239 * clear all preposts from 'wqset'
5240 *
5241 * Conditions:
5242 * 'wqset' is not locked
5243 */
5244 void
5245 waitq_set_clear_preposts(struct waitq_set *wqset)
5246 {
5247 uint64_t prepost_id;
5248 spl_t spl;
5249
5250 assert(waitqs_is_set(wqset));
5251
5252 if (!wqset->wqset_q.waitq_prepost || !wqset->wqset_prepost_id) {
5253 return;
5254 }
5255
5256 wqdbg_v("Clearing all preposted queues on waitq_set: 0x%llx",
5257 wqset->wqset_id);
5258
5259 if (waitq_irq_safe(&wqset->wqset_q)) {
5260 spl = splsched();
5261 }
5262 waitq_set_lock(wqset);
5263 prepost_id = wqset->wqset_prepost_id;
5264 wqset->wqset_prepost_id = 0;
5265 waitq_set_unlock(wqset);
5266 if (waitq_irq_safe(&wqset->wqset_q)) {
5267 splx(spl);
5268 }
5269
5270 /* drop / unlink all the prepost table objects */
5271 if (prepost_id) {
5272 (void)wq_prepost_iterate(prepost_id, NULL,
5273 wqset_clear_prepost_chain_cb);
5274 }
5275 }
5276
5277
5278 /* ----------------------------------------------------------------------
5279 *
5280 * Iteration: waitq -> sets / waitq_set -> preposts
5281 *
5282 * ---------------------------------------------------------------------- */
5283
5284 struct wq_it_ctx {
5285 void *input;
5286 void *ctx;
5287 waitq_iterator_t it;
5288 };
5289
5290 static int
5291 waitq_iterate_sets_cb(struct waitq *waitq, void *ctx,
5292 struct waitq_link *link)
5293 {
5294 struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx);
5295 struct waitq_set *wqset;
5296 int ret;
5297
5298 (void)waitq;
5299 assert(!waitq_irq_safe(waitq));
5300 assert(wql_type(link) == WQL_WQS);
5301
5302 /*
5303 * the waitq is locked, so we can just take the set lock
5304 * and call the iterator function
5305 */
5306 wqset = link->wql_wqs.wql_set;
5307 assert(wqset != NULL);
5308 assert(!waitq_irq_safe(&wqset->wqset_q));
5309 waitq_set_lock(wqset);
5310
5311 ret = wctx->it(wctx->ctx, (struct waitq *)wctx->input, wqset);
5312
5313 waitq_set_unlock(wqset);
5314 return ret;
5315 }
5316
5317 /**
5318 * call external iterator function for each prepost object in wqset
5319 *
5320 * Conditions:
5321 * Called from wq_prepost_foreach_locked
5322 * (wqset locked, waitq _not_ locked)
5323 */
5324 static int
5325 wqset_iterate_prepost_cb(struct waitq_set *wqset, void *ctx,
5326 struct wq_prepost *wqp, struct waitq *waitq)
5327 {
5328 struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx);
5329 uint64_t wqp_id;
5330 int ret;
5331
5332 (void)wqp;
5333
5334 /*
5335 * This is a bit tricky. The 'wqset' is locked, but the 'waitq' is not.
5336 * Taking the 'waitq' lock is a lock order violation, so we need to be
5337 * careful. We also must realize that we may have taken a reference to
5338 * the 'wqp' just as the associated waitq was being torn down (or
5339 * clearing all its preposts) - see waitq_clear_prepost_locked(). If
5340 * the 'wqp' is valid and we can get the waitq lock, then we are good
5341 * to go. If not, we need to back off, check that the 'wqp' hasn't
5342 * been invalidated, and try to re-take the locks.
5343 */
5344 assert(!waitq_irq_safe(waitq));
5345
5346 if (waitq_lock_try(waitq)) {
5347 goto call_iterator;
5348 }
5349
5350 if (!wqp_is_valid(wqp)) {
5351 return WQ_ITERATE_RESTART;
5352 }
5353
5354 /* We are passed a prepost object with a reference on it. If neither
5355 * the waitq set nor the waitq require interrupts disabled, then we
5356 * may block on the delay(1) call below. We can't hold a prepost
5357 * object reference while blocking, so we have to give that up as well
5358 * and re-acquire it when we come back.
5359 */
5360 wqp_id = wqp->wqp_prepostid.id;
5361 wq_prepost_put(wqp);
5362 waitq_set_unlock(wqset);
5363 wqdbg_v("dropped set:%p lock waiting for wqp:%p (0x%llx -> wq:%p)",
5364 wqset, wqp, wqp->wqp_prepostid.id, waitq);
5365 delay(1);
5366 waitq_set_lock(wqset);
5367 wqp = wq_prepost_get(wqp_id);
5368 if (!wqp) {
5369 /* someone cleared preposts while we slept! */
5370 return WQ_ITERATE_DROPPED;
5371 }
5372
5373 /*
5374 * TODO:
5375 * This differs slightly from the logic in ipc_mqueue.c:
5376 * ipc_mqueue_receive_on_thread(). There, if the waitq lock
5377 * can't be obtained, the prepost link is placed on the back of
5378 * the chain, and the iteration starts from the beginning. Here,
5379 * we just restart from the beginning.
5380 */
5381 return WQ_ITERATE_RESTART;
5382
5383 call_iterator:
5384 if (!wqp_is_valid(wqp)) {
5385 ret = WQ_ITERATE_RESTART;
5386 goto out_unlock;
5387 }
5388
5389 /* call the external callback */
5390 ret = wctx->it(wctx->ctx, waitq, wqset);
5391
5392 if (ret == WQ_ITERATE_BREAK_KEEP_LOCKED) {
5393 ret = WQ_ITERATE_BREAK;
5394 goto out;
5395 }
5396
5397 out_unlock:
5398 waitq_unlock(waitq);
5399 out:
5400 return ret;
5401 }
5402
5403 /**
5404 * iterator over all sets to which the given waitq has been linked
5405 *
5406 * Conditions:
5407 * 'waitq' is locked
5408 */
5409 int
5410 waitq_iterate_sets(struct waitq *waitq, void *ctx, waitq_iterator_t it)
5411 {
5412 int ret;
5413 struct wq_it_ctx wctx = {
5414 .input = (void *)waitq,
5415 .ctx = ctx,
5416 .it = it,
5417 };
5418 if (!it || !waitq) {
5419 return KERN_INVALID_ARGUMENT;
5420 }
5421
5422 ret = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id,
5423 WQL_WQS, (void *)&wctx, waitq_iterate_sets_cb);
5424 if (ret == WQ_ITERATE_CONTINUE) {
5425 ret = WQ_ITERATE_SUCCESS;
5426 }
5427 return ret;
5428 }
5429
5430 /**
5431 * iterator over all preposts in the given wqset
5432 *
5433 * Conditions:
5434 * 'wqset' is locked
5435 */
5436 int
5437 waitq_set_iterate_preposts(struct waitq_set *wqset,
5438 void *ctx, waitq_iterator_t it)
5439 {
5440 struct wq_it_ctx wctx = {
5441 .input = (void *)wqset,
5442 .ctx = ctx,
5443 .it = it,
5444 };
5445 if (!it || !wqset) {
5446 return WQ_ITERATE_INVALID;
5447 }
5448
5449 assert(waitq_held(&wqset->wqset_q));
5450
5451 return wq_prepost_foreach_locked(wqset, (void *)&wctx,
5452 wqset_iterate_prepost_cb);
5453 }
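
/*
 * A minimal sketch of an external iterator. The callback signature is
 * inferred from the call sites above (ctx, waitq, wqset -> int) and
 * the names are hypothetical. Returning WQ_ITERATE_CONTINUE keeps
 * walking; WQ_ITERATE_BREAK stops early.
 */
#if 0 /* illustrative sketch only -- not compiled */
static int
example_count_cb(void *ctx, struct waitq *waitq, struct waitq_set *wqset)
{
        uint32_t *count = (uint32_t *)ctx;

        (void)waitq;
        (void)wqset;
        (*count)++;
        return WQ_ITERATE_CONTINUE;
}

static uint32_t
example_count_sets(struct waitq *wq)
{
        uint32_t count = 0;

        assert(!waitq_irq_safe(wq));
        waitq_lock(wq); /* the iterator requires the waitq to be locked */
        (void)waitq_iterate_sets(wq, &count, example_count_cb);
        waitq_unlock(wq);
        return count;
}
#endif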
5454
5455
5456 /* ----------------------------------------------------------------------
5457 *
5458 * Higher-level APIs
5459 *
5460 * ---------------------------------------------------------------------- */
5461
5462
5463 /**
5464 * declare a thread's intent to wait on 'waitq' for 'wait_event'
5465 *
5466 * Conditions:
5467 * 'waitq' is not locked
5468 */
5469 wait_result_t
5470 waitq_assert_wait64(struct waitq *waitq,
5471 event64_t wait_event,
5472 wait_interrupt_t interruptible,
5473 uint64_t deadline)
5474 {
5475 thread_t thread = current_thread();
5476 wait_result_t ret;
5477 spl_t s;
5478
5479 if (!waitq_valid(waitq)) {
5480 panic("Invalid waitq: %p", waitq);
5481 }
5482
5483 if (waitq_irq_safe(waitq)) {
5484 s = splsched();
5485 }
5486
5487 waitq_lock(waitq);
5488 ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
5489 TIMEOUT_URGENCY_SYS_NORMAL,
5490 deadline, TIMEOUT_NO_LEEWAY, thread);
5491 waitq_unlock(waitq);
5492
5493 if (waitq_irq_safe(waitq)) {
5494 splx(s);
5495 }
5496
5497 return ret;
5498 }
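
/*
 * A minimal wait/wake handshake built on the wrapper above and the
 * wakeup routines later in this file; condition management is elided.
 * The helper names, the 'example_flag' protocol, and the use of
 * CAST_EVENT64_T to derive the event are illustrative assumptions;
 * a deadline of 0 requests an untimed wait.
 */
#if 0 /* illustrative sketch only -- not compiled */
static int example_flag; /* hypothetical condition guarded elsewhere */

static wait_result_t
example_wait_for_flag(struct waitq *wq)
{
        wait_result_t wr;

        /* a real caller would re-check the condition under a lock */
        wr = waitq_assert_wait64(wq, CAST_EVENT64_T(&example_flag),
            THREAD_UNINT, 0 /* no deadline */);
        if (wr == THREAD_WAITING) {
                wr = thread_block(THREAD_CONTINUE_NULL);
        }
        return wr;
}

static void
example_post_flag(struct waitq *wq, int priority)
{
        example_flag = 1;
        (void)waitq_wakeup64_one(wq, CAST_EVENT64_T(&example_flag),
            THREAD_AWAKENED, priority);
}
#endif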
5499
5500 /**
5501 * declare a thread's intent to wait on 'waitq' for 'wait_event'
5502 *
5503 * Conditions:
5504 * 'waitq' is not locked
5505 * will disable and re-enable interrupts while locking current_thread()
5506 */
5507 wait_result_t
5508 waitq_assert_wait64_leeway(struct waitq *waitq,
5509 event64_t wait_event,
5510 wait_interrupt_t interruptible,
5511 wait_timeout_urgency_t urgency,
5512 uint64_t deadline,
5513 uint64_t leeway)
5514 {
5515 wait_result_t ret;
5516 thread_t thread = current_thread();
5517 spl_t s;
5518
5519 if (!waitq_valid(waitq)) {
5520 panic("Invalid waitq: %p", waitq);
5521 }
5522
5523 if (waitq_irq_safe(waitq)) {
5524 s = splsched();
5525 }
5526
5527 waitq_lock(waitq);
5528 ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
5529 urgency, deadline, leeway, thread);
5530 waitq_unlock(waitq);
5531
5532 if (waitq_irq_safe(waitq)) {
5533 splx(s);
5534 }
5535
5536 return ret;
5537 }
5538
5539 /**
5540 * wakeup a single thread from a waitq that's waiting for a given event
5541 *
5542 * Conditions:
5543 * 'waitq' is not locked
5544 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
5545 * may disable and re-enable interrupts
5546 *
5547 * Notes:
5548 * will _not_ block if waitq is global (or not a member of any set)
5549 */
5550 kern_return_t
5551 waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event,
5552 wait_result_t result, int priority)
5553 {
5554 kern_return_t kr;
5555 uint64_t reserved_preposts = 0;
5556 spl_t spl;
5557
5558 if (!waitq_valid(waitq)) {
5559 panic("Invalid waitq: %p", waitq);
5560 }
5561
5562 if (!waitq_irq_safe(waitq)) {
5563 /* reserve preposts in addition to locking the waitq */
5564 reserved_preposts = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
5565 } else {
5566 spl = splsched();
5567 waitq_lock(waitq);
5568 }
5569
5570 /* waitq is locked upon return */
5571 kr = waitq_wakeup64_one_locked(waitq, wake_event, result,
5572 &reserved_preposts, priority, WAITQ_UNLOCK);
5573
5574 if (waitq_irq_safe(waitq)) {
5575 splx(spl);
5576 }
5577
5578 /* release any left-over prepost object (won't block/lock anything) */
5579 waitq_prepost_release_reserve(reserved_preposts);
5580
5581 return kr;
5582 }
5583
5584 /**
5585 * wakeup all threads from a waitq that are waiting for a given event
5586 *
5587 * Conditions:
5588 * 'waitq' is not locked
5589 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
5590 * may disable and re-enable interrupts
5591 *
5592 * Notes:
5593 * will _not_ block if waitq is global (or not a member of any set)
5594 */
5595 kern_return_t
5596 waitq_wakeup64_all(struct waitq *waitq,
5597 event64_t wake_event,
5598 wait_result_t result,
5599 int priority)
5600 {
5601 kern_return_t ret;
5602 uint64_t reserved_preposts = 0;
5603 spl_t s;
5604
5605 if (!waitq_valid(waitq)) {
5606 panic("Invalid waitq: %p", waitq);
5607 }
5608
5609 if (!waitq_irq_safe(waitq)) {
5610 /* reserve preposts in addition to locking waitq */
5611 reserved_preposts = waitq_prepost_reserve(waitq, 0,
5612 WAITQ_KEEP_LOCKED);
5613 } else {
5614 s = splsched();
5615 waitq_lock(waitq);
5616 }
5617
5618 ret = waitq_wakeup64_all_locked(waitq, wake_event, result,
5619 &reserved_preposts, priority,
5620 WAITQ_UNLOCK);
5621
5622 if (waitq_irq_safe(waitq)) {
5623 splx(s);
5624 }
5625
5626 waitq_prepost_release_reserve(reserved_preposts);
5627
5628 return ret;
5629 }
5630
5631 /**
5632 * wakeup a specific thread iff it's waiting on 'waitq' for 'wake_event'
5633 *
5634 * Conditions:
5635 * 'waitq' is not locked
5636 *
5637 * Notes:
5638 * May temporarily disable and re-enable interrupts
5639 */
5640 kern_return_t
5641 waitq_wakeup64_thread(struct waitq *waitq,
5642 event64_t wake_event,
5643 thread_t thread,
5644 wait_result_t result)
5645 {
5646 kern_return_t ret;
5647 spl_t s, th_spl;
5648
5649 if (!waitq_valid(waitq)) {
5650 panic("Invalid waitq: %p", waitq);
5651 }
5652
5653 if (waitq_irq_safe(waitq)) {
5654 s = splsched();
5655 }
5656 waitq_lock(waitq);
5657
5658 ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl);
5659 /* on success, returns 'thread' locked */
5660
5661 waitq_unlock(waitq);
5662
5663 if (ret == KERN_SUCCESS) {
5664 ret = thread_go(thread, result);
5665 assert(ret == KERN_SUCCESS);
5666 thread_unlock(thread);
5667 splx(th_spl);
5668 waitq_stats_count_wakeup(waitq);
5669 } else {
5670 ret = KERN_NOT_WAITING;
5671 waitq_stats_count_fail(waitq);
5672 }
5673
5674 if (waitq_irq_safe(waitq)) {
5675 splx(s);
5676 }
5677
5678 return ret;
5679 }
5680
5681 /**
5682 * wakeup a single thread from a waitq that's waiting for a given event
5683 * and return a reference to that thread
5684 * returns THREAD_NULL if no thread was waiting
5685 *
5686 * Conditions:
5687 * 'waitq' is not locked
5688 * may (rarely) block if 'waitq' is non-global and a member of 1 or more sets
5689 * may disable and re-enable interrupts
5690 *
5691 * Notes:
5692 * will _not_ block if waitq is global (or not a member of any set)
5693 */
5694 thread_t
5695 waitq_wakeup64_identify(struct waitq *waitq,
5696 event64_t wake_event,
5697 wait_result_t result,
5698 int priority)
5699 {
5700 uint64_t reserved_preposts = 0;
5701 spl_t thread_spl = 0;
5702 thread_t thread;
5703 spl_t spl;
5704
5705 if (!waitq_valid(waitq)) {
5706 panic("Invalid waitq: %p", waitq);
5707 }
5708
5709 if (!waitq_irq_safe(waitq)) {
5710 /* reserve preposts in addition to locking waitq */
5711 reserved_preposts = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
5712 } else {
5713 spl = splsched();
5714 waitq_lock(waitq);
5715 }
5716
5717 thread = waitq_wakeup64_identify_locked(waitq, wake_event, result,
5718 &thread_spl, &reserved_preposts,
5719 priority, WAITQ_UNLOCK);
5720 /* waitq is unlocked, thread is locked */
5721
5722 if (thread != THREAD_NULL) {
5723 thread_reference(thread);
5724 thread_unlock(thread);
5725 splx(thread_spl);
5726 }
5727
5728 if (waitq_irq_safe(waitq)) {
5729 splx(spl);
5730 }
5731
5732 /* release any left-over prepost object (won't block/lock anything) */
5733 waitq_prepost_release_reserve(reserved_preposts);
5734
5735 /* returns +1 ref to running thread or THREAD_NULL */
5736 return thread;
5737 }
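
/*
 * A minimal sketch of consuming the +1 reference returned above: when
 * a thread is identified, the caller owns one reference and must drop
 * it with thread_deallocate() once the thread pointer is no longer
 * needed. The helper name is hypothetical.
 */
#if 0 /* illustrative sketch only -- not compiled */
static boolean_t
example_wake_and_inspect(struct waitq *wq, event64_t event, int priority)
{
        thread_t woken;

        woken = waitq_wakeup64_identify(wq, event, THREAD_AWAKENED, priority);
        if (woken == THREAD_NULL) {
                return FALSE; /* nobody was waiting on 'event' */
        }

        /* ... inspect or account for 'woken' here ... */

        thread_deallocate(woken); /* drop the +1 reference */
        return TRUE;
}
#endif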