1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: wait_queue.c (adapted from sched_prim.c)
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Primitives for manipulating wait queues: either global
64 * ones from sched_prim.c, or private ones associated with
65 * particular structures (ports, semaphores, etc.).
66 */
67
68 #include <kern/kern_types.h>
69 #include <kern/simple_lock.h>
70 #include <kern/zalloc.h>
71 #include <kern/queue.h>
72 #include <kern/spl.h>
73 #include <mach/sync_policy.h>
74 #include <kern/mach_param.h>
75 #include <kern/sched_prim.h>
76
77 #include <kern/wait_queue.h>
78 #include <vm/vm_kern.h>
79
80 /* forward declarations */
81 static boolean_t wait_queue_member_locked(
82 wait_queue_t wq,
83 wait_queue_set_t wq_set);
84
85 static void wait_queues_init(void) __attribute__((section("__TEXT, initcode")));
86
87 #define WAIT_QUEUE_MAX thread_max
88 #define WAIT_QUEUE_SET_MAX (task_max * 3)
89 #define WAIT_QUEUE_LINK_MAX (PORT_MAX / 2 + (WAIT_QUEUE_MAX * WAIT_QUEUE_SET_MAX) / 64)
90
91 static zone_t _wait_queue_link_zone;
92 static zone_t _wait_queue_set_zone;
93 static zone_t _wait_queue_zone;
94
95 /* see rdar://6737748&5561610; we need an unshadowed
96 * definition of a WaitQueueLink for debugging,
97 * but it needs to be used somewhere to wind up in
98 * the dSYM file. */
99 volatile WaitQueueLink *unused_except_for_debugging;
100
101
102 /*
103 * Waiting protocols and implementation:
104 *
105 * Each thread may be waiting for exactly one event; this event
106 * is set using assert_wait(). That thread may be awakened either
107 * by performing a thread_wakeup_prim() on its event,
108 * or by directly waking that thread up with clear_wait().
109 *
110 * The implementation of wait events uses a hash table. Each
111 * bucket is a queue of threads having the same hash function
112 * value; the chain for the queue (linked list) is the run queue
113 * field. [It is not possible to be waiting and runnable at the
114 * same time.]
115 *
116 * Locks on both the thread and on the hash buckets govern the
117 * wait event field and the queue chain field. Because wakeup
118 * operations only have the event as an argument, the event hash
119 * bucket must be locked before any thread.
120 *
121 * Scheduling operations may also occur at interrupt level; therefore,
122 * interrupts below splsched() must be prevented when holding
123 * thread or hash bucket locks.
124 *
125 * The wait event hash table declarations are as follows:
126 */
127
128 struct wait_queue boot_wait_queue[1];
129 __private_extern__ struct wait_queue *wait_queues = &boot_wait_queue[0];
130 __private_extern__ uint32_t num_wait_queues = 1;
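/*
 * A minimal usage sketch of the protocol described above, assuming a
 * blockable kernel caller and a hypothetical object "obj" whose address
 * doubles as the wait event (the object, its fields, and the OBJ_READY
 * value are illustrative, not part of this file):
 *
 *	waiting side:
 *		wait_result_t wr = assert_wait((event_t)&obj->state, THREAD_UNINT);
 *		if (wr == THREAD_WAITING)
 *			wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *	waking side:
 *		obj->state = OBJ_READY;
 *		thread_wakeup((event_t)&obj->state);
 */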
131
132 #define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
133 #define ROUNDDOWN(x,y) (((x)/(y))*(y))
134
135 static uint32_t
136 compute_wait_hash_size(void)
137 {
138 uint32_t hsize, queues;
139
140 if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize)))
141 return (hsize);
142
143 queues = thread_max / 11;
144 hsize = P2ROUNDUP(queues * sizeof(struct wait_queue), PAGE_SIZE);
145
146 return hsize;
147 }
148
149 static void
150 wait_queues_init(void)
151 {
152 uint32_t i, whsize, qsz;
153 kern_return_t kret;
154
155 /*
156 * Determine the amount of memory we're willing to reserve for
157 * the waitqueue hash table
158 */
159 whsize = compute_wait_hash_size();
160
161 /* Determine the number of waitqueues we can fit. */
162 qsz = sizeof (struct wait_queue);
163 whsize = ROUNDDOWN(whsize, qsz);
164 num_wait_queues = whsize / qsz;
165
166 /*
167 * The hash algorithm requires that this be a power of 2, so we
168 * just mask off all the low-order bits.
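 *
 * For example (hypothetical numbers): if the sizing above left room for
 * 1500 queues, the loop below would trim num_wait_queues to 1024, the
 * largest power of 2 that fits.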
169 */
170 for (i = 0; i < 31; i++) {
171 uint32_t bit = (1 << i);
172 if ((num_wait_queues & bit) == num_wait_queues)
173 break;
174 num_wait_queues &= ~bit;
175 }
176 assert(num_wait_queues > 0);
177
178 /* Now determine how much memory we really need. */
179 whsize = P2ROUNDUP(num_wait_queues * qsz, PAGE_SIZE);
180
181 kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues,
182 whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);
183
184 if (kret != KERN_SUCCESS || wait_queues == NULL)
185 panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);
186
187 for (i = 0; i < num_wait_queues; i++) {
188 wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
189 }
190 }
191
192 void
193 wait_queue_bootstrap(void)
194 {
195 wait_queues_init();
196 _wait_queue_zone = zinit(sizeof(struct wait_queue),
197 WAIT_QUEUE_MAX * sizeof(struct wait_queue),
198 sizeof(struct wait_queue),
199 "wait queues");
200 zone_change(_wait_queue_zone, Z_NOENCRYPT, TRUE);
201
202 _wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
203 WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
204 sizeof(struct wait_queue_set),
205 "wait queue sets");
206 zone_change(_wait_queue_set_zone, Z_NOENCRYPT, TRUE);
207
208 _wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
209 WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
210 sizeof(struct _wait_queue_link),
211 "wait queue links");
212 zone_change(_wait_queue_link_zone, Z_NOENCRYPT, TRUE);
213 }
214
215 /*
216 * Routine: wait_queue_init
217 * Purpose:
218 * Initialize a previously allocated wait queue.
219 * Returns:
220 * KERN_SUCCESS - The wait_queue_t was initialized
221 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
222 */
223 kern_return_t
224 wait_queue_init(
225 wait_queue_t wq,
226 int policy)
227 {
228 /* only FIFO and LIFO for now */
229 if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
230 return KERN_INVALID_ARGUMENT;
231
232 wq->wq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
233 wq->wq_type = _WAIT_QUEUE_inited;
234 queue_init(&wq->wq_queue);
235 hw_lock_init(&wq->wq_interlock);
236 return KERN_SUCCESS;
237 }
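/*
 * A hedged sketch of initializing a wait queue embedded in a larger,
 * hypothetical structure (the structure and field names are illustrative):
 *
 *	struct my_object {
 *		struct wait_queue	mo_waitq;
 *		int			mo_state;
 *	};
 *
 *	kern_return_t kr = wait_queue_init(&obj->mo_waitq, SYNC_POLICY_FIFO);
 *	assert(kr == KERN_SUCCESS);
 */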
238
239 /*
240 * Routine: wait_queue_alloc
241 * Purpose:
242 * Allocate and initialize a wait queue for use outside of
243 * the mach part of the kernel.
244 * Conditions:
245 * Nothing locked - can block.
246 * Returns:
247 * The allocated and initialized wait queue
248 * WAIT_QUEUE_NULL if there is a resource shortage
249 */
250 wait_queue_t
251 wait_queue_alloc(
252 int policy)
253 {
254 wait_queue_t wq;
255 kern_return_t ret;
256
257 wq = (wait_queue_t) zalloc(_wait_queue_zone);
258 if (wq != WAIT_QUEUE_NULL) {
259 ret = wait_queue_init(wq, policy);
260 if (ret != KERN_SUCCESS) {
261 zfree(_wait_queue_zone, wq);
262 wq = WAIT_QUEUE_NULL;
263 }
264 }
265 return wq;
266 }
267
268 /*
269 * Routine: wait_queue_free
270 * Purpose:
271 * Free an allocated wait queue.
272 * Conditions:
273 * May block.
274 */
275 kern_return_t
276 wait_queue_free(
277 wait_queue_t wq)
278 {
279 if (!wait_queue_is_queue(wq))
280 return KERN_INVALID_ARGUMENT;
281 if (!queue_empty(&wq->wq_queue))
282 return KERN_FAILURE;
283 zfree(_wait_queue_zone, wq);
284 return KERN_SUCCESS;
285 }
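/*
 * A minimal sketch pairing wait_queue_alloc() with wait_queue_free(),
 * assuming a blockable caller (the error handling shown is illustrative):
 *
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	... wait on and wake up wq ...
 *	kern_return_t kr = wait_queue_free(wq);	(KERN_FAILURE if still in use)
 */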
286
287 /*
288 * Routine: wait_queue_set_init
289 * Purpose:
290 * Initialize a previously allocated wait queue set.
291 * Returns:
292 * KERN_SUCCESS - The wait_queue_set_t was initialized
293 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
294 */
295 kern_return_t
296 wait_queue_set_init(
297 wait_queue_set_t wqset,
298 int policy)
299 {
300 kern_return_t ret;
301
302 ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
303 if (ret != KERN_SUCCESS)
304 return ret;
305
306 wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
307 if (policy & SYNC_POLICY_PREPOST)
308 wqset->wqs_wait_queue.wq_prepost = TRUE;
309 else
310 wqset->wqs_wait_queue.wq_prepost = FALSE;
311 queue_init(&wqset->wqs_setlinks);
312 queue_init(&wqset->wqs_preposts);
313 return KERN_SUCCESS;
314 }
315
316
317 kern_return_t
318 wait_queue_sub_init(
319 wait_queue_set_t wqset,
320 int policy)
321 {
322 return wait_queue_set_init(wqset, policy);
323 }
324
325 kern_return_t
326 wait_queue_sub_clearrefs(
327 wait_queue_set_t wq_set)
328 {
329 wait_queue_link_t wql;
330 queue_t q;
331 spl_t s;
332
333 if (!wait_queue_is_set(wq_set))
334 return KERN_INVALID_ARGUMENT;
335
336 s = splsched();
337 wqs_lock(wq_set);
338 q = &wq_set->wqs_preposts;
339 while (!queue_empty(q)) {
340 queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
341 assert(!wql_is_preposted(wql));
342 }
343 wqs_unlock(wq_set);
344 splx(s);
345 return KERN_SUCCESS;
346 }
347
348 /*
349 * Routine: wait_queue_set_alloc
350 * Purpose:
351 * Allocate and initialize a wait queue set for
352 * use outside of the mach part of the kernel.
353 * Conditions:
354 * May block.
355 * Returns:
356 * The allocated and initialized wait queue set
357 * WAIT_QUEUE_SET_NULL if there is a resource shortage
358 */
359 wait_queue_set_t
360 wait_queue_set_alloc(
361 int policy)
362 {
363 wait_queue_set_t wq_set;
364
365 wq_set = (wait_queue_set_t) zalloc(_wait_queue_set_zone);
366 if (wq_set != WAIT_QUEUE_SET_NULL) {
367 kern_return_t ret;
368
369 ret = wait_queue_set_init(wq_set, policy);
370 if (ret != KERN_SUCCESS) {
371 zfree(_wait_queue_set_zone, wq_set);
372 wq_set = WAIT_QUEUE_SET_NULL;
373 }
374 }
375 return wq_set;
376 }
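/*
 * A hedged sketch of creating a pre-posting wait queue set and tearing it
 * down again, assuming a blockable caller (the policy combination shown is
 * illustrative and matches the SYNC_POLICY_PREPOST check in
 * wait_queue_set_init() above):
 *
 *	wait_queue_set_t wqs;
 *	wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
 *	if (wqs == WAIT_QUEUE_SET_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	... link member queues, wait, wake ...
 *	wait_queue_set_unlink_all(wqs);
 *	wait_queue_set_free(wqs);
 */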
377
378 /*
379 * Routine: wait_queue_set_free
380 * Purpose:
381 * Free an allocated wait queue set
382 * Conditions:
383 * May block.
384 */
385 kern_return_t
386 wait_queue_set_free(
387 wait_queue_set_t wq_set)
388 {
389 if (!wait_queue_is_set(wq_set))
390 return KERN_INVALID_ARGUMENT;
391
392 if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
393 return KERN_FAILURE;
394
395 zfree(_wait_queue_set_zone, wq_set);
396 return KERN_SUCCESS;
397 }
398
399
400 /*
401 *
402 * Routine: wait_queue_set_size
403 * Routine: wait_queue_link_size
404 * Purpose:
405 * Return the size of opaque wait queue structures
406 */
407 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
408 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
409
410 /* declare a unique type for wait queue link structures */
411 static unsigned int _wait_queue_link;
412 static unsigned int _wait_queue_link_noalloc;
413 static unsigned int _wait_queue_unlinked;
414
415 #define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
416 #define WAIT_QUEUE_LINK_NOALLOC ((void *)&_wait_queue_link_noalloc)
417 #define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
418
419 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
420 WQASSERT(((wqe)->wqe_queue == (wq) && \
421 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
422 "wait queue element list corruption: wq=%#x, wqe=%#x", \
423 (wq), (wqe))
424
425 #define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
426 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
427 (queue_t)(wql) : &(wql)->wql_setlinks)))
428
429 #define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
430 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
431 (queue_t)(wql) : &(wql)->wql_setlinks)))
432
433 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
434 WQASSERT(((((wql)->wql_type == WAIT_QUEUE_LINK) || \
435 ((wql)->wql_type == WAIT_QUEUE_LINK_NOALLOC)) && \
436 ((wql)->wql_setqueue == (wqs)) && \
437 (((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) || \
438 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_SET_inited)) && \
439 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
440 "wait queue set links corruption: wqs=%#x, wql=%#x", \
441 (wqs), (wql))
442
443 #if defined(_WAIT_QUEUE_DEBUG_)
444
445 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
446
447 #define WAIT_QUEUE_CHECK(wq) \
448 MACRO_BEGIN \
449 queue_t q2 = &(wq)->wq_queue; \
450 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
451 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
452 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
453 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
454 } \
455 MACRO_END
456
457 #define WAIT_QUEUE_SET_CHECK(wqs) \
458 MACRO_BEGIN \
459 queue_t q2 = &(wqs)->wqs_setlinks; \
460 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
461 while (!queue_end(q2, (queue_entry_t)wql2)) { \
462 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
463 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
464 } \
465 MACRO_END
466
467 #else /* !_WAIT_QUEUE_DEBUG_ */
468
469 #define WQASSERT(e, s, p0, p1) assert(e)
470
471 #define WAIT_QUEUE_CHECK(wq)
472 #define WAIT_QUEUE_SET_CHECK(wqs)
473
474 #endif /* !_WAIT_QUEUE_DEBUG_ */
475
476 /*
477 * Routine: wait_queue_member_locked
478 * Purpose:
479 * Indicate whether this wait queue is a member of the given set queue
480 * Conditions:
481 * The wait queue is locked
482 * The set queue is just that, a set queue
483 */
484 static boolean_t
485 wait_queue_member_locked(
486 wait_queue_t wq,
487 wait_queue_set_t wq_set)
488 {
489 wait_queue_element_t wq_element;
490 queue_t q;
491
492 assert(wait_queue_held(wq));
493 assert(wait_queue_is_set(wq_set));
494
495 q = &wq->wq_queue;
496
497 wq_element = (wait_queue_element_t) queue_first(q);
498 while (!queue_end(q, (queue_entry_t)wq_element)) {
499 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
500 if ((wq_element->wqe_type == WAIT_QUEUE_LINK) ||
501 (wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC)) {
502 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
503
504 if (wql->wql_setqueue == wq_set)
505 return TRUE;
506 }
507 wq_element = (wait_queue_element_t)
508 queue_next((queue_t) wq_element);
509 }
510 return FALSE;
511 }
512
513
514 /*
515 * Routine: wait_queue_member
516 * Purpose:
517 * Indicate whether this wait queue is a member of the given set queue
518 * Conditions:
519 * The set queue is just that, a set queue
520 */
521 boolean_t
522 wait_queue_member(
523 wait_queue_t wq,
524 wait_queue_set_t wq_set)
525 {
526 boolean_t ret;
527 spl_t s;
528
529 if (!wait_queue_is_set(wq_set))
530 return FALSE;
531
532 s = splsched();
533 wait_queue_lock(wq);
534 ret = wait_queue_member_locked(wq, wq_set);
535 wait_queue_unlock(wq);
536 splx(s);
537
538 return ret;
539 }
540
541
542 /*
543 * Routine: wait_queue_link_internal
544 * Purpose:
545 * Insert a set wait queue into a wait queue. This
546 * requires us to link the two together using a wait_queue_link
547 * structure that was provided.
548 * Conditions:
549 * The wait queue being inserted must be inited as a set queue
550 * The wait_queue_link structure must already be properly typed
551 */
552 static
553 kern_return_t
554 wait_queue_link_internal(
555 wait_queue_t wq,
556 wait_queue_set_t wq_set,
557 wait_queue_link_t wql)
558 {
559 wait_queue_element_t wq_element;
560 queue_t q;
561 spl_t s;
562
563 if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
564 return KERN_INVALID_ARGUMENT;
565
566 /*
567 * There are probably fewer threads and sets associated with
568 * the wait queue than there are wait queues associated with
569 * the set. So let's validate it that way.
570 */
571 s = splsched();
572 wait_queue_lock(wq);
573 q = &wq->wq_queue;
574 wq_element = (wait_queue_element_t) queue_first(q);
575 while (!queue_end(q, (queue_entry_t)wq_element)) {
576 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
577 if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
578 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
579 ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
580 wait_queue_unlock(wq);
581 splx(s);
582 return KERN_ALREADY_IN_SET;
583 }
584 wq_element = (wait_queue_element_t)
585 queue_next((queue_t) wq_element);
586 }
587
588 /*
589 * Not already a member, so we can add it.
590 */
591 wqs_lock(wq_set);
592
593 WAIT_QUEUE_SET_CHECK(wq_set);
594
595 assert(wql->wql_type == WAIT_QUEUE_LINK ||
596 wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);
597
598 wql->wql_queue = wq;
599 wql_clear_prepost(wql);
600 queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
601 wql->wql_setqueue = wq_set;
602 queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
603
604 wqs_unlock(wq_set);
605 wait_queue_unlock(wq);
606 splx(s);
607
608 return KERN_SUCCESS;
609 }
610
611 /*
612 * Routine: wait_queue_link_noalloc
613 * Purpose:
614 * Insert a set wait queue into a wait queue. This
615 * requires us to link the two together using a wait_queue_link
616 * structure provided by the caller (no allocation is done here).
617 * Conditions:
618 * The wait queue being inserted must be inited as a set queue
619 */
620 kern_return_t
621 wait_queue_link_noalloc(
622 wait_queue_t wq,
623 wait_queue_set_t wq_set,
624 wait_queue_link_t wql)
625 {
626 wql->wql_type = WAIT_QUEUE_LINK_NOALLOC;
627 return wait_queue_link_internal(wq, wq_set, wql);
628 }
629
630 /*
631 * Routine: wait_queue_link
632 * Purpose:
633 * Insert a set wait queue into a wait queue. This
634 * requires us to link the two together using a wait_queue_link
635 * structure that we allocate.
636 * Conditions:
637 * The wait queue being inserted must be inited as a set queue
638 */
639 kern_return_t
640 wait_queue_link(
641 wait_queue_t wq,
642 wait_queue_set_t wq_set)
643 {
644 wait_queue_link_t wql;
645 kern_return_t ret;
646
647 wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
648 if (wql == WAIT_QUEUE_LINK_NULL)
649 return KERN_RESOURCE_SHORTAGE;
650
651 wql->wql_type = WAIT_QUEUE_LINK;
652 ret = wait_queue_link_internal(wq, wq_set, wql);
653 if (ret != KERN_SUCCESS)
654 zfree(_wait_queue_link_zone, wql);
655
656 return ret;
657 }
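/*
 * A minimal sketch of the two linking styles above (callers and ordering
 * are hypothetical). wait_queue_link() draws its link structure from the
 * internal zone; wait_queue_link_noalloc() uses a caller-owned link:
 *
 *	(allocating form)
 *	kern_return_t kr = wait_queue_link(wq, wqs);
 *
 *	(non-allocating form, with a preallocated link)
 *	wait_queue_link_t wql = wait_queue_link_allocate();
 *	kr = wait_queue_link_noalloc(wq, wqs, wql);
 *	...
 *	kr = wait_queue_unlink_nofree(wq, wqs, &wql);
 *	wait_queue_link_free(wql);
 */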
658
659 wait_queue_link_t
660 wait_queue_link_allocate(void)
661 {
662 wait_queue_link_t wql;
663
664 wql = zalloc(_wait_queue_link_zone); /* Can't fail */
665 bzero(wql, sizeof(*wql));
666 wql->wql_type = WAIT_QUEUE_UNLINKED;
667
668 return wql;
669 }
670
671 kern_return_t
672 wait_queue_link_free(wait_queue_link_t wql)
673 {
674 zfree(_wait_queue_link_zone, wql);
675 return KERN_SUCCESS;
676 }
677
678
679 /*
680 * Routine: wait_queue_unlink_locked
681 * Purpose:
682 * Undo the linkage between a wait queue and a set.
683 */
684 static void
685 wait_queue_unlink_locked(
686 wait_queue_t wq,
687 wait_queue_set_t wq_set,
688 wait_queue_link_t wql)
689 {
690 assert(wait_queue_held(wq));
691 assert(wait_queue_held(&wq_set->wqs_wait_queue));
692
693 wql->wql_queue = WAIT_QUEUE_NULL;
694 queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
695 wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
696 queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
697 if (wql_is_preposted(wql)) {
698 queue_t ppq = &wq_set->wqs_preposts;
699 queue_remove(ppq, wql, wait_queue_link_t, wql_preposts);
700 }
701 wql->wql_type = WAIT_QUEUE_UNLINKED;
702
703 WAIT_QUEUE_CHECK(wq);
704 WAIT_QUEUE_SET_CHECK(wq_set);
705 }
706
707 /*
708 * Routine: wait_queue_unlink_nofree
709 * Purpose:
710 * Remove the linkage between a wait queue and a set,
711 * returning the linkage structure to the caller to
712 * free later.
713 * Conditions:
714 * The wait queue must currently be a member of the set queue
715 */
716 kern_return_t
717 wait_queue_unlink_nofree(
718 wait_queue_t wq,
719 wait_queue_set_t wq_set,
720 wait_queue_link_t *wqlp)
721 {
722 wait_queue_element_t wq_element;
723 wait_queue_link_t wql;
724 queue_t q;
725 spl_t s;
726
727 if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
728 return KERN_INVALID_ARGUMENT;
729 }
730 s = splsched();
731 wait_queue_lock(wq);
732
733 q = &wq->wq_queue;
734 wq_element = (wait_queue_element_t) queue_first(q);
735 while (!queue_end(q, (queue_entry_t)wq_element)) {
736 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
737 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
738 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
739
740 wql = (wait_queue_link_t)wq_element;
741
742 if (wql->wql_setqueue == wq_set) {
743
744 wqs_lock(wq_set);
745 wait_queue_unlink_locked(wq, wq_set, wql);
746 wqs_unlock(wq_set);
747 wait_queue_unlock(wq);
748 splx(s);
749 *wqlp = wql;
750 return KERN_SUCCESS;
751 }
752 }
753 wq_element = (wait_queue_element_t)
754 queue_next((queue_t) wq_element);
755 }
756 wait_queue_unlock(wq);
757 splx(s);
758 return KERN_NOT_IN_SET;
759 }
760
761 /*
762 * Routine: wait_queue_unlink
763 * Purpose:
764 * Remove the linkage between a wait queue and a set,
765 * freeing the linkage structure.
766 * Conditions:
767 * The wait queue must currently be a member of the set queue
768 */
769 kern_return_t
770 wait_queue_unlink(
771 wait_queue_t wq,
772 wait_queue_set_t wq_set)
773 {
774 wait_queue_element_t wq_element;
775 wait_queue_link_t wql;
776 queue_t q;
777 spl_t s;
778
779 if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
780 return KERN_INVALID_ARGUMENT;
781 }
782 s = splsched();
783 wait_queue_lock(wq);
784
785 q = &wq->wq_queue;
786 wq_element = (wait_queue_element_t) queue_first(q);
787 while (!queue_end(q, (queue_entry_t)wq_element)) {
788 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
789 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
790 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
791
792 wql = (wait_queue_link_t)wq_element;
793
794 if (wql->wql_setqueue == wq_set) {
795 boolean_t alloced;
796
797 alloced = (wql->wql_type == WAIT_QUEUE_LINK);
798 wqs_lock(wq_set);
799 wait_queue_unlink_locked(wq, wq_set, wql);
800 wqs_unlock(wq_set);
801 wait_queue_unlock(wq);
802 splx(s);
803 if (alloced)
804 zfree(_wait_queue_link_zone, wql);
805 return KERN_SUCCESS;
806 }
807 }
808 wq_element = (wait_queue_element_t)
809 queue_next((queue_t) wq_element);
810 }
811 wait_queue_unlock(wq);
812 splx(s);
813 return KERN_NOT_IN_SET;
814 }
815
816 /*
817 * Routine: wait_queue_unlink_all_nofree_locked
818 * Purpose:
819 * Remove the linkage between a wait queue and all its sets.
820 * All the linkage structures are returned to the caller for
821 * later freeing.
822 * Conditions:
823 * Wait queue locked.
824 */
825
826 static void
827 wait_queue_unlink_all_nofree_locked(
828 wait_queue_t wq,
829 queue_t links)
830 {
831 wait_queue_element_t wq_element;
832 wait_queue_element_t wq_next_element;
833 wait_queue_set_t wq_set;
834 wait_queue_link_t wql;
835 queue_t q;
836
837 q = &wq->wq_queue;
838
839 wq_element = (wait_queue_element_t) queue_first(q);
840 while (!queue_end(q, (queue_entry_t)wq_element)) {
841
842 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
843 wq_next_element = (wait_queue_element_t)
844 queue_next((queue_t) wq_element);
845
846 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
847 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
848 wql = (wait_queue_link_t)wq_element;
849 wq_set = wql->wql_setqueue;
850 wqs_lock(wq_set);
851 wait_queue_unlink_locked(wq, wq_set, wql);
852 wqs_unlock(wq_set);
853 enqueue(links, &wql->wql_links);
854 }
855 wq_element = wq_next_element;
856 }
857 }
858
859 /*
860 * Routine: wait_queue_unlink_all_nofree
861 * Purpose:
862 * Remove the linkage between a wait queue and all its sets.
863 * All the linkage structures are returned to the caller for
864 * later freeing.
865 * Conditions:
866 * Nothing of interest locked.
867 */
868
869 kern_return_t
870 wait_queue_unlink_all_nofree(
871 wait_queue_t wq,
872 queue_t links)
873 {
874 spl_t s;
875
876 if (!wait_queue_is_valid(wq)) {
877 return KERN_INVALID_ARGUMENT;
878 }
879
880 s = splsched();
881 wait_queue_lock(wq);
882 wait_queue_unlink_all_nofree_locked(wq, links);
883 wait_queue_unlock(wq);
884 splx(s);
885
886 return(KERN_SUCCESS);
887 }
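/*
 * A hedged sketch of the "nofree" pattern, assuming the caller wants to
 * defer freeing the links until after dropping its own locks (names are
 * illustrative):
 *
 *	queue_head_t free_list;
 *	queue_init(&free_list);
 *	wait_queue_unlink_all_nofree(wq, &free_list);
 *	... drop caller-held locks ...
 *	while (!queue_empty(&free_list)) {
 *		wait_queue_link_t wql = (wait_queue_link_t) dequeue(&free_list);
 *		wait_queue_link_free(wql);
 *	}
 */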
888
889 /*
890 * Routine: wait_queue_unlink_all_locked
891 * Purpose:
892 * Remove the linkage between a locked wait queue and all its
893 * sets and enqueue the allocated ones onto the links queue
894 * provided.
895 * Conditions:
896 * Wait queue locked.
897 */
898 static void
899 wait_queue_unlink_all_locked(
900 wait_queue_t wq,
901 queue_t links)
902 {
903 wait_queue_element_t wq_element;
904 wait_queue_element_t wq_next_element;
905 wait_queue_set_t wq_set;
906 wait_queue_link_t wql;
907 queue_t q;
908
909 q = &wq->wq_queue;
910
911 wq_element = (wait_queue_element_t) queue_first(q);
912 while (!queue_end(q, (queue_entry_t)wq_element)) {
913 boolean_t alloced;
914
915 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
916 wq_next_element = (wait_queue_element_t)
917 queue_next((queue_t) wq_element);
918
919 alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
920 if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
921 wql = (wait_queue_link_t)wq_element;
922 wq_set = wql->wql_setqueue;
923 wqs_lock(wq_set);
924 wait_queue_unlink_locked(wq, wq_set, wql);
925 wqs_unlock(wq_set);
926 if (alloced)
927 enqueue(links, &wql->wql_links);
928 }
929 wq_element = wq_next_element;
930 }
931
932 }
933
934
935 /*
936 * Routine: wait_queue_unlink_all
937 * Purpose:
938 * Remove the linkage between a wait queue and all its sets.
939 * All the linkage structures that were allocated internally
940 * are freed. The others are the caller's responsibility.
941 * Conditions:
942 * Nothing of interest locked.
943 */
944
945 kern_return_t
946 wait_queue_unlink_all(
947 wait_queue_t wq)
948 {
949 wait_queue_link_t wql;
950 queue_head_t links_queue_head;
951 queue_t links = &links_queue_head;
952 spl_t s;
953
954 if (!wait_queue_is_valid(wq)) {
955 return KERN_INVALID_ARGUMENT;
956 }
957
958 queue_init(links);
959
960 s = splsched();
961 wait_queue_lock(wq);
962 wait_queue_unlink_all_locked(wq, links);
963 wait_queue_unlock(wq);
964 splx(s);
965
966 while(!queue_empty(links)) {
967 wql = (wait_queue_link_t) dequeue(links);
968 zfree(_wait_queue_link_zone, wql);
969 }
970
971 return(KERN_SUCCESS);
972 }
973
974 /* legacy interface naming */
975 kern_return_t
976 wait_subqueue_unlink_all(
977 wait_queue_set_t wq_set)
978 {
979 return wait_queue_set_unlink_all(wq_set);
980 }
981
982
983 /*
984 * Routine: wait_queue_set_unlink_all_nofree
985 * Purpose:
986 * Remove the linkage between a set wait queue and all its
987 * member wait queues and all the sets it may be a member of.
988 * The links structures are returned for later freeing by the
989 * caller.
990 * Conditions:
991 * The wait queue must be a set
992 */
993 kern_return_t
994 wait_queue_set_unlink_all_nofree(
995 wait_queue_set_t wq_set,
996 queue_t links)
997 {
998 wait_queue_link_t wql;
999 wait_queue_t wq;
1000 queue_t q;
1001 spl_t s;
1002
1003 if (!wait_queue_is_set(wq_set)) {
1004 return KERN_INVALID_ARGUMENT;
1005 }
1006
1007 retry:
1008 s = splsched();
1009 wqs_lock(wq_set);
1010
1011 /* remove the wait queues that are members of our set */
1012 q = &wq_set->wqs_setlinks;
1013
1014 wql = (wait_queue_link_t)queue_first(q);
1015 while (!queue_end(q, (queue_entry_t)wql)) {
1016 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
1017 wq = wql->wql_queue;
1018 if (wait_queue_lock_try(wq)) {
1019 wait_queue_unlink_locked(wq, wq_set, wql);
1020 wait_queue_unlock(wq);
1021 enqueue(links, &wql->wql_links);
1022 wql = (wait_queue_link_t)queue_first(q);
1023 } else {
1024 wqs_unlock(wq_set);
1025 splx(s);
1026 delay(1);
1027 goto retry;
1028 }
1029 }
1030
1031 /* remove this set from sets it belongs to */
1032 wait_queue_unlink_all_nofree_locked(&wq_set->wqs_wait_queue, links);
1033
1034 wqs_unlock(wq_set);
1035 splx(s);
1036
1037 return(KERN_SUCCESS);
1038 }
1039
1040 /*
1041 * Routine: wait_queue_set_unlink_all
1042 * Purpose:
1043 * Remove the linkage between a set wait queue and all its
1044 * member wait queues and all the sets it may be a member of.
1045 * The link structures are freed for those links which were
1046 * dynamically allocated.
1047 * Conditions:
1048 * The wait queue must be a set
1049 */
1050 kern_return_t
1051 wait_queue_set_unlink_all(
1052 wait_queue_set_t wq_set)
1053 {
1054 wait_queue_link_t wql;
1055 wait_queue_t wq;
1056 queue_t q;
1057 queue_head_t links_queue_head;
1058 queue_t links = &links_queue_head;
1059 spl_t s;
1060
1061 if (!wait_queue_is_set(wq_set)) {
1062 return KERN_INVALID_ARGUMENT;
1063 }
1064
1065 queue_init(links);
1066
1067 retry:
1068 s = splsched();
1069 wqs_lock(wq_set);
1070
1071 /* remove the wait queues that are members of our set */
1072 q = &wq_set->wqs_setlinks;
1073
1074 wql = (wait_queue_link_t)queue_first(q);
1075 while (!queue_end(q, (queue_entry_t)wql)) {
1076 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
1077 wq = wql->wql_queue;
1078 if (wait_queue_lock_try(wq)) {
1079 boolean_t alloced;
1080
1081 alloced = (wql->wql_type == WAIT_QUEUE_LINK);
1082 wait_queue_unlink_locked(wq, wq_set, wql);
1083 wait_queue_unlock(wq);
1084 if (alloced)
1085 enqueue(links, &wql->wql_links);
1086 wql = (wait_queue_link_t)queue_first(q);
1087 } else {
1088 wqs_unlock(wq_set);
1089 splx(s);
1090 delay(1);
1091 goto retry;
1092 }
1093 }
1094
1095
1096 /* remove this set from sets it belongs to */
1097 wait_queue_unlink_all_locked(&wq_set->wqs_wait_queue, links);
1098
1099 wqs_unlock(wq_set);
1100 splx(s);
1101
1102 while (!queue_empty (links)) {
1103 wql = (wait_queue_link_t) dequeue(links);
1104 zfree(_wait_queue_link_zone, wql);
1105 }
1106 return(KERN_SUCCESS);
1107 }
1108
1109 kern_return_t
1110 wait_queue_set_unlink_one(
1111 wait_queue_set_t wq_set,
1112 wait_queue_link_t wql)
1113 {
1114 wait_queue_t wq;
1115 spl_t s;
1116
1117 assert(wait_queue_is_set(wq_set));
1118
1119 retry:
1120 s = splsched();
1121 wqs_lock(wq_set);
1122
1123 WAIT_QUEUE_SET_CHECK(wq_set);
1124
1125 /* Already unlinked, e.g. by selclearthread() */
1126 if (wql->wql_type == WAIT_QUEUE_UNLINKED) {
1127 goto out;
1128 }
1129
1130 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
1131
1132 /* On a wait queue, and we hold set queue lock ... */
1133 wq = wql->wql_queue;
1134 if (wait_queue_lock_try(wq)) {
1135 wait_queue_unlink_locked(wq, wq_set, wql);
1136 wait_queue_unlock(wq);
1137 } else {
1138 wqs_unlock(wq_set);
1139 splx(s);
1140 delay(1);
1141 goto retry;
1142 }
1143
1144 out:
1145 wqs_unlock(wq_set);
1146 splx(s);
1147
1148 return KERN_SUCCESS;
1149 }
1150
1151 /*
1152 * Routine: wait_queue_assert_wait64_locked
1153 * Purpose:
1154 * Insert the current thread into the supplied wait queue
1155 * waiting for a particular event to be posted to that queue.
1156 *
1157 * Conditions:
1158 * The wait queue is assumed locked.
1159 * The waiting thread is assumed locked.
1160 *
1161 */
1162 __private_extern__ wait_result_t
1163 wait_queue_assert_wait64_locked(
1164 wait_queue_t wq,
1165 event64_t event,
1166 wait_interrupt_t interruptible,
1167 uint64_t deadline,
1168 thread_t thread)
1169 {
1170 wait_result_t wait_result;
1171 boolean_t realtime;
1172
1173 if (!wait_queue_assert_possible(thread))
1174 panic("wait_queue_assert_wait64_locked");
1175
1176 if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
1177 wait_queue_set_t wqs = (wait_queue_set_t)wq;
1178
1179 if (event == NO_EVENT64 && wqs_is_preposted(wqs))
1180 return(THREAD_AWAKENED);
1181 }
1182
1183 /*
1184 * Realtime threads get priority for wait queue placements.
1185 * This allows wait_queue_wakeup_one to prefer a waiting
1186 * realtime thread, similar in principle to performing
1187 * a wait_queue_wakeup_all and allowing scheduler prioritization
1188 * to run the realtime thread, but without causing the
1189 * lock contention of that scenario.
1190 */
1191 realtime = (thread->sched_pri >= BASEPRI_REALTIME);
1192
1193 /*
1194 * This is the extent to which we currently take scheduling attributes
1195 * into account. If the thread is vm privileged, we stick it at
1196 * the front of the queue. Later, these queues will honor the policy
1197 * value set at wait_queue_init time.
1198 */
1199 wait_result = thread_mark_wait_locked(thread, interruptible);
1200 if (wait_result == THREAD_WAITING) {
1201 if (!wq->wq_fifo
1202 || (thread->options & TH_OPT_VMPRIV)
1203 || realtime)
1204 enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
1205 else
1206 enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
1207
1208 thread->wait_event = event;
1209 thread->wait_queue = wq;
1210
1211 if (deadline != 0) {
1212 uint32_t flags;
1213
1214 flags = realtime ? TIMER_CALL_CRITICAL : 0;
1215
1216 if (!timer_call_enter(&thread->wait_timer, deadline, flags))
1217 thread->wait_timer_active++;
1218 thread->wait_timer_is_set = TRUE;
1219 }
1220 }
1221 return(wait_result);
1222 }
1223
1224 /*
1225 * Routine: wait_queue_assert_wait
1226 * Purpose:
1227 * Insert the current thread into the supplied wait queue
1228 * waiting for a particular event to be posted to that queue.
1229 *
1230 * Conditions:
1231 * nothing of interest locked.
1232 */
1233 wait_result_t
1234 wait_queue_assert_wait(
1235 wait_queue_t wq,
1236 event_t event,
1237 wait_interrupt_t interruptible,
1238 uint64_t deadline)
1239 {
1240 spl_t s;
1241 wait_result_t ret;
1242 thread_t thread = current_thread();
1243
1244 /* If it is an invalid wait queue, you can't wait on it */
1245 if (!wait_queue_is_valid(wq))
1246 return (thread->wait_result = THREAD_RESTART);
1247
1248 s = splsched();
1249 wait_queue_lock(wq);
1250 thread_lock(thread);
1251 ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
1252 interruptible, deadline, thread);
1253 thread_unlock(thread);
1254 wait_queue_unlock(wq);
1255 splx(s);
1256 return(ret);
1257 }
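/*
 * A hedged sketch of waiting on an explicit wait queue with a deadline,
 * assuming a blockable caller (the object, event, and 100ms interval are
 * illustrative):
 *
 *	uint64_t deadline;
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	wait_result_t wr = wait_queue_assert_wait(wq, (event_t)&obj->state,
 *						   THREAD_ABORTSAFE, deadline);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	if (wr == THREAD_TIMED_OUT)
 *		... the deadline expired before a wakeup arrived ...
 */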
1258
1259 /*
1260 * Routine: wait_queue_assert_wait64
1261 * Purpose:
1262 * Insert the current thread into the supplied wait queue
1263 * waiting for a particular event to be posted to that queue.
1264 * Conditions:
1265 * nothing of interest locked.
1266 */
1267 wait_result_t
1268 wait_queue_assert_wait64(
1269 wait_queue_t wq,
1270 event64_t event,
1271 wait_interrupt_t interruptible,
1272 uint64_t deadline)
1273 {
1274 spl_t s;
1275 wait_result_t ret;
1276 thread_t thread = current_thread();
1277
1278 /* If it is an invalid wait queue, you can't wait on it */
1279 if (!wait_queue_is_valid(wq))
1280 return (thread->wait_result = THREAD_RESTART);
1281
1282 s = splsched();
1283 wait_queue_lock(wq);
1284 thread_lock(thread);
1285 ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
1286 thread_unlock(thread);
1287 wait_queue_unlock(wq);
1288 splx(s);
1289 return(ret);
1290 }
1291
1292 /*
1293 * Routine: _wait_queue_select64_all
1294 * Purpose:
1295 * Select all threads off a wait queue that meet the
1296 * supplied criteria.
1297 * Conditions:
1298 * at splsched
1299 * wait queue locked
1300 * wake_queue initialized and ready for insertion
1301 * possibly recursive
1302 * Returns:
1303 * a queue of locked threads
1304 */
1305 static void
1306 _wait_queue_select64_all(
1307 wait_queue_t wq,
1308 event64_t event,
1309 queue_t wake_queue)
1310 {
1311 wait_queue_element_t wq_element;
1312 wait_queue_element_t wqe_next;
1313 queue_t q;
1314
1315 q = &wq->wq_queue;
1316
1317 wq_element = (wait_queue_element_t) queue_first(q);
1318 while (!queue_end(q, (queue_entry_t)wq_element)) {
1319 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1320 wqe_next = (wait_queue_element_t)
1321 queue_next((queue_t) wq_element);
1322
1323 /*
1324 * We may have to recurse if this is a compound wait queue.
1325 */
1326 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1327 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1328 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1329 wait_queue_set_t set_queue = wql->wql_setqueue;
1330
1331 /*
1332 * We have to check the set wait queue. If it is marked
1333 * as pre-post, and it is the "generic event" then mark
1334 * it pre-posted now (if not already).
1335 */
1336 wqs_lock(set_queue);
1337 if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
1338 queue_t ppq = &set_queue->wqs_preposts;
1339 queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
1340 }
1341 if (! wait_queue_empty(&set_queue->wqs_wait_queue))
1342 _wait_queue_select64_all(&set_queue->wqs_wait_queue, event, wake_queue);
1343 wqs_unlock(set_queue);
1344 } else {
1345
1346 /*
1347 * Otherwise, it's a thread. If it is waiting on
1348 * the event we are posting to this queue, pull
1349 * it off the queue and stick it in our wake_queue.
1350 */
1351 thread_t t = (thread_t)wq_element;
1352
1353 if (t->wait_event == event) {
1354 thread_lock(t);
1355 remqueue((queue_entry_t) t);
1356 enqueue (wake_queue, (queue_entry_t) t);
1357 t->wait_queue = WAIT_QUEUE_NULL;
1358 t->wait_event = NO_EVENT64;
1359 t->at_safe_point = FALSE;
1360 /* returned locked */
1361 }
1362 }
1363 wq_element = wqe_next;
1364 }
1365 }
1366
1367 /*
1368 * Routine: wait_queue_wakeup64_all_locked
1369 * Purpose:
1370 * Wakeup some number of threads that are in the specified
1371 * wait queue and waiting on the specified event.
1372 * Conditions:
1373 * wait queue already locked (may be released).
1374 * Returns:
1375 * KERN_SUCCESS - Threads were woken up
1376 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1377 */
1378 __private_extern__ kern_return_t
1379 wait_queue_wakeup64_all_locked(
1380 wait_queue_t wq,
1381 event64_t event,
1382 wait_result_t result,
1383 boolean_t unlock)
1384 {
1385 queue_head_t wake_queue_head;
1386 queue_t q = &wake_queue_head;
1387 kern_return_t res;
1388
1389 // assert(wait_queue_held(wq));
1390 // if(!wq->wq_interlock.lock_data) { /* (BRINGUP */
1391 // panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq); /* (BRINGUP) */
1392 // }
1393
1394 queue_init(q);
1395
1396 /*
1397 * Select the threads that we will wake up. The threads
1398 * are returned to us locked and cleanly removed from the
1399 * wait queue.
1400 */
1401 _wait_queue_select64_all(wq, event, q);
1402 if (unlock)
1403 wait_queue_unlock(wq);
1404
1405 /*
1406 * For each thread, set it running.
1407 */
1408 res = KERN_NOT_WAITING;
1409 while (!queue_empty (q)) {
1410 thread_t thread = (thread_t) dequeue(q);
1411 res = thread_go(thread, result);
1412 assert(res == KERN_SUCCESS);
1413 thread_unlock(thread);
1414 }
1415 return res;
1416 }
1417
1418
1419 /*
1420 * Routine: wait_queue_wakeup_all
1421 * Purpose:
1422 * Wakeup some number of threads that are in the specified
1423 * wait queue and waiting on the specified event.
1424 * Conditions:
1425 * Nothing locked
1426 * Returns:
1427 * KERN_SUCCESS - Threads were woken up
1428 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1429 */
1430 kern_return_t
1431 wait_queue_wakeup_all(
1432 wait_queue_t wq,
1433 event_t event,
1434 wait_result_t result)
1435 {
1436 kern_return_t ret;
1437 spl_t s;
1438
1439 if (!wait_queue_is_valid(wq)) {
1440 return KERN_INVALID_ARGUMENT;
1441 }
1442
1443 s = splsched();
1444 wait_queue_lock(wq);
1445 // if(!wq->wq_interlock.lock_data) { /* (BRINGUP */
1446 // panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq); /* (BRINGUP) */
1447 // }
1448 ret = wait_queue_wakeup64_all_locked(
1449 wq, CAST_DOWN(event64_t,event),
1450 result, TRUE);
1451 /* lock released */
1452 splx(s);
1453 return ret;
1454 }
1455
1456 /*
1457 * Routine: wait_queue_wakeup64_all
1458 * Purpose:
1459 * Wakeup some number of threads that are in the specified
1460 * wait queue and waiting on the specified event.
1461 * Conditions:
1462 * Nothing locked
1463 * Returns:
1464 * KERN_SUCCESS - Threads were woken up
1465 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1466 */
1467 kern_return_t
1468 wait_queue_wakeup64_all(
1469 wait_queue_t wq,
1470 event64_t event,
1471 wait_result_t result)
1472 {
1473 kern_return_t ret;
1474 spl_t s;
1475
1476 if (!wait_queue_is_valid(wq)) {
1477 return KERN_INVALID_ARGUMENT;
1478 }
1479
1480 s = splsched();
1481 wait_queue_lock(wq);
1482 ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1483 /* lock released */
1484 splx(s);
1485 return ret;
1486 }
1487
1488 /*
1489 * Routine: _wait_queue_select64_one
1490 * Purpose:
1491 * Select the best thread off a wait queue that meets the
1492 * supplied criteria.
1493 * Conditions:
1494 * at splsched
1495 * wait queue locked
1496 * possibly recursive
1497 * Returns:
1498 * a locked thread - if one found
1499 * Note:
1500 * This is where the sync policy of the wait queue comes
1501 * into effect. For now, we just assume FIFO/LIFO.
1502 */
1503 static thread_t
1504 _wait_queue_select64_one(
1505 wait_queue_t wq,
1506 event64_t event)
1507 {
1508 wait_queue_element_t wq_element;
1509 wait_queue_element_t wqe_next;
1510 thread_t t = THREAD_NULL;
1511 queue_t q;
1512
1513 q = &wq->wq_queue;
1514
1515 wq_element = (wait_queue_element_t) queue_first(q);
1516 while (!queue_end(q, (queue_entry_t)wq_element)) {
1517 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1518 wqe_next = (wait_queue_element_t)
1519 queue_next((queue_t) wq_element);
1520
1521 /*
1522 * We may have to recurse if this is a compound wait queue.
1523 */
1524 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1525 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1526 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1527 wait_queue_set_t set_queue = wql->wql_setqueue;
1528
1529 /*
1530 * We have to check the set wait queue. If the set
1531 * supports pre-posting, it isn't already preposted,
1532 * and we didn't find a thread in the set, then mark it.
1533 *
1534 * If we later find a thread, there may be a spurious
1535 * pre-post here on this set. The wait side has to check
1536 * for that either pre- or post-wait.
1537 */
1538 wqs_lock(set_queue);
1539 if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
1540 t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
1541 }
1542 if (t != THREAD_NULL) {
1543 wqs_unlock(set_queue);
1544 return t;
1545 }
1546 if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
1547 queue_t ppq = &set_queue->wqs_preposts;
1548 queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
1549 }
1550 wqs_unlock(set_queue);
1551
1552 } else {
1553
1554 /*
1555 * Otherwise, it's a thread. If it is waiting on
1556 * the event we are posting to this queue, pull
1557 * it off the queue and return it, still locked.
1558 */
1559 t = (thread_t)wq_element;
1560 if (t->wait_event == event) {
1561 thread_lock(t);
1562 remqueue((queue_entry_t) t);
1563 t->wait_queue = WAIT_QUEUE_NULL;
1564 t->wait_event = NO_EVENT64;
1565 t->at_safe_point = FALSE;
1566 return t; /* still locked */
1567 }
1568
1569 t = THREAD_NULL;
1570 }
1571 wq_element = wqe_next;
1572 }
1573 return THREAD_NULL;
1574 }
1575
1576
1577 /*
1578 * Routine: wait_queue_pull_thread_locked
1579 * Purpose:
1580 * Pull a thread off its wait queue and (possibly) unlock
1581 * the waitq.
1582 * Conditions:
1583 * at splsched
1584 * wait queue locked
1585 * thread locked
1586 * Returns:
1587 * with the thread still locked.
1588 */
1589 void
1590 wait_queue_pull_thread_locked(
1591 wait_queue_t waitq,
1592 thread_t thread,
1593 boolean_t unlock)
1594 {
1595
1596 assert(thread->wait_queue == waitq);
1597
1598 remqueue((queue_entry_t)thread );
1599 thread->wait_queue = WAIT_QUEUE_NULL;
1600 thread->wait_event = NO_EVENT64;
1601 thread->at_safe_point = FALSE;
1602 if (unlock)
1603 wait_queue_unlock(waitq);
1604 }
1605
1606
1607 /*
1608 * Routine: wait_queue_select64_thread
1609 * Purpose:
1610 * Look for a thread and remove it from the queues, if
1611 * (and only if) the thread is waiting on the supplied
1612 * <wait_queue, event> pair.
1613 * Conditions:
1614 * at splsched
1615 * wait queue locked
1616 * possibly recursive
1617 * Returns:
1618 * KERN_NOT_WAITING: Thread is not waiting here.
1619 * KERN_SUCCESS: It was, and is now removed (returned locked)
1620 */
1621 static kern_return_t
1622 _wait_queue_select64_thread(
1623 wait_queue_t wq,
1624 event64_t event,
1625 thread_t thread)
1626 {
1627 wait_queue_element_t wq_element;
1628 wait_queue_element_t wqe_next;
1629 kern_return_t res = KERN_NOT_WAITING;
1630 queue_t q = &wq->wq_queue;
1631
1632 thread_lock(thread);
1633 if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1634 remqueue((queue_entry_t) thread);
1635 thread->at_safe_point = FALSE;
1636 thread->wait_event = NO_EVENT64;
1637 thread->wait_queue = WAIT_QUEUE_NULL;
1638 /* thread still locked */
1639 return KERN_SUCCESS;
1640 }
1641 thread_unlock(thread);
1642
1643 /*
1644 * The wait_queue associated with the thread may be one of this
1645 * wait queue's sets. Go see. If so, removing it from
1646 * there is like removing it from here.
1647 */
1648 wq_element = (wait_queue_element_t) queue_first(q);
1649 while (!queue_end(q, (queue_entry_t)wq_element)) {
1650 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1651 wqe_next = (wait_queue_element_t)
1652 queue_next((queue_t) wq_element);
1653
1654 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1655 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1656 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1657 wait_queue_set_t set_queue = wql->wql_setqueue;
1658
1659 wqs_lock(set_queue);
1660 if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
1661 res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
1662 event,
1663 thread);
1664 }
1665 wqs_unlock(set_queue);
1666 if (res == KERN_SUCCESS)
1667 return KERN_SUCCESS;
1668 }
1669 wq_element = wqe_next;
1670 }
1671 return res;
1672 }
1673
1674
1675 /*
1676 * Routine: wait_queue_wakeup64_identity_locked
1677 * Purpose:
1678 * Select a single thread that is most eligible to run and set
1679 * it running, but return the thread locked.
1680 *
1681 * Conditions:
1682 * at splsched
1683 * wait queue locked
1684 * possibly recursive
1685 * Returns:
1686 * a pointer to the locked thread that was awakened
1687 */
1688 __private_extern__ thread_t
1689 wait_queue_wakeup64_identity_locked(
1690 wait_queue_t wq,
1691 event64_t event,
1692 wait_result_t result,
1693 boolean_t unlock)
1694 {
1695 kern_return_t res;
1696 thread_t thread;
1697
1698 assert(wait_queue_held(wq));
1699
1700 thread = _wait_queue_select64_one(wq, event);
1701 if (unlock)
1702 wait_queue_unlock(wq);
1703
1704 if (thread) {
1705 res = thread_go(thread, result);
1706 assert(res == KERN_SUCCESS);
1707 }
1708 return thread; /* still locked if not NULL */
1709 }
1710
1711
1712 /*
1713 * Routine: wait_queue_wakeup64_one_locked
1714 * Purpose:
1715 * Select a single thread that is most eligible to run and set
1716 * it running.
1717 *
1718 * Conditions:
1719 * at splsched
1720 * wait queue locked
1721 * possibly recursive
1722 * Returns:
1723 * KERN_SUCCESS - A thread was found waiting and awakened
1724 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1725 */
1726 __private_extern__ kern_return_t
1727 wait_queue_wakeup64_one_locked(
1728 wait_queue_t wq,
1729 event64_t event,
1730 wait_result_t result,
1731 boolean_t unlock)
1732 {
1733 thread_t thread;
1734
1735 assert(wait_queue_held(wq));
1736
1737 thread = _wait_queue_select64_one(wq, event);
1738 if (unlock)
1739 wait_queue_unlock(wq);
1740
1741 if (thread) {
1742 kern_return_t res;
1743
1744 res = thread_go(thread, result);
1745 assert(res == KERN_SUCCESS);
1746 thread_unlock(thread);
1747 return res;
1748 }
1749
1750 return KERN_NOT_WAITING;
1751 }
1752
1753 /*
1754 * Routine: wait_queue_wakeup_one
1755 * Purpose:
1756 * Wakeup the most appropriate thread that is in the specified
1757 * wait queue for the specified event.
1758 * Conditions:
1759 * Nothing locked
1760 * Returns:
1761 * KERN_SUCCESS - Thread was woken up
1762 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1763 */
1764 kern_return_t
1765 wait_queue_wakeup_one(
1766 wait_queue_t wq,
1767 event_t event,
1768 wait_result_t result,
1769 int priority)
1770 {
1771 thread_t thread;
1772 spl_t s;
1773
1774 if (!wait_queue_is_valid(wq)) {
1775 return KERN_INVALID_ARGUMENT;
1776 }
1777
1778 s = splsched();
1779 wait_queue_lock(wq);
1780 thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
1781 wait_queue_unlock(wq);
1782
1783 if (thread) {
1784 kern_return_t res;
1785
1786 if (thread->sched_pri < priority) {
1787 if (priority <= MAXPRI) {
1788 set_sched_pri(thread, priority);
1789
1790 thread->was_promoted_on_wakeup = 1;
1791 thread->sched_flags |= TH_SFLAG_PROMOTED;
1792 }
1793 }
1794 res = thread_go(thread, result);
1795 assert(res == KERN_SUCCESS);
1796 thread_unlock(thread);
1797 splx(s);
1798 return res;
1799 }
1800
1801 splx(s);
1802 return KERN_NOT_WAITING;
1803 }
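/*
 * A minimal sketch of the wake side for the routine above (names are
 * illustrative; passing -1 for priority asks for no wakeup-time promotion):
 *
 *	obj->state = OBJ_READY;
 *	kern_return_t kr = wait_queue_wakeup_one(wq, (event_t)&obj->state,
 *						  THREAD_AWAKENED, -1);
 *	if (kr == KERN_NOT_WAITING)
 *		... no thread was waiting on that event ...
 */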
1804
1805 /*
1806 * Routine: wait_queue_wakeup64_one
1807 * Purpose:
1808 * Wakeup the most appropriate thread that is in the specified
1809 * wait queue for the specified event.
1810 * Conditions:
1811 * Nothing locked
1812 * Returns:
1813 * KERN_SUCCESS - Thread was woken up
1814 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1815 */
1816 kern_return_t
1817 wait_queue_wakeup64_one(
1818 wait_queue_t wq,
1819 event64_t event,
1820 wait_result_t result)
1821 {
1822 thread_t thread;
1823 spl_t s;
1824
1825 if (!wait_queue_is_valid(wq)) {
1826 return KERN_INVALID_ARGUMENT;
1827 }
1828 s = splsched();
1829 wait_queue_lock(wq);
1830 thread = _wait_queue_select64_one(wq, event);
1831 wait_queue_unlock(wq);
1832
1833 if (thread) {
1834 kern_return_t res;
1835
1836 res = thread_go(thread, result);
1837 assert(res == KERN_SUCCESS);
1838 thread_unlock(thread);
1839 splx(s);
1840 return res;
1841 }
1842
1843 splx(s);
1844 return KERN_NOT_WAITING;
1845 }
1846
1847
1848 /*
1849 * Routine: wait_queue_wakeup64_thread_locked
1850 * Purpose:
1851 * Wakeup the particular thread that was specified if and only if
1852 * it was in this wait queue (or one of its set queues)
1853 * and waiting on the specified event.
1854 *
1855 * This is much safer than just removing the thread from
1856 * whatever wait queue it happens to be on. For instance, it
1857 * may have already been awoken from the wait you intended to
1858 * interrupt and waited on something else (like another
1859 * semaphore).
1860 * Conditions:
1861 * at splsched
1862 * wait queue already locked (may be released).
1863 * Returns:
1864 * KERN_SUCCESS - the thread was found waiting and awakened
1865 * KERN_NOT_WAITING - the thread was not waiting here
1866 */
1867 __private_extern__ kern_return_t
1868 wait_queue_wakeup64_thread_locked(
1869 wait_queue_t wq,
1870 event64_t event,
1871 thread_t thread,
1872 wait_result_t result,
1873 boolean_t unlock)
1874 {
1875 kern_return_t res;
1876
1877 assert(wait_queue_held(wq));
1878
1879 /*
1880 * See if the thread was still waiting there. If so, it got
1881 * dequeued and returned locked.
1882 */
1883 res = _wait_queue_select64_thread(wq, event, thread);
1884 if (unlock)
1885 wait_queue_unlock(wq);
1886
1887 if (res != KERN_SUCCESS)
1888 return KERN_NOT_WAITING;
1889
1890 res = thread_go(thread, result);
1891 assert(res == KERN_SUCCESS);
1892 thread_unlock(thread);
1893 return res;
1894 }
1895
1896 /*
1897 * Routine: wait_queue_wakeup_thread
1898 * Purpose:
1899 * Wakeup the particular thread that was specified if and only if
1900 * it was in this wait queue (or one of its set queues)
1901 * and waiting on the specified event.
1902 *
1903 * This is much safer than just removing the thread from
1904 * whatever wait queue it happens to be on. For instance, it
1905 * may have already been awoken from the wait you intended to
1906 * interrupt and waited on something else (like another
1907 * semaphore).
1908 * Conditions:
1909 * nothing of interest locked
1910 * an splsched() level is not assumed, so it is raised here
1911 * Returns:
1912 * KERN_SUCCESS - the thread was found waiting and awakened
1913 * KERN_NOT_WAITING - the thread was not waiting here
1914 */
1915 kern_return_t
1916 wait_queue_wakeup_thread(
1917 wait_queue_t wq,
1918 event_t event,
1919 thread_t thread,
1920 wait_result_t result)
1921 {
1922 kern_return_t res;
1923 spl_t s;
1924
1925 if (!wait_queue_is_valid(wq)) {
1926 return KERN_INVALID_ARGUMENT;
1927 }
1928
1929 s = splsched();
1930 wait_queue_lock(wq);
1931 res = _wait_queue_select64_thread(wq, CAST_DOWN(event64_t,event), thread);
1932 wait_queue_unlock(wq);
1933
1934 if (res == KERN_SUCCESS) {
1935 res = thread_go(thread, result);
1936 assert(res == KERN_SUCCESS);
1937 thread_unlock(thread);
1938 splx(s);
1939 return res;
1940 }
1941 splx(s);
1942 return KERN_NOT_WAITING;
1943 }
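/*
 * A hedged sketch of a targeted wakeup, assuming the caller holds a
 * reference on the thread it wants to interrupt (names are illustrative):
 *
 *	kern_return_t kr = wait_queue_wakeup_thread(wq, (event_t)&obj->state,
 *						     target_thread, THREAD_INTERRUPTED);
 *	if (kr == KERN_NOT_WAITING)
 *		... the thread had already been awakened or moved on ...
 */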
1944
1945 /*
1946 * Routine: wait_queue_wakeup64_thread
1947 * Purpose:
1948 * Wakeup the particular thread that was specified if and only if
1949 * it was in this wait queue (or one of its set queues)
1950 * and waiting on the specified event.
1951 *
1952 * This is much safer than just removing the thread from
1953 * whatever wait queue it happens to be on. For instance, it
1954 * may have already been awoken from the wait you intended to
1955 * interrupt and waited on something else (like another
1956 * semaphore).
1957 * Conditions:
1958 * nothing of interest locked
1959 * an splsched() level is not assumed, so it is raised here
1960 * Returns:
1961 * KERN_SUCCESS - the thread was found waiting and awakened
1962 * KERN_NOT_WAITING - the thread was not waiting here
1963 */
1964 kern_return_t
1965 wait_queue_wakeup64_thread(
1966 wait_queue_t wq,
1967 event64_t event,
1968 thread_t thread,
1969 wait_result_t result)
1970 {
1971 kern_return_t res;
1972 spl_t s;
1973
1974 if (!wait_queue_is_valid(wq)) {
1975 return KERN_INVALID_ARGUMENT;
1976 }
1977
1978 s = splsched();
1979 wait_queue_lock(wq);
1980 res = _wait_queue_select64_thread(wq, event, thread);
1981 wait_queue_unlock(wq);
1982
1983 if (res == KERN_SUCCESS) {
1984 res = thread_go(thread, result);
1985 assert(res == KERN_SUCCESS);
1986 thread_unlock(thread);
1987 splx(s);
1988 return res;
1989 }
1990 splx(s);
1991 return KERN_NOT_WAITING;
1992 }