1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: wait_queue.c (adapted from sched_prim.c)
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Primitives for manipulating wait queues: either global
64 * ones from sched_prim.c, or private ones associated with
65 * particular structures (ports, semaphores, etc.).
66 */
67
68 #include <kern/kern_types.h>
69 #include <kern/simple_lock.h>
70 #include <kern/zalloc.h>
71 #include <kern/queue.h>
72 #include <kern/spl.h>
73 #include <mach/sync_policy.h>
74 #include <kern/mach_param.h>
75 #include <kern/sched_prim.h>
76
77 #include <kern/wait_queue.h>
78 #include <vm/vm_kern.h>
79
80 /* forward declarations */
81 static boolean_t wait_queue_member_locked(
82 wait_queue_t wq,
83 wait_queue_set_t wq_set);
84
85 static void wait_queues_init(void) __attribute__((section("__TEXT, initcode")));
86
87
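/*
 * Sizing bounds (upper limits only): these feed the zinit() maxima in
 * wait_queue_bootstrap() below; roughly one dynamically allocated wait
 * queue per thread, a few sets per task, and enough links to cover the
 * port namespace plus a slice of the queue/set cross product.
 */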
88 #define WAIT_QUEUE_MAX thread_max
89 #define WAIT_QUEUE_SET_MAX (task_max * 3)
90 #define WAIT_QUEUE_LINK_MAX (PORT_MAX / 2 + (WAIT_QUEUE_MAX * WAIT_QUEUE_SET_MAX) / 64)
91
92 static zone_t _wait_queue_link_zone;
93 static zone_t _wait_queue_set_zone;
94 static zone_t _wait_queue_zone;
95
96 /* see rdar://6737748&5561610; we need an unshadowed
97 * definition of a WaitQueueLink for debugging,
98 * but it needs to be used somewhere to wind up in
99 * the dSYM file. */
100 volatile WaitQueueLink *unused_except_for_debugging;
101
102
103 /*
104 * Waiting protocols and implementation:
105 *
106 * Each thread may be waiting for exactly one event; this event
107 * is set using assert_wait(). That thread may be awakened either
108 * by performing a thread_wakeup_prim() on its event,
109 * or by directly waking that thread up with clear_wait().
110 *
111 * The implementation of wait events uses a hash table. Each
112 * bucket is a queue of threads having the same hash function
113 * value; the chain for the queue (linked list) is the run queue
114 * field. [It is not possible to be waiting and runnable at the
115 * same time.]
116 *
117 * Locks on both the thread and on the hash buckets govern the
118 * wait event field and the queue chain field. Because wakeup
119 * operations only have the event as an argument, the event hash
120 * bucket must be locked before any thread.
121 *
122 * Scheduling operations may also occur at interrupt level; therefore,
123 * interrupts below splsched() must be prevented when holding
124 * thread or hash bucket locks.
125 *
126 * The wait event hash table declarations are as follows:
127 */
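
/*
 * Sketch only (the real bucket-selection macro lives in the scheduler
 * headers, not in this file): a global thread_wakeup(event) picks its
 * bucket with something morally equivalent to
 *
 *	wq = &wait_queues[hash(event) % num_wait_queues];
 *
 * where hash() stands in for whatever pointer hash the kernel uses.
 */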
128
129 struct wait_queue boot_wait_queue[1];
130 __private_extern__ struct wait_queue *wait_queues = &boot_wait_queue[0];
131
132 __private_extern__ uint32_t num_wait_queues = 1;
133
134 static uint32_t
135 compute_wait_hash_size(__unused unsigned cpu_count, __unused uint64_t memsize) {
136 uint32_t hsize = (uint32_t)round_page_64((thread_max / 11) * sizeof(struct wait_queue));
137 uint32_t bhsize;
138
139 if (PE_parse_boot_argn("wqsize", &bhsize, sizeof(bhsize)))
140 hsize = bhsize;
141
142 return hsize;
143 }
144
145 static void
146 wait_queues_init(void)
147 {
148 uint32_t i, whsize;
149 kern_return_t kret;
150
151 whsize = compute_wait_hash_size(processor_avail_count, machine_info.max_mem);
152 num_wait_queues = (whsize / ((uint32_t)sizeof(struct wait_queue))) - 1;
153
154 kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues, whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);
155
156 if (kret != KERN_SUCCESS || wait_queues == NULL)
157 panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);
158
159 for (i = 0; i < num_wait_queues; i++) {
160 wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
161 }
162 }
163
164 void
165 wait_queue_bootstrap(void)
166 {
167 wait_queues_init();
168 _wait_queue_zone = zinit(sizeof(struct wait_queue),
169 WAIT_QUEUE_MAX * sizeof(struct wait_queue),
170 sizeof(struct wait_queue),
171 "wait queues");
172 zone_change(_wait_queue_zone, Z_NOENCRYPT, TRUE);
173
174 _wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
175 WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
176 sizeof(struct wait_queue_set),
177 "wait queue sets");
178 zone_change(_wait_queue_set_zone, Z_NOENCRYPT, TRUE);
179
180 _wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
181 WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
182 sizeof(struct _wait_queue_link),
183 "wait queue links");
184 zone_change(_wait_queue_link_zone, Z_NOENCRYPT, TRUE);
185 }
186
187 /*
188 * Routine: wait_queue_init
189 * Purpose:
190 * Initialize a previously allocated wait queue.
191 * Returns:
192 * KERN_SUCCESS - The wait_queue_t was initialized
193 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
194 */
195 kern_return_t
196 wait_queue_init(
197 wait_queue_t wq,
198 int policy)
199 {
200 /* only FIFO and LIFO for now */
201 if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
202 return KERN_INVALID_ARGUMENT;
203
204 wq->wq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
205 wq->wq_type = _WAIT_QUEUE_inited;
206 queue_init(&wq->wq_queue);
207 hw_lock_init(&wq->wq_interlock);
208 return KERN_SUCCESS;
209 }
210
211 /*
212 * Routine: wait_queue_alloc
213 * Purpose:
214 * Allocate and initialize a wait queue for use outside
215 * of the mach part of the kernel.
216 * Conditions:
217 * Nothing locked - can block.
218 * Returns:
219 * The allocated and initialized wait queue
220 * WAIT_QUEUE_NULL if there is a resource shortage
221 */
222 wait_queue_t
223 wait_queue_alloc(
224 int policy)
225 {
226 wait_queue_t wq;
227 kern_return_t ret;
228
229 wq = (wait_queue_t) zalloc(_wait_queue_zone);
230 if (wq != WAIT_QUEUE_NULL) {
231 ret = wait_queue_init(wq, policy);
232 if (ret != KERN_SUCCESS) {
233 zfree(_wait_queue_zone, wq);
234 wq = WAIT_QUEUE_NULL;
235 }
236 }
237 return wq;
238 }
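
/*
 * Usage sketch (hypothetical caller, not from this file): a client
 * outside Mach would typically pair wait_queue_alloc() with
 * wait_queue_free() once the queue has drained:
 *
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	...
 *	(void) wait_queue_free(wq);	(KERN_FAILURE if still non-empty)
 */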
239
240 /*
241 * Routine: wait_queue_free
242 * Purpose:
243 * Free an allocated wait queue.
244 * Conditions:
245 * May block.
246 */
247 kern_return_t
248 wait_queue_free(
249 wait_queue_t wq)
250 {
251 if (!wait_queue_is_queue(wq))
252 return KERN_INVALID_ARGUMENT;
253 if (!queue_empty(&wq->wq_queue))
254 return KERN_FAILURE;
255 zfree(_wait_queue_zone, wq);
256 return KERN_SUCCESS;
257 }
258
259 /*
260 * Routine: wait_queue_set_init
261 * Purpose:
262 * Initialize a previously allocated wait queue set.
263 * Returns:
264 * KERN_SUCCESS - The wait_queue_set_t was initialized
265 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
266 */
267 kern_return_t
268 wait_queue_set_init(
269 wait_queue_set_t wqset,
270 int policy)
271 {
272 kern_return_t ret;
273
274 ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
275 if (ret != KERN_SUCCESS)
276 return ret;
277
278 wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
279 if (policy & SYNC_POLICY_PREPOST)
280 wqset->wqs_wait_queue.wq_prepost = TRUE;
281 else
282 wqset->wqs_wait_queue.wq_prepost = FALSE;
283 queue_init(&wqset->wqs_setlinks);
284 queue_init(&wqset->wqs_preposts);
285 return KERN_SUCCESS;
286 }
287
288
289 kern_return_t
290 wait_queue_sub_init(
291 wait_queue_set_t wqset,
292 int policy)
293 {
294 return wait_queue_set_init(wqset, policy);
295 }
296
297 kern_return_t
298 wait_queue_sub_clearrefs(
299 wait_queue_set_t wq_set)
300 {
301 wait_queue_link_t wql;
302 queue_t q;
303 spl_t s;
304
305 if (!wait_queue_is_set(wq_set))
306 return KERN_INVALID_ARGUMENT;
307
308 s = splsched();
309 wqs_lock(wq_set);
310 q = &wq_set->wqs_preposts;
311 while (!queue_empty(q)) {
312 queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
313 assert(!wql_is_preposted(wql));
314 }
315 wqs_unlock(wq_set);
316 splx(s);
317 return KERN_SUCCESS;
318 }
319
320 /*
321 * Routine: wait_queue_set_alloc
322 * Purpose:
323 * Allocate and initialize a wait queue set for
324 * use outside of the mach part of the kernel.
325 * Conditions:
326 * May block.
327 * Returns:
328 * The allocated and initialized wait queue set
329 * WAIT_QUEUE_SET_NULL if there is a resource shortage
330 */
331 wait_queue_set_t
332 wait_queue_set_alloc(
333 int policy)
334 {
335 wait_queue_set_t wq_set;
336
337 wq_set = (wait_queue_set_t) zalloc(_wait_queue_set_zone);
338 if (wq_set != WAIT_QUEUE_SET_NULL) {
339 kern_return_t ret;
340
341 ret = wait_queue_set_init(wq_set, policy);
342 if (ret != KERN_SUCCESS) {
343 zfree(_wait_queue_set_zone, wq_set);
344 wq_set = WAIT_QUEUE_SET_NULL;
345 }
346 }
347 return wq_set;
348 }
349
350 /*
351 * Routine: wait_queue_set_free
352 * Purpose:
353 * Free an allocated wait queue set
354 * Conditions:
355 * May block.
356 */
357 kern_return_t
358 wait_queue_set_free(
359 wait_queue_set_t wq_set)
360 {
361 if (!wait_queue_is_set(wq_set))
362 return KERN_INVALID_ARGUMENT;
363
364 if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
365 return KERN_FAILURE;
366
367 zfree(_wait_queue_set_zone, wq_set);
368 return KERN_SUCCESS;
369 }
370
371
372 /*
373 *
374 * Routine: wait_queue_set_size
375 * Routine: wait_queue_link_size
376 * Purpose:
377 * Return the size of opaque wait queue structures
378 */
379 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
380 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
381
382 /* declare a unique type for wait queue link structures */
383 static unsigned int _wait_queue_link;
384 static unsigned int _wait_queue_link_noalloc;
385 static unsigned int _wait_queue_unlinked;
386
387 #define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
388 #define WAIT_QUEUE_LINK_NOALLOC ((void *)&_wait_queue_link_noalloc)
389 #define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
390
391 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
392 WQASSERT(((wqe)->wqe_queue == (wq) && \
393 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
394 "wait queue element list corruption: wq=%#x, wqe=%#x", \
395 (wq), (wqe))
396
397 #define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
398 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
399 (queue_t)(wql) : &(wql)->wql_setlinks)))
400
401 #define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
402 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
403 (queue_t)(wql) : &(wql)->wql_setlinks)))
404
405 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
406 WQASSERT(((((wql)->wql_type == WAIT_QUEUE_LINK) || \
407 ((wql)->wql_type == WAIT_QUEUE_LINK_NOALLOC)) && \
408 ((wql)->wql_setqueue == (wqs)) && \
409 (((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) || \
410 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_SET_inited)) && \
411 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
412 "wait queue set links corruption: wqs=%#x, wql=%#x", \
413 (wqs), (wql))
414
415 #if defined(_WAIT_QUEUE_DEBUG_)
416
417 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
418
419 #define WAIT_QUEUE_CHECK(wq) \
420 MACRO_BEGIN \
421 queue_t q2 = &(wq)->wq_queue; \
422 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
423 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
424 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
425 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
426 } \
427 MACRO_END
428
429 #define WAIT_QUEUE_SET_CHECK(wqs) \
430 MACRO_BEGIN \
431 queue_t q2 = &(wqs)->wqs_setlinks; \
432 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
433 while (!queue_end(q2, (queue_entry_t)wql2)) { \
434 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
435 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
436 } \
437 MACRO_END
438
439 #else /* !_WAIT_QUEUE_DEBUG_ */
440
441 #define WQASSERT(e, s, p0, p1) assert(e)
442
443 #define WAIT_QUEUE_CHECK(wq)
444 #define WAIT_QUEUE_SET_CHECK(wqs)
445
446 #endif /* !_WAIT_QUEUE_DEBUG_ */
447
448 /*
449 * Routine: wait_queue_member_locked
450 * Purpose:
451 * Indicate whether the wait queue is a member of the given set queue
452 * Conditions:
453 * The wait queue is locked
454 * The set queue is just that, a set queue
455 */
456 static boolean_t
457 wait_queue_member_locked(
458 wait_queue_t wq,
459 wait_queue_set_t wq_set)
460 {
461 wait_queue_element_t wq_element;
462 queue_t q;
463
464 assert(wait_queue_held(wq));
465 assert(wait_queue_is_set(wq_set));
466
467 q = &wq->wq_queue;
468
469 wq_element = (wait_queue_element_t) queue_first(q);
470 while (!queue_end(q, (queue_entry_t)wq_element)) {
471 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
472 if ((wq_element->wqe_type == WAIT_QUEUE_LINK) ||
473 (wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC)) {
474 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
475
476 if (wql->wql_setqueue == wq_set)
477 return TRUE;
478 }
479 wq_element = (wait_queue_element_t)
480 queue_next((queue_t) wq_element);
481 }
482 return FALSE;
483 }
484
485
486 /*
487 * Routine: wait_queue_member
488 * Purpose:
489 * Indicate whether the wait queue is a member of the given set queue
490 * Conditions:
491 * The set queue is just that, a set queue
492 */
493 boolean_t
494 wait_queue_member(
495 wait_queue_t wq,
496 wait_queue_set_t wq_set)
497 {
498 boolean_t ret;
499 spl_t s;
500
501 if (!wait_queue_is_set(wq_set))
502 return FALSE;
503
504 s = splsched();
505 wait_queue_lock(wq);
506 ret = wait_queue_member_locked(wq, wq_set);
507 wait_queue_unlock(wq);
508 splx(s);
509
510 return ret;
511 }
512
513
514 /*
515 * Routine: wait_queue_link_internal
516 * Purpose:
517 * Insert a set wait queue into a wait queue. This
518 * requires us to link the two together using a wait_queue_link
519 * structure that was provided.
520 * Conditions:
521 * The wait queue being inserted must be inited as a set queue
522 * The wait_queue_link structure must already be properly typed
523 */
524 static
525 kern_return_t
526 wait_queue_link_internal(
527 wait_queue_t wq,
528 wait_queue_set_t wq_set,
529 wait_queue_link_t wql)
530 {
531 wait_queue_element_t wq_element;
532 queue_t q;
533 spl_t s;
534
535 if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
536 return KERN_INVALID_ARGUMENT;
537
538 /*
539 * There are probably fewer threads and sets associated with
540 * the wait queue than there are wait queues associated with
541 * the set. So let's validate it that way.
542 */
543 s = splsched();
544 wait_queue_lock(wq);
545 q = &wq->wq_queue;
546 wq_element = (wait_queue_element_t) queue_first(q);
547 while (!queue_end(q, (queue_entry_t)wq_element)) {
548 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
549 if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
550 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
551 ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
552 wait_queue_unlock(wq);
553 splx(s);
554 return KERN_ALREADY_IN_SET;
555 }
556 wq_element = (wait_queue_element_t)
557 queue_next((queue_t) wq_element);
558 }
559
560 /*
561 * Not already a member, so we can add it.
562 */
563 wqs_lock(wq_set);
564
565 WAIT_QUEUE_SET_CHECK(wq_set);
566
567 assert(wql->wql_type == WAIT_QUEUE_LINK ||
568 wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);
569
570 wql->wql_queue = wq;
571 wql_clear_prepost(wql);
572 queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
573 wql->wql_setqueue = wq_set;
574 queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
575
576 wqs_unlock(wq_set);
577 wait_queue_unlock(wq);
578 splx(s);
579
580 return KERN_SUCCESS;
581 }
582
583 /*
584 * Routine: wait_queue_link_noalloc
585 * Purpose:
586 * Insert a set wait queue into a wait queue. This
587 * requires us to link the two together using a wait_queue_link
588 * structure that the caller provides (no allocation here).
589 * Conditions:
590 * The wait queue being inserted must be inited as a set queue
591 */
592 kern_return_t
593 wait_queue_link_noalloc(
594 wait_queue_t wq,
595 wait_queue_set_t wq_set,
596 wait_queue_link_t wql)
597 {
598 wql->wql_type = WAIT_QUEUE_LINK_NOALLOC;
599 return wait_queue_link_internal(wq, wq_set, wql);
600 }
601
602 /*
603 * Routine: wait_queue_link
604 * Purpose:
605 * Insert a set wait queue into a wait queue. This
606 * requires us to link the two together using a wait_queue_link
607 * structure that we allocate.
608 * Conditions:
609 * The wait queue being inserted must be inited as a set queue
610 */
611 kern_return_t
612 wait_queue_link(
613 wait_queue_t wq,
614 wait_queue_set_t wq_set)
615 {
616 wait_queue_link_t wql;
617 kern_return_t ret;
618
619 wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
620 if (wql == WAIT_QUEUE_LINK_NULL)
621 return KERN_RESOURCE_SHORTAGE;
622
623 wql->wql_type = WAIT_QUEUE_LINK;
624 ret = wait_queue_link_internal(wq, wq_set, wql);
625 if (ret != KERN_SUCCESS)
626 zfree(_wait_queue_link_zone, wql);
627
628 return ret;
629 }
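
/*
 * Link lifecycle, roughly (illustrative only): a set-based client
 * allocates a set, links member queues into it, and later unlinks
 * them; wait_queue_link() allocates the link structure itself and
 * wait_queue_unlink() frees it again.
 *
 *	wait_queue_set_t wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
 *	kern_return_t kr = wait_queue_link(wq, wqs);	(KERN_ALREADY_IN_SET if repeated)
 *	...
 *	kr = wait_queue_unlink(wq, wqs);		(KERN_NOT_IN_SET if never linked)
 *	(void) wait_queue_set_free(wqs);
 */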
630
631
632 /*
633 * Routine: wait_queue_unlink_locked
634 * Purpose:
635 * Undo the linkage between a wait queue and a set.
636 */
637 static void
638 wait_queue_unlink_locked(
639 wait_queue_t wq,
640 wait_queue_set_t wq_set,
641 wait_queue_link_t wql)
642 {
643 assert(wait_queue_held(wq));
644 assert(wait_queue_held(&wq_set->wqs_wait_queue));
645
646 wql->wql_queue = WAIT_QUEUE_NULL;
647 queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
648 wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
649 queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
650 if (wql_is_preposted(wql)) {
651 queue_t ppq = &wq_set->wqs_preposts;
652 queue_remove(ppq, wql, wait_queue_link_t, wql_preposts);
653 }
654 wql->wql_type = WAIT_QUEUE_UNLINKED;
655
656 WAIT_QUEUE_CHECK(wq);
657 WAIT_QUEUE_SET_CHECK(wq_set);
658 }
659
660 /*
661 * Routine: wait_queue_unlink
662 * Purpose:
663 * Remove the linkage between a wait queue and a set,
664 * freeing the linkage structure.
665 * Conditions:
666 * The wait queue must currently be linked to (a member of) the set queue
667 */
668 kern_return_t
669 wait_queue_unlink(
670 wait_queue_t wq,
671 wait_queue_set_t wq_set)
672 {
673 wait_queue_element_t wq_element;
674 wait_queue_link_t wql;
675 queue_t q;
676 spl_t s;
677
678 if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
679 return KERN_INVALID_ARGUMENT;
680 }
681 s = splsched();
682 wait_queue_lock(wq);
683
684 q = &wq->wq_queue;
685 wq_element = (wait_queue_element_t) queue_first(q);
686 while (!queue_end(q, (queue_entry_t)wq_element)) {
687 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
688 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
689 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
690
691 wql = (wait_queue_link_t)wq_element;
692
693 if (wql->wql_setqueue == wq_set) {
694 boolean_t alloced;
695
696 alloced = (wql->wql_type == WAIT_QUEUE_LINK);
697 wqs_lock(wq_set);
698 wait_queue_unlink_locked(wq, wq_set, wql);
699 wqs_unlock(wq_set);
700 wait_queue_unlock(wq);
701 splx(s);
702 if (alloced)
703 zfree(_wait_queue_link_zone, wql);
704 return KERN_SUCCESS;
705 }
706 }
707 wq_element = (wait_queue_element_t)
708 queue_next((queue_t) wq_element);
709 }
710 wait_queue_unlock(wq);
711 splx(s);
712 return KERN_NOT_IN_SET;
713 }
714
715 /*
716 * Routine: wait_queue_unlink_all
717 * Purpose:
718 * Remove the linkage between a wait queue and all its sets.
719 * All the linkage structures that were allocated internally
720 * are freed. The others are the caller's responsibility.
721 * Conditions:
722 * Nothing of interest locked.
723 */
724
725 kern_return_t
726 wait_queue_unlink_all(
727 wait_queue_t wq)
728 {
729 wait_queue_element_t wq_element;
730 wait_queue_element_t wq_next_element;
731 wait_queue_set_t wq_set;
732 wait_queue_link_t wql;
733 queue_head_t links_queue_head;
734 queue_t links = &links_queue_head;
735 queue_t q;
736 spl_t s;
737
738 if (!wait_queue_is_valid(wq)) {
739 return KERN_INVALID_ARGUMENT;
740 }
741
742 queue_init(links);
743
744 s = splsched();
745 wait_queue_lock(wq);
746
747 q = &wq->wq_queue;
748
749 wq_element = (wait_queue_element_t) queue_first(q);
750 while (!queue_end(q, (queue_entry_t)wq_element)) {
751 boolean_t alloced;
752
753 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
754 wq_next_element = (wait_queue_element_t)
755 queue_next((queue_t) wq_element);
756
757 alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
758 if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
759 wql = (wait_queue_link_t)wq_element;
760 wq_set = wql->wql_setqueue;
761 wqs_lock(wq_set);
762 wait_queue_unlink_locked(wq, wq_set, wql);
763 wqs_unlock(wq_set);
764 if (alloced)
765 enqueue(links, &wql->wql_links);
766 }
767 wq_element = wq_next_element;
768 }
769 wait_queue_unlock(wq);
770 splx(s);
771
772 while(!queue_empty(links)) {
773 wql = (wait_queue_link_t) dequeue(links);
774 zfree(_wait_queue_link_zone, wql);
775 }
776
777 return(KERN_SUCCESS);
778 }
779
780 /* legacy interface naming */
781 kern_return_t
782 wait_subqueue_unlink_all(
783 wait_queue_set_t wq_set)
784 {
785 return wait_queue_set_unlink_all(wq_set);
786 }
787
788
789 /*
790 * Routine: wait_queue_set_unlink_all
791 * Purpose:
792 * Remove the linkage between a set wait queue and all its
793 * member wait queues. The link structures are freed for those
794 * links which were dynamically allocated.
795 * Conditions:
796 * The wait queue must be a set
797 */
798 kern_return_t
799 wait_queue_set_unlink_all(
800 wait_queue_set_t wq_set)
801 {
802 wait_queue_link_t wql;
803 wait_queue_t wq;
804 queue_t q;
805 queue_head_t links_queue_head;
806 queue_t links = &links_queue_head;
807 spl_t s;
808
809 if (!wait_queue_is_set(wq_set)) {
810 return KERN_INVALID_ARGUMENT;
811 }
812
813 queue_init(links);
814
815 retry:
816 s = splsched();
817 wqs_lock(wq_set);
818
819 q = &wq_set->wqs_setlinks;
820
821 wql = (wait_queue_link_t)queue_first(q);
822 while (!queue_end(q, (queue_entry_t)wql)) {
823 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
824 wq = wql->wql_queue;
825 if (wait_queue_lock_try(wq)) {
826 boolean_t alloced;
827
828 alloced = (wql->wql_type == WAIT_QUEUE_LINK);
829 wait_queue_unlink_locked(wq, wq_set, wql);
830 wait_queue_unlock(wq);
831 if (alloced)
832 enqueue(links, &wql->wql_links);
833 wql = (wait_queue_link_t)queue_first(q);
834 } else {
835 wqs_unlock(wq_set);
836 splx(s);
837 delay(1);
838 goto retry;
839 }
840 }
841 wqs_unlock(wq_set);
842 splx(s);
843
844 while (!queue_empty (links)) {
845 wql = (wait_queue_link_t) dequeue(links);
846 zfree(_wait_queue_link_zone, wql);
847 }
848 return(KERN_SUCCESS);
849 }
850
851 /*
852 * Routine: wait_queue_assert_wait64_locked
853 * Purpose:
854 * Insert the current thread into the supplied wait queue
855 * waiting for a particular event to be posted to that queue.
856 *
857 * Conditions:
858 * The wait queue is assumed locked.
859 * The waiting thread is assumed locked.
860 *
861 */
862 __private_extern__ wait_result_t
863 wait_queue_assert_wait64_locked(
864 wait_queue_t wq,
865 event64_t event,
866 wait_interrupt_t interruptible,
867 uint64_t deadline,
868 thread_t thread)
869 {
870 wait_result_t wait_result;
871
872 if (!wait_queue_assert_possible(thread))
873 panic("wait_queue_assert_wait64_locked");
874
875 if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
876 wait_queue_set_t wqs = (wait_queue_set_t)wq;
877
878 if (event == NO_EVENT64 && wqs_is_preposted(wqs))
879 return(THREAD_AWAKENED);
880 }
881
882 /*
883 * This is the extent to which we currently take scheduling attributes
884 * into account. If the thread is vm privileged, we stick it at
885 * the front of the queue. Later, these queues will honor the policy
886 * value set at wait_queue_init time.
887 */
888 wait_result = thread_mark_wait_locked(thread, interruptible);
889 if (wait_result == THREAD_WAITING) {
890 if (!wq->wq_fifo || thread->options & TH_OPT_VMPRIV)
891 enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
892 else
893 enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
894
895 thread->wait_event = event;
896 thread->wait_queue = wq;
897
898 if (deadline != 0) {
899 if (!timer_call_enter(&thread->wait_timer, deadline))
900 thread->wait_timer_active++;
901 thread->wait_timer_is_set = TRUE;
902 }
903 }
904 return(wait_result);
905 }
906
907 /*
908 * Routine: wait_queue_assert_wait
909 * Purpose:
910 * Insert the current thread into the supplied wait queue
911 * waiting for a particular event to be posted to that queue.
912 *
913 * Conditions:
914 * nothing of interest locked.
915 */
916 wait_result_t
917 wait_queue_assert_wait(
918 wait_queue_t wq,
919 event_t event,
920 wait_interrupt_t interruptible,
921 uint64_t deadline)
922 {
923 spl_t s;
924 wait_result_t ret;
925 thread_t thread = current_thread();
926
927 /* If it is an invalid wait queue, you can't wait on it */
928 if (!wait_queue_is_valid(wq))
929 return (thread->wait_result = THREAD_RESTART);
930
931 s = splsched();
932 wait_queue_lock(wq);
933 thread_lock(thread);
934 ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
935 interruptible, deadline, thread);
936 thread_unlock(thread);
937 wait_queue_unlock(wq);
938 splx(s);
939 return(ret);
940 }
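
/*
 * Typical wait-side pattern (sketch; the wakeup side is handled by the
 * wakeup routines further down in this file):
 *
 *	wait_result_t wr = wait_queue_assert_wait(wq, event, THREAD_UNINT, 0);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 * A matching wait_queue_wakeup_one(wq, event, THREAD_AWAKENED) or
 * wait_queue_wakeup_all(...) from another context makes the blocked
 * thread runnable again.
 */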
941
942 /*
943 * Routine: wait_queue_assert_wait64
944 * Purpose:
945 * Insert the current thread into the supplied wait queue
946 * waiting for a particular event to be posted to that queue.
947 * Conditions:
948 * nothing of interest locked.
949 */
950 wait_result_t
951 wait_queue_assert_wait64(
952 wait_queue_t wq,
953 event64_t event,
954 wait_interrupt_t interruptible,
955 uint64_t deadline)
956 {
957 spl_t s;
958 wait_result_t ret;
959 thread_t thread = current_thread();
960
961 /* If it is an invalid wait queue, you can't wait on it */
962 if (!wait_queue_is_valid(wq))
963 return (thread->wait_result = THREAD_RESTART);
964
965 s = splsched();
966 wait_queue_lock(wq);
967 thread_lock(thread);
968 ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
969 thread_unlock(thread);
970 wait_queue_unlock(wq);
971 splx(s);
972 return(ret);
973 }
974
975 /*
976 * Routine: _wait_queue_select64_all
977 * Purpose:
978 * Select all threads off a wait queue that meet the
979 * supplied criteria.
980 * Conditions:
981 * at splsched
982 * wait queue locked
983 * wake_queue initialized and ready for insertion
984 * possibly recursive
985 * Returns:
986 * a queue of locked threads
987 */
988 static void
989 _wait_queue_select64_all(
990 wait_queue_t wq,
991 event64_t event,
992 queue_t wake_queue)
993 {
994 wait_queue_element_t wq_element;
995 wait_queue_element_t wqe_next;
996 queue_t q;
997
998 q = &wq->wq_queue;
999
1000 wq_element = (wait_queue_element_t) queue_first(q);
1001 while (!queue_end(q, (queue_entry_t)wq_element)) {
1002 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1003 wqe_next = (wait_queue_element_t)
1004 queue_next((queue_t) wq_element);
1005
1006 /*
1007 * We may have to recurse if this is a compound wait queue.
1008 */
1009 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1010 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1011 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1012 wait_queue_set_t set_queue = wql->wql_setqueue;
1013
1014 /*
1015 * We have to check the set wait queue. If it is marked
1016 * as pre-post, and it is the "generic event" then mark
1017 * it pre-posted now (if not already).
1018 */
1019 wqs_lock(set_queue);
1020 if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
1021 queue_t ppq = &set_queue->wqs_preposts;
1022 queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
1023 }
1024 if (! wait_queue_empty(&set_queue->wqs_wait_queue))
1025 _wait_queue_select64_all(&set_queue->wqs_wait_queue, event, wake_queue);
1026 wqs_unlock(set_queue);
1027 } else {
1028
1029 /*
1030 * Otherwise, it's a thread. If it is waiting on
1031 * the event we are posting to this queue, pull
1032 * it off the queue and stick it in our wake_queue.
1033 */
1034 thread_t t = (thread_t)wq_element;
1035
1036 if (t->wait_event == event) {
1037 thread_lock(t);
1038 remqueue(q, (queue_entry_t) t);
1039 enqueue (wake_queue, (queue_entry_t) t);
1040 t->wait_queue = WAIT_QUEUE_NULL;
1041 t->wait_event = NO_EVENT64;
1042 t->at_safe_point = FALSE;
1043 /* returned locked */
1044 }
1045 }
1046 wq_element = wqe_next;
1047 }
1048 }
1049
1050 /*
1051 * Routine: wait_queue_wakeup64_all_locked
1052 * Purpose:
1053 * Wakeup some number of threads that are in the specified
1054 * wait queue and waiting on the specified event.
1055 * Conditions:
1056 * wait queue already locked (may be released).
1057 * Returns:
1058 * KERN_SUCCESS - Threads were woken up
1059 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1060 */
1061 __private_extern__ kern_return_t
1062 wait_queue_wakeup64_all_locked(
1063 wait_queue_t wq,
1064 event64_t event,
1065 wait_result_t result,
1066 boolean_t unlock)
1067 {
1068 queue_head_t wake_queue_head;
1069 queue_t q = &wake_queue_head;
1070 kern_return_t res;
1071
1072 // assert(wait_queue_held(wq));
1073 // if(!wq->wq_interlock.lock_data) { /* (BRINGUP */
1074 // panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq); /* (BRINGUP) */
1075 // }
1076
1077 queue_init(q);
1078
1079 /*
1080 * Select the threads that we will wake up. The threads
1081 * are returned to us locked and cleanly removed from the
1082 * wait queue.
1083 */
1084 _wait_queue_select64_all(wq, event, q);
1085 if (unlock)
1086 wait_queue_unlock(wq);
1087
1088 /*
1089 * For each thread, set it running.
1090 */
1091 res = KERN_NOT_WAITING;
1092 while (!queue_empty (q)) {
1093 thread_t thread = (thread_t) dequeue(q);
1094 res = thread_go(thread, result);
1095 assert(res == KERN_SUCCESS);
1096 thread_unlock(thread);
1097 }
1098 return res;
1099 }
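
/*
 * Locked-variant usage (sketch): a caller (the in-kernel synchronizers
 * are the usual clients) already holds the queue lock at splsched and
 * lets this routine drop it:
 *
 *	s = splsched();
 *	wait_queue_lock(wq);
 *	... decide that every waiter should run ...
 *	kr = wait_queue_wakeup64_all_locked(wq, event, THREAD_AWAKENED, TRUE);
 *	splx(s);
 */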
1100
1101
1102 /*
1103 * Routine: wait_queue_wakeup_all
1104 * Purpose:
1105 * Wakeup some number of threads that are in the specified
1106 * wait queue and waiting on the specified event.
1107 * Conditions:
1108 * Nothing locked
1109 * Returns:
1110 * KERN_SUCCESS - Threads were woken up
1111 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1112 */
1113 kern_return_t
1114 wait_queue_wakeup_all(
1115 wait_queue_t wq,
1116 event_t event,
1117 wait_result_t result)
1118 {
1119 kern_return_t ret;
1120 spl_t s;
1121
1122 if (!wait_queue_is_valid(wq)) {
1123 return KERN_INVALID_ARGUMENT;
1124 }
1125
1126 s = splsched();
1127 wait_queue_lock(wq);
1128 // if(!wq->wq_interlock.lock_data) { /* (BRINGUP */
1129 // panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq); /* (BRINGUP) */
1130 // }
1131 ret = wait_queue_wakeup64_all_locked(
1132 wq, CAST_DOWN(event64_t,event),
1133 result, TRUE);
1134 /* lock released */
1135 splx(s);
1136 return ret;
1137 }
1138
1139 /*
1140 * Routine: wait_queue_wakeup64_all
1141 * Purpose:
1142 * Wakeup some number of threads that are in the specified
1143 * wait queue and waiting on the specified event.
1144 * Conditions:
1145 * Nothing locked
1146 * Returns:
1147 * KERN_SUCCESS - Threads were woken up
1148 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1149 */
1150 kern_return_t
1151 wait_queue_wakeup64_all(
1152 wait_queue_t wq,
1153 event64_t event,
1154 wait_result_t result)
1155 {
1156 kern_return_t ret;
1157 spl_t s;
1158
1159 if (!wait_queue_is_valid(wq)) {
1160 return KERN_INVALID_ARGUMENT;
1161 }
1162
1163 s = splsched();
1164 wait_queue_lock(wq);
1165 ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1166 /* lock released */
1167 splx(s);
1168 return ret;
1169 }
1170
1171 /*
1172 * Routine: _wait_queue_select64_one
1173 * Purpose:
1174 * Select the best thread off a wait queue that meets the
1175 * supplied criteria.
1176 * Conditions:
1177 * at splsched
1178 * wait queue locked
1179 * possibly recursive
1180 * Returns:
1181 * a locked thread - if one found
1182 * Note:
1183 * This is where the sync policy of the wait queue comes
1184 * into effect. For now, we just assume FIFO/LIFO.
1185 */
1186 static thread_t
1187 _wait_queue_select64_one(
1188 wait_queue_t wq,
1189 event64_t event)
1190 {
1191 wait_queue_element_t wq_element;
1192 wait_queue_element_t wqe_next;
1193 thread_t t = THREAD_NULL;
1194 queue_t q;
1195
1196 q = &wq->wq_queue;
1197
1198 wq_element = (wait_queue_element_t) queue_first(q);
1199 while (!queue_end(q, (queue_entry_t)wq_element)) {
1200 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1201 wqe_next = (wait_queue_element_t)
1202 queue_next((queue_t) wq_element);
1203
1204 /*
1205 * We may have to recurse if this is a compound wait queue.
1206 */
1207 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1208 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1209 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1210 wait_queue_set_t set_queue = wql->wql_setqueue;
1211
1212 /*
1213 * We have to check the set wait queue. If the set
1214 * supports pre-posting, it isn't already preposted,
1215 * and we didn't find a thread in the set, then mark it.
1216 *
1217 * If we later find a thread, there may be a spurious
1218 * pre-post here on this set. The wait side has to check
1219 * for that either pre- or post-wait.
1220 */
1221 wqs_lock(set_queue);
1222 if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
1223 t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
1224 }
1225 if (t != THREAD_NULL) {
1226 wqs_unlock(set_queue);
1227 return t;
1228 }
1229 if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
1230 queue_t ppq = &set_queue->wqs_preposts;
1231 queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
1232 }
1233 wqs_unlock(set_queue);
1234
1235 } else {
1236
1237 /*
1238 * Otherwise, its a thread. If it is waiting on
1239 * the event we are posting to this queue, pull
1240 * it off the queue and stick it in out wake_queue.
1241 */
1242 t = (thread_t)wq_element;
1243 if (t->wait_event == event) {
1244 thread_lock(t);
1245 remqueue(q, (queue_entry_t) t);
1246 t->wait_queue = WAIT_QUEUE_NULL;
1247 t->wait_event = NO_EVENT64;
1248 t->at_safe_point = FALSE;
1249 return t; /* still locked */
1250 }
1251
1252 t = THREAD_NULL;
1253 }
1254 wq_element = wqe_next;
1255 }
1256 return THREAD_NULL;
1257 }
1258
1259
1260 /*
1261 * Routine: wait_queue_pull_thread_locked
1262 * Purpose:
1263 * Pull a thread off its wait queue and (possibly) unlock
1264 * the waitq.
1265 * Conditions:
1266 * at splsched
1267 * wait queue locked
1268 * thread locked
1269 * Returns:
1270 * with the thread still locked.
1271 */
1272 void
1273 wait_queue_pull_thread_locked(
1274 wait_queue_t waitq,
1275 thread_t thread,
1276 boolean_t unlock)
1277 {
1278
1279 assert(thread->wait_queue == waitq);
1280
1281 remqueue(&waitq->wq_queue, (queue_entry_t)thread );
1282 thread->wait_queue = WAIT_QUEUE_NULL;
1283 thread->wait_event = NO_EVENT64;
1284 thread->at_safe_point = FALSE;
1285 if (unlock)
1286 wait_queue_unlock(waitq);
1287 }
1288
1289
1290 /*
1291 * Routine: wait_queue_select64_thread
1292 * Purpose:
1293 * Look for a thread and remove it from the queues, if
1294 * (and only if) the thread is waiting on the supplied
1295 * <wait_queue, event> pair.
1296 * Conditions:
1297 * at splsched
1298 * wait queue locked
1299 * possibly recursive
1300 * Returns:
1301 * KERN_NOT_WAITING: Thread is not waiting here.
1302 * KERN_SUCCESS: It was, and is now removed (returned locked)
1303 */
1304 static kern_return_t
1305 _wait_queue_select64_thread(
1306 wait_queue_t wq,
1307 event64_t event,
1308 thread_t thread)
1309 {
1310 wait_queue_element_t wq_element;
1311 wait_queue_element_t wqe_next;
1312 kern_return_t res = KERN_NOT_WAITING;
1313 queue_t q = &wq->wq_queue;
1314
1315 thread_lock(thread);
1316 if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1317 remqueue(q, (queue_entry_t) thread);
1318 thread->at_safe_point = FALSE;
1319 thread->wait_event = NO_EVENT64;
1320 thread->wait_queue = WAIT_QUEUE_NULL;
1321 /* thread still locked */
1322 return KERN_SUCCESS;
1323 }
1324 thread_unlock(thread);
1325
1326 /*
1327 * The wait_queue associated with the thread may be one of this
1328 * wait queue's sets. Go see. If so, removing it from
1329 * there is like removing it from here.
1330 */
1331 wq_element = (wait_queue_element_t) queue_first(q);
1332 while (!queue_end(q, (queue_entry_t)wq_element)) {
1333 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1334 wqe_next = (wait_queue_element_t)
1335 queue_next((queue_t) wq_element);
1336
1337 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1338 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1339 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1340 wait_queue_set_t set_queue = wql->wql_setqueue;
1341
1342 wqs_lock(set_queue);
1343 if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
1344 res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
1345 event,
1346 thread);
1347 }
1348 wqs_unlock(set_queue);
1349 if (res == KERN_SUCCESS)
1350 return KERN_SUCCESS;
1351 }
1352 wq_element = wqe_next;
1353 }
1354 return res;
1355 }
1356
1357
1358 /*
1359 * Routine: wait_queue_wakeup64_identity_locked
1360 * Purpose:
1361 * Select a single thread that is most-eligible to run and
1362 * set it running, but return the thread locked.
1363 *
1364 * Conditions:
1365 * at splsched
1366 * wait queue locked
1367 * possibly recursive
1368 * Returns:
1369 * a pointer to the locked thread that was awakened
1370 */
1371 __private_extern__ thread_t
1372 wait_queue_wakeup64_identity_locked(
1373 wait_queue_t wq,
1374 event64_t event,
1375 wait_result_t result,
1376 boolean_t unlock)
1377 {
1378 kern_return_t res;
1379 thread_t thread;
1380
1381 assert(wait_queue_held(wq));
1382
1383 thread = _wait_queue_select64_one(wq, event);
1384 if (unlock)
1385 wait_queue_unlock(wq);
1386
1387 if (thread) {
1388 res = thread_go(thread, result);
1389 assert(res == KERN_SUCCESS);
1390 }
1391 return thread; /* still locked if not NULL */
1392 }
1393
1394
1395 /*
1396 * Routine: wait_queue_wakeup64_one_locked
1397 * Purpose:
1398 * Select a single thread that is most-eligible to run and
1399 * set it running.
1400 *
1401 * Conditions:
1402 * at splsched
1403 * wait queue locked
1404 * possibly recursive
1405 * Returns:
1406 * KERN_SUCCESS - A waiting thread was found and awakened
1407 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1408 */
1409 __private_extern__ kern_return_t
1410 wait_queue_wakeup64_one_locked(
1411 wait_queue_t wq,
1412 event64_t event,
1413 wait_result_t result,
1414 boolean_t unlock)
1415 {
1416 thread_t thread;
1417
1418 assert(wait_queue_held(wq));
1419
1420 thread = _wait_queue_select64_one(wq, event);
1421 if (unlock)
1422 wait_queue_unlock(wq);
1423
1424 if (thread) {
1425 kern_return_t res;
1426
1427 res = thread_go(thread, result);
1428 assert(res == KERN_SUCCESS);
1429 thread_unlock(thread);
1430 return res;
1431 }
1432
1433 return KERN_NOT_WAITING;
1434 }
1435
1436 /*
1437 * Routine: wait_queue_wakeup_one
1438 * Purpose:
1439 * Wakeup the most appropriate thread that is in the specified
1440 * wait queue for the specified event.
1441 * Conditions:
1442 * Nothing locked
1443 * Returns:
1444 * KERN_SUCCESS - Thread was woken up
1445 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1446 */
1447 kern_return_t
1448 wait_queue_wakeup_one(
1449 wait_queue_t wq,
1450 event_t event,
1451 wait_result_t result)
1452 {
1453 thread_t thread;
1454 spl_t s;
1455
1456 if (!wait_queue_is_valid(wq)) {
1457 return KERN_INVALID_ARGUMENT;
1458 }
1459
1460 s = splsched();
1461 wait_queue_lock(wq);
1462 thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
1463 wait_queue_unlock(wq);
1464
1465 if (thread) {
1466 kern_return_t res;
1467
1468 res = thread_go(thread, result);
1469 assert(res == KERN_SUCCESS);
1470 thread_unlock(thread);
1471 splx(s);
1472 return res;
1473 }
1474
1475 splx(s);
1476 return KERN_NOT_WAITING;
1477 }
1478
1479 /*
1480 * Routine: wait_queue_wakeup64_one
1481 * Purpose:
1482 * Wakeup the most appropriate thread that is in the specified
1483 * wait queue for the specified event.
1484 * Conditions:
1485 * Nothing locked
1486 * Returns:
1487 * KERN_SUCCESS - Thread was woken up
1488 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1489 */
1490 kern_return_t
1491 wait_queue_wakeup64_one(
1492 wait_queue_t wq,
1493 event64_t event,
1494 wait_result_t result)
1495 {
1496 thread_t thread;
1497 spl_t s;
1498
1499 if (!wait_queue_is_valid(wq)) {
1500 return KERN_INVALID_ARGUMENT;
1501 }
1502 s = splsched();
1503 wait_queue_lock(wq);
1504 thread = _wait_queue_select64_one(wq, event);
1505 wait_queue_unlock(wq);
1506
1507 if (thread) {
1508 kern_return_t res;
1509
1510 res = thread_go(thread, result);
1511 assert(res == KERN_SUCCESS);
1512 thread_unlock(thread);
1513 splx(s);
1514 return res;
1515 }
1516
1517 splx(s);
1518 return KERN_NOT_WAITING;
1519 }
1520
1521
1522 /*
1523 * Routine: wait_queue_wakeup64_thread_locked
1524 * Purpose:
1525 * Wakeup the particular thread that was specified if and only if
1526 * it was in this wait queue (or one of its set queues)
1527 * and waiting on the specified event.
1528 *
1529 * This is much safer than just removing the thread from
1530 * whatever wait queue it happens to be on. For instance, it
1531 * may have already been awoken from the wait you intended to
1532 * interrupt and waited on something else (like another
1533 * semaphore).
1534 * Conditions:
1535 * at splsched
1536 * wait queue already locked (may be released).
1537 * Returns:
1538 * KERN_SUCCESS - the thread was found waiting and awakened
1539 * KERN_NOT_WAITING - the thread was not waiting here
1540 */
1541 __private_extern__ kern_return_t
1542 wait_queue_wakeup64_thread_locked(
1543 wait_queue_t wq,
1544 event64_t event,
1545 thread_t thread,
1546 wait_result_t result,
1547 boolean_t unlock)
1548 {
1549 kern_return_t res;
1550
1551 assert(wait_queue_held(wq));
1552
1553 /*
1554 * See if the thread was still waiting there. If so, it got
1555 * dequeued and returned locked.
1556 */
1557 res = _wait_queue_select64_thread(wq, event, thread);
1558 if (unlock)
1559 wait_queue_unlock(wq);
1560
1561 if (res != KERN_SUCCESS)
1562 return KERN_NOT_WAITING;
1563
1564 res = thread_go(thread, result);
1565 assert(res == KERN_SUCCESS);
1566 thread_unlock(thread);
1567 return res;
1568 }
1569
1570 /*
1571 * Routine: wait_queue_wakeup_thread
1572 * Purpose:
1573 * Wakeup the particular thread that was specified if and only if
1574 * it was in this wait queue (or one of its set queues)
1575 * and waiting on the specified event.
1576 *
1577 * This is much safer than just removing the thread from
1578 * whatever wait queue it happens to be on. For instance, it
1579 * may have already been awoken from the wait you intended to
1580 * interrupt and waited on something else (like another
1581 * semaphore).
1582 * Conditions:
1583 * nothing of interest locked
1584 * spl is raised here; the caller need not be at splsched
1585 * Returns:
1586 * KERN_SUCCESS - the thread was found waiting and awakened
1587 * KERN_NOT_WAITING - the thread was not waiting here
1588 */
1589 kern_return_t
1590 wait_queue_wakeup_thread(
1591 wait_queue_t wq,
1592 event_t event,
1593 thread_t thread,
1594 wait_result_t result)
1595 {
1596 kern_return_t res;
1597 spl_t s;
1598
1599 if (!wait_queue_is_valid(wq)) {
1600 return KERN_INVALID_ARGUMENT;
1601 }
1602
1603 s = splsched();
1604 wait_queue_lock(wq);
1605 res = _wait_queue_select64_thread(wq, CAST_DOWN(event64_t,event), thread);
1606 wait_queue_unlock(wq);
1607
1608 if (res == KERN_SUCCESS) {
1609 res = thread_go(thread, result);
1610 assert(res == KERN_SUCCESS);
1611 thread_unlock(thread);
1612 splx(s);
1613 return res;
1614 }
1615 splx(s);
1616 return KERN_NOT_WAITING;
1617 }
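
/*
 * Targeted form, for completeness (sketch): interrupt one specific
 * thread's wait without disturbing other waiters on the same
 * <wq,event> pair.
 *
 *	kern_return_t kr = wait_queue_wakeup_thread(wq, event, thread, THREAD_INTERRUPTED);
 *	if (kr == KERN_NOT_WAITING)
 *		... the thread had already been awakened, or never waited here ...
 */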
1618
1619 /*
1620 * Routine: wait_queue_wakeup64_thread
1621 * Purpose:
1622 * Wakeup the particular thread that was specified if and only if
1623 * it was in this wait queue (or one of its set queues)
1624 * and waiting on the specified event.
1625 *
1626 * This is much safer than just removing the thread from
1627 * whatever wait queue it happens to be on. For instance, it
1628 * may have already been awoken from the wait you intended to
1629 * interrupt and waited on something else (like another
1630 * semaphore).
1631 * Conditions:
1632 * nothing of interest locked
1633 * spl is raised here; the caller need not be at splsched
1634 * Returns:
1635 * KERN_SUCCESS - the thread was found waiting and awakened
1636 * KERN_NOT_WAITING - the thread was not waiting here
1637 */
1638 kern_return_t
1639 wait_queue_wakeup64_thread(
1640 wait_queue_t wq,
1641 event64_t event,
1642 thread_t thread,
1643 wait_result_t result)
1644 {
1645 kern_return_t res;
1646 spl_t s;
1647
1648 if (!wait_queue_is_valid(wq)) {
1649 return KERN_INVALID_ARGUMENT;
1650 }
1651
1652 s = splsched();
1653 wait_queue_lock(wq);
1654 res = _wait_queue_select64_thread(wq, event, thread);
1655 wait_queue_unlock(wq);
1656
1657 if (res == KERN_SUCCESS) {
1658 res = thread_go(thread, result);
1659 assert(res == KERN_SUCCESS);
1660 thread_unlock(thread);
1661 splx(s);
1662 return res;
1663 }
1664 splx(s);
1665 return KERN_NOT_WAITING;
1666 }