1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: wait_queue.c (adapted from sched_prim.c)
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Primitives for manipulating wait queues: either global
64 * ones from sched_prim.c, or private ones associated with
65 * particular structures (ports, semaphores, etc.).
66 */
67
68 #include <kern/kern_types.h>
69 #include <kern/simple_lock.h>
70 #include <kern/zalloc.h>
71 #include <kern/queue.h>
72 #include <kern/spl.h>
73 #include <mach/sync_policy.h>
74 #include <kern/mach_param.h>
75 #include <kern/sched_prim.h>
76
77 #include <kern/wait_queue.h>
78 #include <vm/vm_kern.h>
79
80 /* forward declarations */
81 static boolean_t wait_queue_member_locked(
82 wait_queue_t wq,
83 wait_queue_set_t wq_set);
84
85 static void wait_queues_init(void) __attribute__((section("__TEXT, initcode")));
86
87
88 #define WAIT_QUEUE_MAX thread_max
89 #define WAIT_QUEUE_SET_MAX (task_max * 3)
90 #define WAIT_QUEUE_LINK_MAX (PORT_MAX / 2 + (WAIT_QUEUE_MAX * WAIT_QUEUE_SET_MAX) / 64)
91
92 static zone_t _wait_queue_link_zone;
93 static zone_t _wait_queue_set_zone;
94 static zone_t _wait_queue_zone;
95
96 /* see rdar://6737748&5561610; we need an unshadowed
97 * definition of a WaitQueueLink for debugging,
98 * but it needs to be used somewhere to wind up in
99 * the dSYM file. */
100 volatile WaitQueueLink *unused_except_for_debugging;
101
102
103 /*
104 * Waiting protocols and implementation:
105 *
106 * Each thread may be waiting for exactly one event; this event
107 * is set using assert_wait(). That thread may be awakened either
108 * by performing a thread_wakeup_prim() on its event,
109 * or by directly waking that thread up with clear_wait().
110 *
111 * The implementation of wait events uses a hash table. Each
112 * bucket is a queue of threads having the same hash function
113 * value; the chain for the queue (linked list) is the run queue
114 * field. [It is not possible to be waiting and runnable at the
115 * same time.]
116 *
117 * Locks on both the thread and on the hash buckets govern the
118 * wait event field and the queue chain field. Because wakeup
119 * operations only have the event as an argument, the event hash
120 * bucket must be locked before any thread.
121 *
122 * Scheduling operations may also occur at interrupt level; therefore,
123 * interrupts below splsched() must be prevented when holding
124 * thread or hash bucket locks.
125 *
126 * The wait event hash table declarations are as follows:
127 */
128
129 struct wait_queue boot_wait_queue[1];
130 __private_extern__ struct wait_queue *wait_queues = &boot_wait_queue[0];
131
132 __private_extern__ uint32_t num_wait_queues = 1;
133
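/*
 * Illustrative sketch of the waiting protocol described above, using the
 * global event hash (not compiled here; "object" is a hypothetical piece of
 * waiter state).  A waiter asserts a wait on an event and blocks; a waker
 * posts the same event with thread_wakeup().
 *
 *	// waiter
 *	wait_result_t wr = assert_wait((event_t)&object, THREAD_UNINT);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *	// waker
 *	thread_wakeup((event_t)&object);
 */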
134 static uint32_t
135 compute_wait_hash_size(__unused unsigned cpu_count, __unused uint64_t memsize) {
136 uint32_t hsize = (uint32_t)round_page_64((thread_max / 11) * sizeof(struct wait_queue));
137 uint32_t bhsize;
138
139 if (PE_parse_boot_argn("wqsize", &bhsize, sizeof(bhsize)))
140 hsize = bhsize;
141
142 return hsize;
143 }
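/*
 * Example (illustrative, value is arbitrary): booting with "wqsize=0x200000"
 * in boot-args causes the PE_parse_boot_argn() call above to override the
 * computed hash table size with 0x200000 bytes.
 */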
144
145 static void
146 wait_queues_init(void)
147 {
148 uint32_t i, whsize;
149 kern_return_t kret;
150
151 whsize = compute_wait_hash_size(processor_avail_count, machine_info.max_mem);
152 num_wait_queues = (whsize / ((uint32_t)sizeof(struct wait_queue))) - 1;
153
154 kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues, whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);
155
156 if (kret != KERN_SUCCESS || wait_queues == NULL)
157 panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);
158
159 for (i = 0; i < num_wait_queues; i++) {
160 wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
161 }
162 }
163
164 void
165 wait_queue_bootstrap(void)
166 {
167 wait_queues_init();
168 _wait_queue_zone = zinit(sizeof(struct wait_queue),
169 WAIT_QUEUE_MAX * sizeof(struct wait_queue),
170 sizeof(struct wait_queue),
171 "wait queues");
172 _wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
173 WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
174 sizeof(struct wait_queue_set),
175 "wait queue sets");
176 _wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
177 WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
178 sizeof(struct _wait_queue_link),
179 "wait queue links");
180 }
181
182 /*
183 * Routine: wait_queue_init
184 * Purpose:
185 * Initialize a previously allocated wait queue.
186 * Returns:
187 * KERN_SUCCESS - The wait_queue_t was initialized
188 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
189 */
190 kern_return_t
191 wait_queue_init(
192 wait_queue_t wq,
193 int policy)
194 {
195 /* only FIFO and LIFO for now */
196 if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
197 return KERN_INVALID_ARGUMENT;
198
199 wq->wq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
200 wq->wq_type = _WAIT_QUEUE_inited;
201 queue_init(&wq->wq_queue);
202 hw_lock_init(&wq->wq_interlock);
203 return KERN_SUCCESS;
204 }
205
206 /*
207 * Routine: wait_queue_alloc
208 * Purpose:
209 * Allocate and initialize a wait queue for use outside
210 * of the mach part of the kernel.
211 * Conditions:
212 * Nothing locked - can block.
213 * Returns:
214 * The allocated and initialized wait queue
215 * WAIT_QUEUE_NULL if there is a resource shortage
216 */
217 wait_queue_t
218 wait_queue_alloc(
219 int policy)
220 {
221 wait_queue_t wq;
222 kern_return_t ret;
223
224 wq = (wait_queue_t) zalloc(_wait_queue_zone);
225 if (wq != WAIT_QUEUE_NULL) {
226 ret = wait_queue_init(wq, policy);
227 if (ret != KERN_SUCCESS) {
228 zfree(_wait_queue_zone, wq);
229 wq = WAIT_QUEUE_NULL;
230 }
231 }
232 return wq;
233 }
234
235 /*
236 * Routine: wait_queue_free
237 * Purpose:
238 * Free an allocated wait queue.
239 * Conditions:
240 * May block.
241 */
242 kern_return_t
243 wait_queue_free(
244 wait_queue_t wq)
245 {
246 if (!wait_queue_is_queue(wq))
247 return KERN_INVALID_ARGUMENT;
248 if (!queue_empty(&wq->wq_queue))
249 return KERN_FAILURE;
250 zfree(_wait_queue_zone, wq);
251 return KERN_SUCCESS;
252 }
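/*
 * Illustrative usage sketch ("my_wq" is hypothetical): a subsystem outside
 * the mach part of the kernel allocates a wait queue and later frees it once
 * it is known to have no waiters.
 *
 *	wait_queue_t my_wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (my_wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	...
 *	(void) wait_queue_free(my_wq);	// returns KERN_FAILURE if still non-empty
 */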
253
254 /*
255 * Routine: wait_queue_set_init
256 * Purpose:
257 * Initialize a previously allocated wait queue set.
258 * Returns:
259 * KERN_SUCCESS - The wait_queue_set_t was initialized
260 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
261 */
262 kern_return_t
263 wait_queue_set_init(
264 wait_queue_set_t wqset,
265 int policy)
266 {
267 kern_return_t ret;
268
269 ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
270 if (ret != KERN_SUCCESS)
271 return ret;
272
273 wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
274 if (policy & SYNC_POLICY_PREPOST)
275 wqset->wqs_wait_queue.wq_prepost = TRUE;
276 else
277 wqset->wqs_wait_queue.wq_prepost = FALSE;
278 queue_init(&wqset->wqs_setlinks);
279 queue_init(&wqset->wqs_preposts);
280 return KERN_SUCCESS;
281 }
282
283
284 kern_return_t
285 wait_queue_sub_init(
286 wait_queue_set_t wqset,
287 int policy)
288 {
289 return wait_queue_set_init(wqset, policy);
290 }
291
292 kern_return_t
293 wait_queue_sub_clearrefs(
294 wait_queue_set_t wq_set)
295 {
296 wait_queue_link_t wql;
297 queue_t q;
298 spl_t s;
299
300 if (!wait_queue_is_set(wq_set))
301 return KERN_INVALID_ARGUMENT;
302
303 s = splsched();
304 wqs_lock(wq_set);
305 q = &wq_set->wqs_preposts;
306 while (!queue_empty(q)) {
307 queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
308 assert(!wql_is_preposted(wql));
309 }
310 wqs_unlock(wq_set);
311 splx(s);
312 return KERN_SUCCESS;
313 }
314
315 /*
316 * Routine: wait_queue_set_alloc
317 * Purpose:
318 * Allocate and initialize a wait queue set for
319 * use outside of the mach part of the kernel.
320 * Conditions:
321 * May block.
322 * Returns:
323 * The allocated and initialized wait queue set
324 * WAIT_QUEUE_SET_NULL if there is a resource shortage
325 */
326 wait_queue_set_t
327 wait_queue_set_alloc(
328 int policy)
329 {
330 wait_queue_set_t wq_set;
331
332 wq_set = (wait_queue_set_t) zalloc(_wait_queue_set_zone);
333 if (wq_set != WAIT_QUEUE_SET_NULL) {
334 kern_return_t ret;
335
336 ret = wait_queue_set_init(wq_set, policy);
337 if (ret != KERN_SUCCESS) {
338 zfree(_wait_queue_set_zone, wq_set);
339 wq_set = WAIT_QUEUE_SET_NULL;
340 }
341 }
342 return wq_set;
343 }
344
345 /*
346 * Routine: wait_queue_set_free
347 * Purpose:
348 * Free an allocated wait queue set
349 * Conditions:
350 * May block.
351 */
352 kern_return_t
353 wait_queue_set_free(
354 wait_queue_set_t wq_set)
355 {
356 if (!wait_queue_is_set(wq_set))
357 return KERN_INVALID_ARGUMENT;
358
359 if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
360 return KERN_FAILURE;
361
362 zfree(_wait_queue_set_zone, wq_set);
363 return KERN_SUCCESS;
364 }
365
366
367 /*
368 *
369 * Routine: wait_queue_set_size
370 * Routine: wait_queue_link_size
371 * Purpose:
372 * Return the size of opaque wait queue structures
373 */
374 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
375 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
376
377 /* declare a unique type for wait queue link structures */
378 static unsigned int _wait_queue_link;
379 static unsigned int _wait_queue_link_noalloc;
380 static unsigned int _wait_queue_unlinked;
381
382 #define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
383 #define WAIT_QUEUE_LINK_NOALLOC ((void *)&_wait_queue_link_noalloc)
384 #define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
385
386 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
387 WQASSERT(((wqe)->wqe_queue == (wq) && \
388 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
389 "wait queue element list corruption: wq=%#x, wqe=%#x", \
390 (wq), (wqe))
391
392 #define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
393 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
394 (queue_t)(wql) : &(wql)->wql_setlinks)))
395
396 #define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
397 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
398 (queue_t)(wql) : &(wql)->wql_setlinks)))
399
400 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
401 WQASSERT(((((wql)->wql_type == WAIT_QUEUE_LINK) || \
402 ((wql)->wql_type == WAIT_QUEUE_LINK_NOALLOC)) && \
403 ((wql)->wql_setqueue == (wqs)) && \
404 (((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) || \
405 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_SET_inited)) && \
406 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
407 "wait queue set links corruption: wqs=%#x, wql=%#x", \
408 (wqs), (wql))
409
410 #if defined(_WAIT_QUEUE_DEBUG_)
411
412 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
413
414 #define WAIT_QUEUE_CHECK(wq) \
415 MACRO_BEGIN \
416 queue_t q2 = &(wq)->wq_queue; \
417 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
418 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
419 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
420 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
421 } \
422 MACRO_END
423
424 #define WAIT_QUEUE_SET_CHECK(wqs) \
425 MACRO_BEGIN \
426 queue_t q2 = &(wqs)->wqs_setlinks; \
427 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
428 while (!queue_end(q2, (queue_entry_t)wql2)) { \
429 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
430 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
431 } \
432 MACRO_END
433
434 #else /* !_WAIT_QUEUE_DEBUG_ */
435
436 #define WQASSERT(e, s, p0, p1) assert(e)
437
438 #define WAIT_QUEUE_CHECK(wq)
439 #define WAIT_QUEUE_SET_CHECK(wqs)
440
441 #endif /* !_WAIT_QUEUE_DEBUG_ */
442
443 /*
444 * Routine: wait_queue_member_locked
445 * Purpose:
446 * Indicate whether the wait queue is a member of the given set queue
447 * Conditions:
448 * The wait queue is locked
449 * The set queue is just that, a set queue
450 */
451 static boolean_t
452 wait_queue_member_locked(
453 wait_queue_t wq,
454 wait_queue_set_t wq_set)
455 {
456 wait_queue_element_t wq_element;
457 queue_t q;
458
459 assert(wait_queue_held(wq));
460 assert(wait_queue_is_set(wq_set));
461
462 q = &wq->wq_queue;
463
464 wq_element = (wait_queue_element_t) queue_first(q);
465 while (!queue_end(q, (queue_entry_t)wq_element)) {
466 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
467 if ((wq_element->wqe_type == WAIT_QUEUE_LINK) ||
468 (wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC)) {
469 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
470
471 if (wql->wql_setqueue == wq_set)
472 return TRUE;
473 }
474 wq_element = (wait_queue_element_t)
475 queue_next((queue_t) wq_element);
476 }
477 return FALSE;
478 }
479
480
481 /*
482 * Routine: wait_queue_member
483 * Purpose:
484 * Indicate whether the wait queue is a member of the given set queue
485 * Conditions:
486 * The set queue is just that, a set queue
487 */
488 boolean_t
489 wait_queue_member(
490 wait_queue_t wq,
491 wait_queue_set_t wq_set)
492 {
493 boolean_t ret;
494 spl_t s;
495
496 if (!wait_queue_is_set(wq_set))
497 return FALSE;
498
499 s = splsched();
500 wait_queue_lock(wq);
501 ret = wait_queue_member_locked(wq, wq_set);
502 wait_queue_unlock(wq);
503 splx(s);
504
505 return ret;
506 }
507
508
509 /*
510 * Routine: wait_queue_link_internal
511 * Purpose:
512 * Insert a set wait queue into a wait queue. This
513 * requires us to link the two together using a wait_queue_link
514 * structure that was provided.
515 * Conditions:
516 * The wait queue being inserted must be inited as a set queue
517 * The wait_queue_link structure must already be properly typed
518 */
519 static
520 kern_return_t
521 wait_queue_link_internal(
522 wait_queue_t wq,
523 wait_queue_set_t wq_set,
524 wait_queue_link_t wql)
525 {
526 wait_queue_element_t wq_element;
527 queue_t q;
528 spl_t s;
529
530 if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
531 return KERN_INVALID_ARGUMENT;
532
533 /*
534 * There are probably fewer threads and sets associated with
535 * the wait queue than there are wait queues associated with
536 * the set. So let's validate it that way.
537 */
538 s = splsched();
539 wait_queue_lock(wq);
540 q = &wq->wq_queue;
541 wq_element = (wait_queue_element_t) queue_first(q);
542 while (!queue_end(q, (queue_entry_t)wq_element)) {
543 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
544 if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
545 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
546 ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
547 wait_queue_unlock(wq);
548 splx(s);
549 return KERN_ALREADY_IN_SET;
550 }
551 wq_element = (wait_queue_element_t)
552 queue_next((queue_t) wq_element);
553 }
554
555 /*
556 * Not already a member, so we can add it.
557 */
558 wqs_lock(wq_set);
559
560 WAIT_QUEUE_SET_CHECK(wq_set);
561
562 assert(wql->wql_type == WAIT_QUEUE_LINK ||
563 wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);
564
565 wql->wql_queue = wq;
566 wql_clear_prepost(wql);
567 queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
568 wql->wql_setqueue = wq_set;
569 queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
570
571 wqs_unlock(wq_set);
572 wait_queue_unlock(wq);
573 splx(s);
574
575 return KERN_SUCCESS;
576 }
577
578 /*
579 * Routine: wait_queue_link_noalloc
580 * Purpose:
581 * Insert a set wait queue into a wait queue. This
582 * requires us to link the two together using a wait_queue_link
583 * structure provided by the caller.
584 * Conditions:
585 * The wait queue being inserted must be inited as a set queue
586 */
587 kern_return_t
588 wait_queue_link_noalloc(
589 wait_queue_t wq,
590 wait_queue_set_t wq_set,
591 wait_queue_link_t wql)
592 {
593 wql->wql_type = WAIT_QUEUE_LINK_NOALLOC;
594 return wait_queue_link_internal(wq, wq_set, wql);
595 }
596
597 /*
598 * Routine: wait_queue_link
599 * Purpose:
600 * Insert a set wait queue into a wait queue. This
601 * requires us to link the two together using a wait_queue_link
602 * structure that we allocate.
603 * Conditions:
604 * The wait queue being inserted must be inited as a set queue
605 */
606 kern_return_t
607 wait_queue_link(
608 wait_queue_t wq,
609 wait_queue_set_t wq_set)
610 {
611 wait_queue_link_t wql;
612 kern_return_t ret;
613
614 wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
615 if (wql == WAIT_QUEUE_LINK_NULL)
616 return KERN_RESOURCE_SHORTAGE;
617
618 wql->wql_type = WAIT_QUEUE_LINK;
619 ret = wait_queue_link_internal(wq, wq_set, wql);
620 if (ret != KERN_SUCCESS)
621 zfree(_wait_queue_link_zone, wql);
622
623 return ret;
624 }
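/*
 * Illustrative sketch ("my_wq" and "my_set" are hypothetical): create a set,
 * link an existing wait queue into it, and later undo the linkage.
 * wait_queue_link() allocates the link from _wait_queue_link_zone; the
 * _noalloc variant takes a caller-supplied wait_queue_link_t instead.
 *
 *	wait_queue_set_t my_set = wait_queue_set_alloc(SYNC_POLICY_FIFO);
 *	if (my_set != WAIT_QUEUE_SET_NULL &&
 *	    wait_queue_link(my_wq, my_set) == KERN_SUCCESS) {
 *		assert(wait_queue_member(my_wq, my_set));
 *		...
 *		(void) wait_queue_unlink(my_wq, my_set);
 *	}
 */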
625
626
627 /*
628 * Routine: wait_queue_unlink_locked
629 * Purpose:
630 * Undo the linkage between a wait queue and a set.
631 */
632 static void
633 wait_queue_unlink_locked(
634 wait_queue_t wq,
635 wait_queue_set_t wq_set,
636 wait_queue_link_t wql)
637 {
638 assert(wait_queue_held(wq));
639 assert(wait_queue_held(&wq_set->wqs_wait_queue));
640
641 wql->wql_queue = WAIT_QUEUE_NULL;
642 queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
643 wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
644 queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
645 if (wql_is_preposted(wql)) {
646 queue_t ppq = &wq_set->wqs_preposts;
647 queue_remove(ppq, wql, wait_queue_link_t, wql_preposts);
648 }
649 wql->wql_type = WAIT_QUEUE_UNLINKED;
650
651 WAIT_QUEUE_CHECK(wq);
652 WAIT_QUEUE_SET_CHECK(wq_set);
653 }
654
655 /*
656 * Routine: wait_queue_unlink
657 * Purpose:
658 * Remove the linkage between a wait queue and a set,
659 * freeing the linkage structure.
660 * Conditions:
661 * The wait queue being unlinked must be a member of the set queue
662 */
663 kern_return_t
664 wait_queue_unlink(
665 wait_queue_t wq,
666 wait_queue_set_t wq_set)
667 {
668 wait_queue_element_t wq_element;
669 wait_queue_link_t wql;
670 queue_t q;
671 spl_t s;
672
673 if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
674 return KERN_INVALID_ARGUMENT;
675 }
676 s = splsched();
677 wait_queue_lock(wq);
678
679 q = &wq->wq_queue;
680 wq_element = (wait_queue_element_t) queue_first(q);
681 while (!queue_end(q, (queue_entry_t)wq_element)) {
682 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
683 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
684 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
685
686 wql = (wait_queue_link_t)wq_element;
687
688 if (wql->wql_setqueue == wq_set) {
689 boolean_t alloced;
690
691 alloced = (wql->wql_type == WAIT_QUEUE_LINK);
692 wqs_lock(wq_set);
693 wait_queue_unlink_locked(wq, wq_set, wql);
694 wqs_unlock(wq_set);
695 wait_queue_unlock(wq);
696 splx(s);
697 if (alloced)
698 zfree(_wait_queue_link_zone, wql);
699 return KERN_SUCCESS;
700 }
701 }
702 wq_element = (wait_queue_element_t)
703 queue_next((queue_t) wq_element);
704 }
705 wait_queue_unlock(wq);
706 splx(s);
707 return KERN_NOT_IN_SET;
708 }
709
710 /*
711 * Routine: wait_queue_unlink_all
712 * Purpose:
713 * Remove the linkage between a wait queue and all its sets.
714 * All the linkage structures that were allocated internally
715 * are freed. The others are the caller's responsibility.
716 * Conditions:
717 * Nothing of interest locked.
718 */
719
720 kern_return_t
721 wait_queue_unlink_all(
722 wait_queue_t wq)
723 {
724 wait_queue_element_t wq_element;
725 wait_queue_element_t wq_next_element;
726 wait_queue_set_t wq_set;
727 wait_queue_link_t wql;
728 queue_head_t links_queue_head;
729 queue_t links = &links_queue_head;
730 queue_t q;
731 spl_t s;
732
733 if (!wait_queue_is_valid(wq)) {
734 return KERN_INVALID_ARGUMENT;
735 }
736
737 queue_init(links);
738
739 s = splsched();
740 wait_queue_lock(wq);
741
742 q = &wq->wq_queue;
743
744 wq_element = (wait_queue_element_t) queue_first(q);
745 while (!queue_end(q, (queue_entry_t)wq_element)) {
746 boolean_t alloced;
747
748 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
749 wq_next_element = (wait_queue_element_t)
750 queue_next((queue_t) wq_element);
751
752 alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
753 if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
754 wql = (wait_queue_link_t)wq_element;
755 wq_set = wql->wql_setqueue;
756 wqs_lock(wq_set);
757 wait_queue_unlink_locked(wq, wq_set, wql);
758 wqs_unlock(wq_set);
759 if (alloced)
760 enqueue(links, &wql->wql_links);
761 }
762 wq_element = wq_next_element;
763 }
764 wait_queue_unlock(wq);
765 splx(s);
766
767 while(!queue_empty(links)) {
768 wql = (wait_queue_link_t) dequeue(links);
769 zfree(_wait_queue_link_zone, wql);
770 }
771
772 return(KERN_SUCCESS);
773 }
774
775 /* legacy interface naming */
776 kern_return_t
777 wait_subqueue_unlink_all(
778 wait_queue_set_t wq_set)
779 {
780 return wait_queue_set_unlink_all(wq_set);
781 }
782
783
784 /*
785 * Routine: wait_queue_set_unlink_all
786 * Purpose:
787 * Remove the linkage between a set wait queue and all its
788 * member wait queues. The link structures are freed for those
789 * links which were dynamically allocated.
790 * Conditions:
791 * The wait queue must be a set
792 */
793 kern_return_t
794 wait_queue_set_unlink_all(
795 wait_queue_set_t wq_set)
796 {
797 wait_queue_link_t wql;
798 wait_queue_t wq;
799 queue_t q;
800 queue_head_t links_queue_head;
801 queue_t links = &links_queue_head;
802 spl_t s;
803
804 if (!wait_queue_is_set(wq_set)) {
805 return KERN_INVALID_ARGUMENT;
806 }
807
808 queue_init(links);
809
810 retry:
811 s = splsched();
812 wqs_lock(wq_set);
813
814 q = &wq_set->wqs_setlinks;
815
816 wql = (wait_queue_link_t)queue_first(q);
817 while (!queue_end(q, (queue_entry_t)wql)) {
818 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
819 wq = wql->wql_queue;
820 if (wait_queue_lock_try(wq)) {
821 boolean_t alloced;
822
823 alloced = (wql->wql_type == WAIT_QUEUE_LINK);
824 wait_queue_unlink_locked(wq, wq_set, wql);
825 wait_queue_unlock(wq);
826 if (alloced)
827 enqueue(links, &wql->wql_links);
828 wql = (wait_queue_link_t)queue_first(q);
829 } else {
830 wqs_unlock(wq_set);
831 splx(s);
832 delay(1);
833 goto retry;
834 }
835 }
836 wqs_unlock(wq_set);
837 splx(s);
838
839 while (!queue_empty (links)) {
840 wql = (wait_queue_link_t) dequeue(links);
841 zfree(_wait_queue_link_zone, wql);
842 }
843 return(KERN_SUCCESS);
844 }
845
846 /*
847 * Routine: wait_queue_assert_wait64_locked
848 * Purpose:
849 * Insert the current thread into the supplied wait queue
850 * waiting for a particular event to be posted to that queue.
851 *
852 * Conditions:
853 * The wait queue is assumed locked.
854 * The waiting thread is assumed locked.
855 *
856 */
857 __private_extern__ wait_result_t
858 wait_queue_assert_wait64_locked(
859 wait_queue_t wq,
860 event64_t event,
861 wait_interrupt_t interruptible,
862 uint64_t deadline,
863 thread_t thread)
864 {
865 wait_result_t wait_result;
866
867 if (!wait_queue_assert_possible(thread))
868 panic("wait_queue_assert_wait64_locked");
869
870 if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
871 wait_queue_set_t wqs = (wait_queue_set_t)wq;
872
873 if (event == NO_EVENT64 && wqs_is_preposted(wqs))
874 return(THREAD_AWAKENED);
875 }
876
877 /*
878 * This is the extent to which we currently take scheduling attributes
879 * into account. If the thread is vm privileged, we stick it at
880 * the front of the queue. Later, these queues will honor the policy
881 * value set at wait_queue_init time.
882 */
883 wait_result = thread_mark_wait_locked(thread, interruptible);
884 if (wait_result == THREAD_WAITING) {
885 if (!wq->wq_fifo || thread->options & TH_OPT_VMPRIV)
886 enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
887 else
888 enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
889
890 thread->wait_event = event;
891 thread->wait_queue = wq;
892
893 if (deadline != 0) {
894 if (!timer_call_enter(&thread->wait_timer, deadline))
895 thread->wait_timer_active++;
896 thread->wait_timer_is_set = TRUE;
897 }
898 }
899 return(wait_result);
900 }
901
902 /*
903 * Routine: wait_queue_assert_wait
904 * Purpose:
905 * Insert the current thread into the supplied wait queue
906 * waiting for a particular event to be posted to that queue.
907 *
908 * Conditions:
909 * nothing of interest locked.
910 */
911 wait_result_t
912 wait_queue_assert_wait(
913 wait_queue_t wq,
914 event_t event,
915 wait_interrupt_t interruptible,
916 uint64_t deadline)
917 {
918 spl_t s;
919 wait_result_t ret;
920 thread_t thread = current_thread();
921
922 /* If it is an invalid wait queue, you can't wait on it */
923 if (!wait_queue_is_valid(wq))
924 return (thread->wait_result = THREAD_RESTART);
925
926 s = splsched();
927 wait_queue_lock(wq);
928 thread_lock(thread);
929 ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
930 interruptible, deadline, thread);
931 thread_unlock(thread);
932 wait_queue_unlock(wq);
933 splx(s);
934 return(ret);
935 }
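/*
 * Illustrative sketch ("my_wq" and "my_state" are hypothetical): wait on an
 * explicit wait queue for a specific event with a deadline, then block.  A
 * waker would post the same event via wait_queue_wakeup_one()/_all().
 *
 *	uint64_t deadline;
 *	clock_interval_to_deadline(100, 1000 * 1000, &deadline);	// ~100 ms
 *	wr = wait_queue_assert_wait(my_wq, (event_t)&my_state,
 *				    THREAD_ABORTSAFE, deadline);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	// wr is then THREAD_AWAKENED, THREAD_TIMED_OUT, THREAD_INTERRUPTED, ...
 */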
936
937 /*
938 * Routine: wait_queue_assert_wait64
939 * Purpose:
940 * Insert the current thread into the supplied wait queue
941 * waiting for a particular event to be posted to that queue.
942 * Conditions:
943 * nothing of interest locked.
944 */
945 wait_result_t
946 wait_queue_assert_wait64(
947 wait_queue_t wq,
948 event64_t event,
949 wait_interrupt_t interruptible,
950 uint64_t deadline)
951 {
952 spl_t s;
953 wait_result_t ret;
954 thread_t thread = current_thread();
955
956 /* If it is an invalid wait queue, you can't wait on it */
957 if (!wait_queue_is_valid(wq))
958 return (thread->wait_result = THREAD_RESTART);
959
960 s = splsched();
961 wait_queue_lock(wq);
962 thread_lock(thread);
963 ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
964 thread_unlock(thread);
965 wait_queue_unlock(wq);
966 splx(s);
967 return(ret);
968 }
969
970 /*
971 * Routine: _wait_queue_select64_all
972 * Purpose:
973 * Select all threads off a wait queue that meet the
974 * supplied criteria.
975 * Conditions:
976 * at splsched
977 * wait queue locked
978 * wake_queue initialized and ready for insertion
979 * possibly recursive
980 * Returns:
981 * a queue of locked threads
982 */
983 static void
984 _wait_queue_select64_all(
985 wait_queue_t wq,
986 event64_t event,
987 queue_t wake_queue)
988 {
989 wait_queue_element_t wq_element;
990 wait_queue_element_t wqe_next;
991 queue_t q;
992
993 q = &wq->wq_queue;
994
995 wq_element = (wait_queue_element_t) queue_first(q);
996 while (!queue_end(q, (queue_entry_t)wq_element)) {
997 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
998 wqe_next = (wait_queue_element_t)
999 queue_next((queue_t) wq_element);
1000
1001 /*
1002 * We may have to recurse if this is a compound wait queue.
1003 */
1004 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1005 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1006 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1007 wait_queue_set_t set_queue = wql->wql_setqueue;
1008
1009 /*
1010 * We have to check the set wait queue. If it is marked
1011 * as pre-post, and it is the "generic event" then mark
1012 * it pre-posted now (if not already).
1013 */
1014 wqs_lock(set_queue);
1015 if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
1016 queue_t ppq = &set_queue->wqs_preposts;
1017 queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
1018 }
1019 if (! wait_queue_empty(&set_queue->wqs_wait_queue))
1020 _wait_queue_select64_all(&set_queue->wqs_wait_queue, event, wake_queue);
1021 wqs_unlock(set_queue);
1022 } else {
1023
1024 /*
1025 * Otherwise, it's a thread. If it is waiting on
1026 * the event we are posting to this queue, pull
1027 * it off the queue and stick it in our wake_queue.
1028 */
1029 thread_t t = (thread_t)wq_element;
1030
1031 if (t->wait_event == event) {
1032 thread_lock(t);
1033 remqueue(q, (queue_entry_t) t);
1034 enqueue (wake_queue, (queue_entry_t) t);
1035 t->wait_queue = WAIT_QUEUE_NULL;
1036 t->wait_event = NO_EVENT64;
1037 t->at_safe_point = FALSE;
1038 /* returned locked */
1039 }
1040 }
1041 wq_element = wqe_next;
1042 }
1043 }
1044
1045 /*
1046 * Routine: wait_queue_wakeup64_all_locked
1047 * Purpose:
1048 * Wakeup some number of threads that are in the specified
1049 * wait queue and waiting on the specified event.
1050 * Conditions:
1051 * wait queue already locked (may be released).
1052 * Returns:
1053 * KERN_SUCCESS - Threads were woken up
1054 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1055 */
1056 __private_extern__ kern_return_t
1057 wait_queue_wakeup64_all_locked(
1058 wait_queue_t wq,
1059 event64_t event,
1060 wait_result_t result,
1061 boolean_t unlock)
1062 {
1063 queue_head_t wake_queue_head;
1064 queue_t q = &wake_queue_head;
1065 kern_return_t res;
1066
1067 // assert(wait_queue_held(wq));
1068 // if(!wq->wq_interlock.lock_data) { /* (BRINGUP */
1069 // panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq); /* (BRINGUP) */
1070 // }
1071
1072 queue_init(q);
1073
1074 /*
1075 * Select the threads that we will wake up. The threads
1076 * are returned to us locked and cleanly removed from the
1077 * wait queue.
1078 */
1079 _wait_queue_select64_all(wq, event, q);
1080 if (unlock)
1081 wait_queue_unlock(wq);
1082
1083 /*
1084 * For each thread, set it running.
1085 */
1086 res = KERN_NOT_WAITING;
1087 while (!queue_empty (q)) {
1088 thread_t thread = (thread_t) dequeue(q);
1089 res = thread_go(thread, result);
1090 assert(res == KERN_SUCCESS);
1091 thread_unlock(thread);
1092 }
1093 return res;
1094 }
1095
1096
1097 /*
1098 * Routine: wait_queue_wakeup_all
1099 * Purpose:
1100 * Wakeup some number of threads that are in the specified
1101 * wait queue and waiting on the specified event.
1102 * Conditions:
1103 * Nothing locked
1104 * Returns:
1105 * KERN_SUCCESS - Threads were woken up
1106 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1107 */
1108 kern_return_t
1109 wait_queue_wakeup_all(
1110 wait_queue_t wq,
1111 event_t event,
1112 wait_result_t result)
1113 {
1114 kern_return_t ret;
1115 spl_t s;
1116
1117 if (!wait_queue_is_valid(wq)) {
1118 return KERN_INVALID_ARGUMENT;
1119 }
1120
1121 s = splsched();
1122 wait_queue_lock(wq);
1123 // if(!wq->wq_interlock.lock_data) { /* (BRINGUP */
1124 // panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq); /* (BRINGUP) */
1125 // }
1126 ret = wait_queue_wakeup64_all_locked(
1127 wq, CAST_DOWN(event64_t,event),
1128 result, TRUE);
1129 /* lock released */
1130 splx(s);
1131 return ret;
1132 }
1133
1134 /*
1135 * Routine: wait_queue_wakeup64_all
1136 * Purpose:
1137 * Wakeup some number of threads that are in the specified
1138 * wait queue and waiting on the specified event.
1139 * Conditions:
1140 * Nothing locked
1141 * Returns:
1142 * KERN_SUCCESS - Threads were woken up
1143 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1144 */
1145 kern_return_t
1146 wait_queue_wakeup64_all(
1147 wait_queue_t wq,
1148 event64_t event,
1149 wait_result_t result)
1150 {
1151 kern_return_t ret;
1152 spl_t s;
1153
1154 if (!wait_queue_is_valid(wq)) {
1155 return KERN_INVALID_ARGUMENT;
1156 }
1157
1158 s = splsched();
1159 wait_queue_lock(wq);
1160 ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1161 /* lock released */
1162 splx(s);
1163 return ret;
1164 }
1165
1166 /*
1167 * Routine: _wait_queue_select64_one
1168 * Purpose:
1169 * Select the best thread off a wait queue that meets the
1170 * supplied criteria.
1171 * Conditions:
1172 * at splsched
1173 * wait queue locked
1174 * possibly recursive
1175 * Returns:
1176 * a locked thread - if one found
1177 * Note:
1178 * This is where the sync policy of the wait queue comes
1179 * into effect. For now, we just assume FIFO/LIFO.
1180 */
1181 static thread_t
1182 _wait_queue_select64_one(
1183 wait_queue_t wq,
1184 event64_t event)
1185 {
1186 wait_queue_element_t wq_element;
1187 wait_queue_element_t wqe_next;
1188 thread_t t = THREAD_NULL;
1189 queue_t q;
1190
1191 q = &wq->wq_queue;
1192
1193 wq_element = (wait_queue_element_t) queue_first(q);
1194 while (!queue_end(q, (queue_entry_t)wq_element)) {
1195 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1196 wqe_next = (wait_queue_element_t)
1197 queue_next((queue_t) wq_element);
1198
1199 /*
1200 * We may have to recurse if this is a compound wait queue.
1201 */
1202 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1203 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1204 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1205 wait_queue_set_t set_queue = wql->wql_setqueue;
1206
1207 /*
1208 * We have to check the set wait queue. If the set
1209 * supports pre-posting, it isn't already preposted,
1210 * and we didn't find a thread in the set, then mark it.
1211 *
1212 * If we later find a thread, there may be a spurious
1213 * pre-post here on this set. The wait side has to check
1214 * for that either pre- or post-wait.
1215 */
1216 wqs_lock(set_queue);
1217 if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
1218 t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
1219 }
1220 if (t != THREAD_NULL) {
1221 wqs_unlock(set_queue);
1222 return t;
1223 }
1224 if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
1225 queue_t ppq = &set_queue->wqs_preposts;
1226 queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
1227 }
1228 wqs_unlock(set_queue);
1229
1230 } else {
1231
1232 /*
1233 * Otherwise, it's a thread. If it is waiting on
1234 * the event we are posting to this queue, pull
1235 * it off the queue and return it (still locked).
1236 */
1237 t = (thread_t)wq_element;
1238 if (t->wait_event == event) {
1239 thread_lock(t);
1240 remqueue(q, (queue_entry_t) t);
1241 t->wait_queue = WAIT_QUEUE_NULL;
1242 t->wait_event = NO_EVENT64;
1243 t->at_safe_point = FALSE;
1244 return t; /* still locked */
1245 }
1246
1247 t = THREAD_NULL;
1248 }
1249 wq_element = wqe_next;
1250 }
1251 return THREAD_NULL;
1252 }
1253
1254
1255 /*
1256 * Routine: wait_queue_pull_thread_locked
1257 * Purpose:
1258 * Pull a thread off its wait queue and (possibly) unlock
1259 * the waitq.
1260 * Conditions:
1261 * at splsched
1262 * wait queue locked
1263 * thread locked
1264 * Returns:
1265 * with the thread still locked.
1266 */
1267 void
1268 wait_queue_pull_thread_locked(
1269 wait_queue_t waitq,
1270 thread_t thread,
1271 boolean_t unlock)
1272 {
1273
1274 assert(thread->wait_queue == waitq);
1275
1276 remqueue(&waitq->wq_queue, (queue_entry_t)thread );
1277 thread->wait_queue = WAIT_QUEUE_NULL;
1278 thread->wait_event = NO_EVENT64;
1279 thread->at_safe_point = FALSE;
1280 if (unlock)
1281 wait_queue_unlock(waitq);
1282 }
1283
1284
1285 /*
1286 * Routine: _wait_queue_select64_thread
1287 * Purpose:
1288 * Look for a thread and remove it from the queues, if
1289 * (and only if) the thread is waiting on the supplied
1290 * <wait_queue, event> pair.
1291 * Conditions:
1292 * at splsched
1293 * wait queue locked
1294 * possibly recursive
1295 * Returns:
1296 * KERN_NOT_WAITING: Thread is not waiting here.
1297 * KERN_SUCCESS: It was, and is now removed (returned locked)
1298 */
1299 static kern_return_t
1300 _wait_queue_select64_thread(
1301 wait_queue_t wq,
1302 event64_t event,
1303 thread_t thread)
1304 {
1305 wait_queue_element_t wq_element;
1306 wait_queue_element_t wqe_next;
1307 kern_return_t res = KERN_NOT_WAITING;
1308 queue_t q = &wq->wq_queue;
1309
1310 thread_lock(thread);
1311 if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1312 remqueue(q, (queue_entry_t) thread);
1313 thread->at_safe_point = FALSE;
1314 thread->wait_event = NO_EVENT64;
1315 thread->wait_queue = WAIT_QUEUE_NULL;
1316 /* thread still locked */
1317 return KERN_SUCCESS;
1318 }
1319 thread_unlock(thread);
1320
1321 /*
1322 * The wait_queue associated with the thread may be one of this
1323 * wait queue's sets. Go see. If so, removing it from
1324 * there is like removing it from here.
1325 */
1326 wq_element = (wait_queue_element_t) queue_first(q);
1327 while (!queue_end(q, (queue_entry_t)wq_element)) {
1328 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1329 wqe_next = (wait_queue_element_t)
1330 queue_next((queue_t) wq_element);
1331
1332 if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1333 wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1334 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1335 wait_queue_set_t set_queue = wql->wql_setqueue;
1336
1337 wqs_lock(set_queue);
1338 if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
1339 res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
1340 event,
1341 thread);
1342 }
1343 wqs_unlock(set_queue);
1344 if (res == KERN_SUCCESS)
1345 return KERN_SUCCESS;
1346 }
1347 wq_element = wqe_next;
1348 }
1349 return res;
1350 }
1351
1352
1353 /*
1354 * Routine: wait_queue_wakeup64_identity_locked
1355 * Purpose:
1356 * Select a single thread that is most-eligible to run and
1357 * set it running, but return the thread locked.
1358 *
1359 * Conditions:
1360 * at splsched
1361 * wait queue locked
1362 * possibly recursive
1363 * Returns:
1364 * a pointer to the locked thread that was awakened
1365 */
1366 __private_extern__ thread_t
1367 wait_queue_wakeup64_identity_locked(
1368 wait_queue_t wq,
1369 event64_t event,
1370 wait_result_t result,
1371 boolean_t unlock)
1372 {
1373 kern_return_t res;
1374 thread_t thread;
1375
1376 assert(wait_queue_held(wq));
1377
1378 thread = _wait_queue_select64_one(wq, event);
1379 if (unlock)
1380 wait_queue_unlock(wq);
1381
1382 if (thread) {
1383 res = thread_go(thread, result);
1384 assert(res == KERN_SUCCESS);
1385 }
1386 return thread; /* still locked if not NULL */
1387 }
1388
1389
1390 /*
1391 * Routine: wait_queue_wakeup64_one_locked
1392 * Purpose:
1393 * Select a single thread that is most-eligible to run and
1394 * set it running.
1395 *
1396 * Conditions:
1397 * at splsched
1398 * wait queue locked
1399 * possibly recursive
1400 * Returns:
1401 * KERN_SUCCESS - A thread was found waiting and awakened
1402 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1403 */
1404 __private_extern__ kern_return_t
1405 wait_queue_wakeup64_one_locked(
1406 wait_queue_t wq,
1407 event64_t event,
1408 wait_result_t result,
1409 boolean_t unlock)
1410 {
1411 thread_t thread;
1412
1413 assert(wait_queue_held(wq));
1414
1415 thread = _wait_queue_select64_one(wq, event);
1416 if (unlock)
1417 wait_queue_unlock(wq);
1418
1419 if (thread) {
1420 kern_return_t res;
1421
1422 res = thread_go(thread, result);
1423 assert(res == KERN_SUCCESS);
1424 thread_unlock(thread);
1425 return res;
1426 }
1427
1428 return KERN_NOT_WAITING;
1429 }
1430
1431 /*
1432 * Routine: wait_queue_wakeup_one
1433 * Purpose:
1434 * Wakeup the most appropriate thread that is in the specified
1435 * wait queue for the specified event.
1436 * Conditions:
1437 * Nothing locked
1438 * Returns:
1439 * KERN_SUCCESS - Thread was woken up
1440 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1441 */
1442 kern_return_t
1443 wait_queue_wakeup_one(
1444 wait_queue_t wq,
1445 event_t event,
1446 wait_result_t result)
1447 {
1448 thread_t thread;
1449 spl_t s;
1450
1451 if (!wait_queue_is_valid(wq)) {
1452 return KERN_INVALID_ARGUMENT;
1453 }
1454
1455 s = splsched();
1456 wait_queue_lock(wq);
1457 thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
1458 wait_queue_unlock(wq);
1459
1460 if (thread) {
1461 kern_return_t res;
1462
1463 res = thread_go(thread, result);
1464 assert(res == KERN_SUCCESS);
1465 thread_unlock(thread);
1466 splx(s);
1467 return res;
1468 }
1469
1470 splx(s);
1471 return KERN_NOT_WAITING;
1472 }
1473
1474 /*
1475 * Routine: wait_queue_wakeup64_one
1476 * Purpose:
1477 * Wakeup the most appropriate thread that is in the specified
1478 * wait queue for the specified event.
1479 * Conditions:
1480 * Nothing locked
1481 * Returns:
1482 * KERN_SUCCESS - Thread was woken up
1483 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1484 */
1485 kern_return_t
1486 wait_queue_wakeup64_one(
1487 wait_queue_t wq,
1488 event64_t event,
1489 wait_result_t result)
1490 {
1491 thread_t thread;
1492 spl_t s;
1493
1494 if (!wait_queue_is_valid(wq)) {
1495 return KERN_INVALID_ARGUMENT;
1496 }
1497 s = splsched();
1498 wait_queue_lock(wq);
1499 thread = _wait_queue_select64_one(wq, event);
1500 wait_queue_unlock(wq);
1501
1502 if (thread) {
1503 kern_return_t res;
1504
1505 res = thread_go(thread, result);
1506 assert(res == KERN_SUCCESS);
1507 thread_unlock(thread);
1508 splx(s);
1509 return res;
1510 }
1511
1512 splx(s);
1513 return KERN_NOT_WAITING;
1514 }
1515
1516
1517 /*
1518 * Routine: wait_queue_wakeup64_thread_locked
1519 * Purpose:
1520 * Wakeup the particular thread that was specified if and only
1521 * if it was in this wait queue (or one of its set queues)
1522 * and waiting on the specified event.
1523 *
1524 * This is much safer than just removing the thread from
1525 * whatever wait queue it happens to be on. For instance, it
1526 * may have already been awoken from the wait you intended to
1527 * interrupt and waited on something else (like another
1528 * semaphore).
1529 * Conditions:
1530 * at splsched
1531 * wait queue already locked (may be released).
1532 * Returns:
1533 * KERN_SUCCESS - the thread was found waiting and awakened
1534 * KERN_NOT_WAITING - the thread was not waiting here
1535 */
1536 __private_extern__ kern_return_t
1537 wait_queue_wakeup64_thread_locked(
1538 wait_queue_t wq,
1539 event64_t event,
1540 thread_t thread,
1541 wait_result_t result,
1542 boolean_t unlock)
1543 {
1544 kern_return_t res;
1545
1546 assert(wait_queue_held(wq));
1547
1548 /*
1549 * See if the thread was still waiting there. If so, it got
1550 * dequeued and returned locked.
1551 */
1552 res = _wait_queue_select64_thread(wq, event, thread);
1553 if (unlock)
1554 wait_queue_unlock(wq);
1555
1556 if (res != KERN_SUCCESS)
1557 return KERN_NOT_WAITING;
1558
1559 res = thread_go(thread, result);
1560 assert(res == KERN_SUCCESS);
1561 thread_unlock(thread);
1562 return res;
1563 }
1564
1565 /*
1566 * Routine: wait_queue_wakeup_thread
1567 * Purpose:
1568 * Wakeup the particular thread that was specified if and only
1569 * if it was in this wait queue (or one of its set queues)
1570 * and waiting on the specified event.
1571 *
1572 * This is much safer than just removing the thread from
1573 * whatever wait queue it happens to be on. For instance, it
1574 * may have already been awoken from the wait you intended to
1575 * interrupt and waited on something else (like another
1576 * semaphore).
1577 * Conditions:
1578 * nothing of interest locked
1579 * spl is assumed low; it is raised here
1580 * Returns:
1581 * KERN_SUCCESS - the thread was found waiting and awakened
1582 * KERN_NOT_WAITING - the thread was not waiting here
1583 */
1584 kern_return_t
1585 wait_queue_wakeup_thread(
1586 wait_queue_t wq,
1587 event_t event,
1588 thread_t thread,
1589 wait_result_t result)
1590 {
1591 kern_return_t res;
1592 spl_t s;
1593
1594 if (!wait_queue_is_valid(wq)) {
1595 return KERN_INVALID_ARGUMENT;
1596 }
1597
1598 s = splsched();
1599 wait_queue_lock(wq);
1600 res = _wait_queue_select64_thread(wq, CAST_DOWN(event64_t,event), thread);
1601 wait_queue_unlock(wq);
1602
1603 if (res == KERN_SUCCESS) {
1604 res = thread_go(thread, result);
1605 assert(res == KERN_SUCCESS);
1606 thread_unlock(thread);
1607 splx(s);
1608 return res;
1609 }
1610 splx(s);
1611 return KERN_NOT_WAITING;
1612 }
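/*
 * Illustrative sketch ("target_thread", "my_wq" and "my_state" are
 * hypothetical): wake a specific thread only if it is still waiting on
 * the <my_wq, &my_state> pair; otherwise leave it alone.
 *
 *	kern_return_t kr = wait_queue_wakeup_thread(my_wq, (event_t)&my_state,
 *						    target_thread, THREAD_AWAKENED);
 *	if (kr == KERN_NOT_WAITING) {
 *		// the thread had already been awakened or is waiting elsewhere
 *	}
 */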
1613
1614 /*
1615 * Routine: wait_queue_wakeup64_thread
1616 * Purpose:
1617 * Wakeup the particular thread that was specified if and only
1618 * if it was in this wait queue (or one of its set queues)
1619 * and waiting on the specified event.
1620 *
1621 * This is much safer than just removing the thread from
1622 * whatever wait queue it happens to be on. For instance, it
1623 * may have already been awoken from the wait you intended to
1624 * interrupt and waited on something else (like another
1625 * semaphore).
1626 * Conditions:
1627 * nothing of interest locked
1628 * spl is assumed low; it is raised here
1629 * Returns:
1630 * KERN_SUCCESS - the thread was found waiting and awakened
1631 * KERN_NOT_WAITING - the thread was not waiting here
1632 */
1633 kern_return_t
1634 wait_queue_wakeup64_thread(
1635 wait_queue_t wq,
1636 event64_t event,
1637 thread_t thread,
1638 wait_result_t result)
1639 {
1640 kern_return_t res;
1641 spl_t s;
1642
1643 if (!wait_queue_is_valid(wq)) {
1644 return KERN_INVALID_ARGUMENT;
1645 }
1646
1647 s = splsched();
1648 wait_queue_lock(wq);
1649 res = _wait_queue_select64_thread(wq, event, thread);
1650 wait_queue_unlock(wq);
1651
1652 if (res == KERN_SUCCESS) {
1653 res = thread_go(thread, result);
1654 assert(res == KERN_SUCCESS);
1655 thread_unlock(thread);
1656 splx(s);
1657 return res;
1658 }
1659 splx(s);
1660 return KERN_NOT_WAITING;
1661 }