/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */

#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>

/*
 *	Routine:	wait_queue_init
 *	Purpose:
 *		Initialize a previously allocated wait queue.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	if ((policy & SYNC_POLICY_ORDER_MASK) != SYNC_POLICY_FIFO)
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
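
/*
 * Example (illustrative sketch, not part of the original file): callers
 * typically embed a wait queue in a larger object and initialize it in
 * place.  "my_object" and "mo" are hypothetical names; note that only
 * FIFO ordering passes the policy check above.
 *
 *	struct my_object {
 *		struct wait_queue	mo_waitq;
 *		...
 *	};
 *
 *	kr = wait_queue_init(&mo->mo_waitq, SYNC_POLICY_FIFO);
 *	assert(kr == KERN_SUCCESS);
 */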

/*
 *	Routine:	wait_queue_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue for use outside
 *		of the mach part of the kernel.
 *	Conditions:
 *		Nothing locked - can block.
 *	Returns:
 *		The allocated and initialized wait queue
 *		WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}

/*
 *	Routine:	wait_queue_free
 *	Purpose:
 *		Free an allocated wait queue.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	kfree((vm_offset_t)wq, sizeof(struct wait_queue));
	return KERN_SUCCESS;
}
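
/*
 * Example (illustrative sketch): pairing wait_queue_alloc with
 * wait_queue_free.  wait_queue_free returns KERN_FAILURE while threads
 * or set links are still queued on the wait queue.
 *
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	... use wq ...
 *	kr = wait_queue_free(wq);
 *	assert(kr == KERN_SUCCESS);
 */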

/*
 *	Routine:	wait_queue_set_init
 *	Purpose:
 *		Initialize a previously allocated wait queue set.
 *	Returns:
 *		KERN_SUCCESS - The wait_queue_set_t was initialized
 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_isprepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_isprepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}

/* legacy API */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}

/*
 *	Routine:	wait_queue_set_alloc
 *	Purpose:
 *		Allocate and initialize a wait queue set for
 *		use outside of the mach part of the kernel.
 *	Conditions:
 *		May block.
 *	Returns:
 *		The allocated and initialized wait queue set
 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}

/*
 *	Routine:	wait_queue_set_free
 *	Purpose:
 *		Free an allocated wait queue set.
 *	Conditions:
 *		May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
	return KERN_SUCCESS;
}
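
/*
 * Example (illustrative sketch): a wait queue set groups member wait
 * queues (see wait_queue_link below).  SYNC_POLICY_PREPOST additionally
 * lets wakeups posted while the set is empty leave a hint behind in
 * wqs_refcount, so a later wait completes immediately.
 *
 *	wait_queue_set_t wqs;
 *
 *	wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
 *	...
 *	kr = wait_queue_set_free(wqs);
 */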

kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);
	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_set_size
 *	Routine:	wait_queue_link_size
 *	Purpose:
 *		Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }

/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)

#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
	  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
	  "wait queue element list corruption: wq=%#x, wqe=%#x", \
	  (wq), (wqe))

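/*
 * WQSPREV/WQSNEXT step through a set's wqs_setlinks list.  The list
 * head sentinel lives in the set itself, while each element's linkage
 * is the wql_setlinks field inside a wait_queue_link, so the macros
 * pick the right queue_t before moving to the previous or next entry.
 */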
#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
	  ((wql)->wql_setqueue == (wqs)) && \
	  ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
	  (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
	  "wait queue set links corruption: wqs=%#x, wql=%#x", \
	  (wqs), (wql))

#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */

/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this wait queue is linked into the given
 *		set queue.
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
__private_extern__ boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}

/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this wait queue is linked into the given
 *		set queue.
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
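
/*
 * Example (illustrative sketch): an unlocked membership test before
 * linking.  The answer is advisory only - membership can change as
 * soon as the locks are dropped; wait_queue_link repeats the check
 * under the locks and returns KERN_ALREADY_IN_SET itself.
 *
 *	if (!wait_queue_member(wq, wq_set))
 *		kr = wait_queue_link(wq, wq_set);
 */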

/*
 *	Routine:	wait_queue_link_noalloc
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a
 *		wait_queue_link structure that the caller provides.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a
 *		wait_queue_link structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	ret = wait_queue_link_noalloc(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));

	return ret;
}
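
/*
 * Example (illustrative sketch): linking a wait queue into a set and
 * undoing it.  wait_queue_link allocates the wait_queue_link itself;
 * callers that cannot block for the allocation pass storage they
 * already hold to wait_queue_link_noalloc instead.  wait_queue_unlink
 * finds and frees the link structure.
 *
 *	kr = wait_queue_link(wq, wq_set);
 *	if (kr == KERN_SUCCESS) {
 *		...
 *		kr = wait_queue_unlink(wq, wq_set);
 *	}
 */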

/*
 *	Routine:	wait_queue_unlink_locked
 *	Purpose:
 *		Undo the linkage between a wait queue and a set.
 *	Conditions:
 *		Both the wait queue and the set are locked.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}

/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue must currently be a member of the set,
 *		else KERN_NOT_IN_SET is returned.
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}

/*
 *	Routine:	wait_queue_unlinkall_nofree
 *	Purpose:
 *		Remove the linkage between a wait queue and all its
 *		sets.  The caller is responsible for freeing
 *		the wait queue link structures.
 */
kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all its sets.
 *		All the linkage structures are freed.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
	}

	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_set_unlink_all_nofree
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues.  The link structures are not freed, nor
 *		returned.  It is the caller's responsibility to track and free
 *		them.
 *	Conditions:
 *		The wait queue must be a set queue.
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return KERN_SUCCESS;
}

/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}

/*
 *	Routine:	wait_queue_set_unlink_all
 *	Purpose:
 *		Remove the linkage between a set wait queue and all its
 *		member wait queues.  The link structures are freed.
 *	Conditions:
 *		The wait queue must be a set.
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
	}
	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_unlink_one
 *	Purpose:
 *		Find and unlink one set wait queue.
 *	Conditions:
 *		Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}

/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return THREAD_AWAKENED;
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->vm_privilege)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
		thread->wait_event = event;
		thread->wait_queue = wq;
	}
	return wait_result;
}

/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq)) {
		return (cur_thread->wait_result = THREAD_RESTART);
	}

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(
			wq, (event64_t)((uint32_t)event),
			interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return ret;
}
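
/*
 * Example (illustrative sketch): the usual wait protocol built on this
 * routine.  The caller asserts the wait and then blocks; the
 * THREAD_CONTINUE_NULL continuation to thread_block is assumed from
 * sched_prim.h, and my_event is a hypothetical object whose address
 * names the event.
 *
 *	wait_result_t wr;
 *
 *	wr = wait_queue_assert_wait(wq, (event_t)&my_event, THREAD_UNINT);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 */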

/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq)) {
		return (cur_thread->wait_result = THREAD_RESTART);
	}

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return ret;
}

/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets; the wait
				 * queue is the first element of the set.
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue(wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}

/*
 *	Routine:	wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));
	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty(q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}

/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(
			wq, (event64_t)((uint32_t)event),
			result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
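
/*
 * Example (illustrative sketch): waking every thread blocked on a
 * hypothetical my_event, handing each THREAD_AWAKENED as its wait
 * result.
 *
 *	kr = wait_queue_wakeup_all(wq, (event_t)&my_event, THREAD_AWAKENED);
 *	if (kr == KERN_NOT_WAITING)
 *		... nobody was waiting on the <wq, &my_event> pair ...
 */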

/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}

/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			thread_t thread = (thread_t)wq_element;

			if (thread->wait_event == event) {
				thread_lock(thread);
				remqueue(q, (queue_entry_t) thread);
				thread->wait_queue = WAIT_QUEUE_NULL;
				thread->wait_event = NO_EVENT64;
				thread->at_safe_point = FALSE;
				return thread;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}

/*
 *	Routine:	wait_queue_peek64_locked
 *	Purpose:
 *		Select the best thread from a wait queue that meets the
 *		supplied criteria, but leave it on the queue it was
 *		found on.  The thread, and the actual wait_queue the
 *		thread was found on, are identified.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *		a locked waitq - the one the thread was found on
 *	Note:
 *		Both the waitq the thread was actually found on, and
 *		the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL) {
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;		/* thread and its waitq locked */
			}

			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}

/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread that was previously "peeked" off the wait
 *		queue and (possibly) unlock the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
	if (unlock)
		wait_queue_unlock(waitq);
}
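
/*
 * Illustrative sketch of the peek/pull protocol: peek to identify the
 * thread (it and the waitq it was actually found on come back locked),
 * decide, then pull it off.  Passing TRUE for unlock drops that
 * waitq's lock; the thread itself stays locked.
 *
 *	wait_queue_peek64_locked(wq, event, &thread, &waitq);
 *	if (thread != THREAD_NULL) {
 *		wait_queue_pull_thread_locked(waitq, thread, TRUE);
 *		... thread still locked ...
 *	}
 */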

/*
 *	Routine:	_wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								event,
								thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}

/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}

/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS - a thread was found waiting and awakened
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}

/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}

/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}

/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go_locked(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}

/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
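
/*
 * Example (illustrative sketch): waking one specific thread, but only
 * if it is still waiting on this <wq,event> pair - safer than blindly
 * waking a thread that may have moved on to a different wait.
 * my_event is a hypothetical object whose address names the event.
 *
 *	kr = wait_queue_wakeup_thread(wq, (event_t)&my_event,
 *				      thread, THREAD_AWAKENED);
 *	if (kr == KERN_NOT_WAITING)
 *		... it had already been awakened or moved on ...
 */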

/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}