1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: wait_queue.c (adapted from sched_prim.c)
54 * Author: Avadis Tevanian, Jr.
55 * Date: 1986
56 *
57 * Primitives for manipulating wait queues: either global
58 * ones from sched_prim.c, or private ones associated with
59 * particular structures (ports, semaphores, etc.).
60 */
61
62 #include <kern/kern_types.h>
63 #include <kern/simple_lock.h>
64 #include <kern/kalloc.h>
65 #include <kern/queue.h>
66 #include <kern/spl.h>
67 #include <mach/sync_policy.h>
68 #include <kern/sched_prim.h>
69
70 #include <kern/wait_queue.h>
71
72 /*
73 * Routine: wait_queue_init
74 * Purpose:
75 * Initialize a previously allocated wait queue.
76 * Returns:
77 * KERN_SUCCESS - The wait_queue_t was initialized
78 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
79 */
80 kern_return_t
81 wait_queue_init(
82 wait_queue_t wq,
83 int policy)
84 {
85 if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
86 return KERN_INVALID_ARGUMENT;
87
88 wq->wq_fifo = TRUE;
89 wq->wq_type = _WAIT_QUEUE_inited;
90 queue_init(&wq->wq_queue);
91 hw_lock_init(&wq->wq_interlock);
92 return KERN_SUCCESS;
93 }
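/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): callers typically embed a struct wait_queue inside their own
 * object and initialize it in place.  The object and field names below
 * are hypothetical.
 *
 *	struct my_object {
 *		struct wait_queue	wq;
 *		int			state;
 *	};
 *
 *	kern_return_t
 *	my_object_init(struct my_object *obj)
 *	{
 *		obj->state = 0;
 *		return wait_queue_init(&obj->wq, SYNC_POLICY_FIFO);
 *	}
 */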
94
95 /*
96 * Routine: wait_queue_alloc
97 * Purpose:
98 * Allocate and initialize a wait queue for use outside of
99 * the mach part of the kernel.
100 * Conditions:
101 * Nothing locked - can block.
102 * Returns:
103 * The allocated and initialized wait queue
104 * WAIT_QUEUE_NULL if there is a resource shortage
105 */
106 wait_queue_t
107 wait_queue_alloc(
108 int policy)
109 {
110 wait_queue_t wq;
111 kern_return_t ret;
112
113 wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
114 if (wq != WAIT_QUEUE_NULL) {
115 ret = wait_queue_init(wq, policy);
116 if (ret != KERN_SUCCESS) {
117 kfree((vm_offset_t)wq, sizeof(struct wait_queue));
118 wq = WAIT_QUEUE_NULL;
119 }
120 }
121 return wq;
122 }
123
124 /*
125 * Routine: wait_queue_free
126 * Purpose:
127 * Free an allocated wait queue.
128 * Conditions:
129 * May block.
130 */
131 kern_return_t
132 wait_queue_free(
133 wait_queue_t wq)
134 {
135 if (!wait_queue_is_queue(wq))
136 return KERN_INVALID_ARGUMENT;
137 if (!queue_empty(&wq->wq_queue))
138 return KERN_FAILURE;
139 kfree((vm_offset_t)wq, sizeof(struct wait_queue));
140 return KERN_SUCCESS;
141 }
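/*
 * Illustrative sketch (editor's addition): the dynamic counterpart to the
 * embedded case above.  Allocate a private queue, use it, then free it
 * once it is known to be empty (wait_queue_free returns KERN_FAILURE
 * while anything is still queued).
 *
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	...
 *	(void) wait_queue_free(wq);
 */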
142
143 /*
144 * Routine: wait_queue_set_init
145 * Purpose:
146 * Initialize a previously allocated wait queue set.
147 * Returns:
148 * KERN_SUCCESS - The wait_queue_set_t was initialized
149 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
150 */
151 kern_return_t
152 wait_queue_set_init(
153 wait_queue_set_t wqset,
154 int policy)
155 {
156 kern_return_t ret;
157
158 ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
159 if (ret != KERN_SUCCESS)
160 return ret;
161
162 wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
163 if (policy & SYNC_POLICY_PREPOST)
164 wqset->wqs_wait_queue.wq_isprepost = TRUE;
165 else
166 wqset->wqs_wait_queue.wq_isprepost = FALSE;
167 queue_init(&wqset->wqs_setlinks);
168 wqset->wqs_refcount = 0;
169 return KERN_SUCCESS;
170 }
171
172 /* legacy API */
173 kern_return_t
174 wait_queue_sub_init(
175 wait_queue_set_t wqset,
176 int policy)
177 {
178 return wait_queue_set_init(wqset, policy);
179 }
180
181 /*
182 * Routine: wait_queue_set_alloc
183 * Purpose:
184 * Allocate and initialize a wait queue set for
185 * use outside of the mach part of the kernel.
186 * Conditions:
187 * May block.
188 * Returns:
189 * The allocated and initialized wait queue set
190 * WAIT_QUEUE_SET_NULL if there is a resource shortage
191 */
192 wait_queue_set_t
193 wait_queue_set_alloc(
194 int policy)
195 {
196 wait_queue_set_t wq_set;
197
198 wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
199 if (wq_set != WAIT_QUEUE_SET_NULL) {
200 kern_return_t ret;
201
202 ret = wait_queue_set_init(wq_set, policy);
203 if (ret != KERN_SUCCESS) {
204 kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
205 wq_set = WAIT_QUEUE_SET_NULL;
206 }
207 }
208 return wq_set;
209 }
210
211 /*
212 * Routine: wait_queue_set_free
213 * Purpose:
214 * Free an allocated wait queue set
215 * Conditions:
216 * May block.
217 */
218 kern_return_t
219 wait_queue_set_free(
220 wait_queue_set_t wq_set)
221 {
222 if (!wait_queue_is_set(wq_set))
223 return KERN_INVALID_ARGUMENT;
224
225 if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
226 return KERN_FAILURE;
227
228 kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
229 return KERN_SUCCESS;
230 }
231
232 kern_return_t
233 wait_queue_sub_clearrefs(
234 wait_queue_set_t wq_set)
235 {
236 if (!wait_queue_is_set(wq_set))
237 return KERN_INVALID_ARGUMENT;
238
239 wqs_lock(wq_set);
240 wq_set->wqs_refcount = 0;
241 wqs_unlock(wq_set);
242 return KERN_SUCCESS;
243 }
244
245 /*
246 *
247 * Routine: wait_queue_set_size
248 * Routine: wait_queue_link_size
249 * Purpose:
250 * Return the size of opaque wait queue structures
251 */
252 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
253 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
254
255 /* declare a unique type for wait queue link structures */
256 static unsigned int _wait_queue_link;
257 static unsigned int _wait_queue_unlinked;
258
259 #define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
260 #define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
261
262 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
263 WQASSERT(((wqe)->wqe_queue == (wq) && \
264 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
265 "wait queue element list corruption: wq=%#x, wqe=%#x", \
266 (wq), (wqe))
267
268 #define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
269 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
270 (queue_t)(wql) : &(wql)->wql_setlinks)))
271
272 #define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
273 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
274 (queue_t)(wql) : &(wql)->wql_setlinks)))
275
276 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
277 WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
278 ((wql)->wql_setqueue == (wqs)) && \
279 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
280 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
281 "wait queue set links corruption: wqs=%#x, wql=%#x", \
282 (wqs), (wql))
283
284 #if defined(_WAIT_QUEUE_DEBUG_)
285
286 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
287
288 #define WAIT_QUEUE_CHECK(wq) \
289 MACRO_BEGIN \
290 queue_t q2 = &(wq)->wq_queue; \
291 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
292 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
293 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
294 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
295 } \
296 MACRO_END
297
298 #define WAIT_QUEUE_SET_CHECK(wqs) \
299 MACRO_BEGIN \
300 queue_t q2 = &(wqs)->wqs_setlinks; \
301 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
302 while (!queue_end(q2, (queue_entry_t)wql2)) { \
303 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
304 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
305 } \
306 MACRO_END
307
308 #else /* !_WAIT_QUEUE_DEBUG_ */
309
310 #define WQASSERT(e, s, p0, p1) assert(e)
311
312 #define WAIT_QUEUE_CHECK(wq)
313 #define WAIT_QUEUE_SET_CHECK(wqs)
314
315 #endif /* !_WAIT_QUEUE_DEBUG_ */
316
317 /*
318 * Routine: wait_queue_member_locked
319 * Purpose:
320 * Indicate if this set queue is a member of the queue
321 * Conditions:
322 * The wait queue is locked
323 * The set queue is just that, a set queue
324 */
325 __private_extern__ boolean_t
326 wait_queue_member_locked(
327 wait_queue_t wq,
328 wait_queue_set_t wq_set)
329 {
330 wait_queue_element_t wq_element;
331 queue_t q;
332
333 assert(wait_queue_held(wq));
334 assert(wait_queue_is_set(wq_set));
335
336 q = &wq->wq_queue;
337
338 wq_element = (wait_queue_element_t) queue_first(q);
339 while (!queue_end(q, (queue_entry_t)wq_element)) {
340 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
341 if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
342 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
343
344 if (wql->wql_setqueue == wq_set)
345 return TRUE;
346 }
347 wq_element = (wait_queue_element_t)
348 queue_next((queue_t) wq_element);
349 }
350 return FALSE;
351 }
352
353
354 /*
355 * Routine: wait_queue_member
356 * Purpose:
357 * Indicate if this set queue is a member of the queue
358 * Conditions:
359 * The set queue is just that, a set queue
360 */
361 boolean_t
362 wait_queue_member(
363 wait_queue_t wq,
364 wait_queue_set_t wq_set)
365 {
366 boolean_t ret;
367 spl_t s;
368
369 if (!wait_queue_is_set(wq_set))
370 return FALSE;
371
372 s = splsched();
373 wait_queue_lock(wq);
374 ret = wait_queue_member_locked(wq, wq_set);
375 wait_queue_unlock(wq);
376 splx(s);
377
378 return ret;
379 }
380
381
382 /*
383 * Routine: wait_queue_link_noalloc
384 * Purpose:
385 * Insert a set wait queue into a wait queue. This
386 * requires us to link the two together using the caller-supplied
387 * wait_queue_link structure (no allocation is done here).
388 * Conditions:
389 * The wait queue being inserted must be inited as a set queue
390 */
391 kern_return_t
392 wait_queue_link_noalloc(
393 wait_queue_t wq,
394 wait_queue_set_t wq_set,
395 wait_queue_link_t wql)
396 {
397 wait_queue_element_t wq_element;
398 queue_t q;
399 spl_t s;
400
401 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
402 return KERN_INVALID_ARGUMENT;
403
404 /*
405 * There are probably fewer threads and sets associated with
406 * the wait queue than there are wait queues associated with
407 * the set, so let's validate it that way.
408 */
409 s = splsched();
410 wait_queue_lock(wq);
411 wqs_lock(wq_set);
412 q = &wq->wq_queue;
413 wq_element = (wait_queue_element_t) queue_first(q);
414 while (!queue_end(q, (queue_entry_t)wq_element)) {
415 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
416 if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
417 ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
418 wqs_unlock(wq_set);
419 wait_queue_unlock(wq);
420 splx(s);
421 return KERN_ALREADY_IN_SET;
422 }
423 wq_element = (wait_queue_element_t)
424 queue_next((queue_t) wq_element);
425 }
426
427 /*
428 * Not already a member, so we can add it.
429 */
430
431 WAIT_QUEUE_SET_CHECK(wq_set);
432
433 wql->wql_queue = wq;
434 queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
435 wql->wql_setqueue = wq_set;
436 queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
437 wql->wql_type = WAIT_QUEUE_LINK;
438
439 wqs_unlock(wq_set);
440 wait_queue_unlock(wq);
441 splx(s);
442
443 return KERN_SUCCESS;
444 }
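/*
 * Illustrative sketch (editor's addition): wait_queue_link_noalloc() is
 * meant for callers that cannot block for memory at link time.  They size
 * the opaque link storage with wait_queue_link_size(), allocate it ahead
 * of time, and hand it in here.  Everything outside the wait-queue API in
 * this sketch is hypothetical.
 *
 *	wait_queue_link_t wql;
 *	kern_return_t kr;
 *
 *	wql = (wait_queue_link_t) kalloc(wait_queue_link_size());
 *	if (wql == WAIT_QUEUE_LINK_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	kr = wait_queue_link_noalloc(wq, wq_set, wql);
 *	if (kr != KERN_SUCCESS)
 *		kfree((vm_offset_t)wql, wait_queue_link_size());
 */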
445
446 /*
447 * Routine: wait_queue_link
448 * Purpose:
449 * Insert a set wait queue into a wait queue. This
450 * requires us to link the two together using a wait_queue_link
451 * structure that we allocate.
452 * Conditions:
453 * The wait queue being inserted must be inited as a set queue
454 */
455 kern_return_t
456 wait_queue_link(
457 wait_queue_t wq,
458 wait_queue_set_t wq_set)
459 {
460 wait_queue_link_t wql;
461 kern_return_t ret;
462
463 wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
464 if (wql == WAIT_QUEUE_LINK_NULL)
465 return KERN_RESOURCE_SHORTAGE;
466
467 ret = wait_queue_link_noalloc(wq, wq_set, wql);
468 if (ret != KERN_SUCCESS)
469 kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
470
471 return ret;
472 }
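/*
 * Illustrative sketch (editor's addition): linking a wait queue into a set
 * and undoing it later.  Here wq and wq_set stand for a previously
 * initialized wait queue and wait queue set.
 *
 *	if (!wait_queue_member(wq, wq_set)) {
 *		kern_return_t kr = wait_queue_link(wq, wq_set);
 *		if (kr != KERN_SUCCESS && kr != KERN_ALREADY_IN_SET)
 *			return kr;
 *	}
 *	...
 *	(void) wait_queue_unlink(wq, wq_set);
 *
 * wait_queue_unlink() frees the link structure that wait_queue_link()
 * allocated; the noalloc/nofree variants leave that to the caller.
 */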
473
474
475 /*
476 * Routine: wait_queue_unlink_locked
477 * Purpose:
478 * Undo the linkage between a wait queue and a set.
479 */
480 static void
481 wait_queue_unlink_locked(
482 wait_queue_t wq,
483 wait_queue_set_t wq_set,
484 wait_queue_link_t wql)
485 {
486 assert(wait_queue_held(wq));
487 assert(wait_queue_held(&wq_set->wqs_wait_queue));
488
489 wql->wql_queue = WAIT_QUEUE_NULL;
490 queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
491 wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
492 queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
493 wql->wql_type = WAIT_QUEUE_UNLINKED;
494
495 WAIT_QUEUE_CHECK(wq);
496 WAIT_QUEUE_SET_CHECK(wq_set);
497 }
498
499 /*
500 * Routine: wait_queue_unlink
501 * Purpose:
502 * Remove the linkage between a wait queue and a set,
503 * freeing the linkage structure.
504 * Conditions:
505 * The given set queue must currently be a member of the wait queue
506 */
507 kern_return_t
508 wait_queue_unlink(
509 wait_queue_t wq,
510 wait_queue_set_t wq_set)
511 {
512 wait_queue_element_t wq_element;
513 wait_queue_link_t wql;
514 queue_t q;
515 spl_t s;
516
517 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
518 return KERN_INVALID_ARGUMENT;
519 }
520 s = splsched();
521 wait_queue_lock(wq);
522
523 q = &wq->wq_queue;
524 wq_element = (wait_queue_element_t) queue_first(q);
525 while (!queue_end(q, (queue_entry_t)wq_element)) {
526 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
527 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
528 wql = (wait_queue_link_t)wq_element;
529
530 if (wql->wql_setqueue == wq_set) {
531 wqs_lock(wq_set);
532 wait_queue_unlink_locked(wq, wq_set, wql);
533 wqs_unlock(wq_set);
534 wait_queue_unlock(wq);
535 splx(s);
536 kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
537 return KERN_SUCCESS;
538 }
539 }
540 wq_element = (wait_queue_element_t)
541 queue_next((queue_t) wq_element);
542 }
543 wait_queue_unlock(wq);
544 splx(s);
545 return KERN_NOT_IN_SET;
546 }
547
548
549 /*
550 * Routine: wait_queue_unlinkall_nofree
551 * Purpose:
552 * Remove the linkage between a wait queue and all its
553 * sets. The caller is responsible for freeing
554 * the wait queue link structures.
555 */
556
557 kern_return_t
558 wait_queue_unlinkall_nofree(
559 wait_queue_t wq)
560 {
561 wait_queue_element_t wq_element;
562 wait_queue_element_t wq_next_element;
563 wait_queue_set_t wq_set;
564 wait_queue_link_t wql;
565 queue_head_t links_queue_head;
566 queue_t links = &links_queue_head;
567 queue_t q;
568 spl_t s;
569
570 if (!wait_queue_is_queue(wq)) {
571 return KERN_INVALID_ARGUMENT;
572 }
573
574 queue_init(links);
575
576 s = splsched();
577 wait_queue_lock(wq);
578
579 q = &wq->wq_queue;
580
581 wq_element = (wait_queue_element_t) queue_first(q);
582 while (!queue_end(q, (queue_entry_t)wq_element)) {
583 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
584 wq_next_element = (wait_queue_element_t)
585 queue_next((queue_t) wq_element);
586
587 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
588 wql = (wait_queue_link_t)wq_element;
589 wq_set = wql->wql_setqueue;
590 wqs_lock(wq_set);
591 wait_queue_unlink_locked(wq, wq_set, wql);
592 wqs_unlock(wq_set);
593 }
594 wq_element = wq_next_element;
595 }
596 wait_queue_unlock(wq);
597 splx(s);
598 return(KERN_SUCCESS);
599 }
600
601
602 /*
603 * Routine: wait_queue_unlink_all
604 * Purpose:
605 * Remove the linkage between a wait queue and all its sets.
606 * All the linkage structures are freed.
607 * Conditions:
608 * Nothing of interest locked.
609 */
610
611 kern_return_t
612 wait_queue_unlink_all(
613 wait_queue_t wq)
614 {
615 wait_queue_element_t wq_element;
616 wait_queue_element_t wq_next_element;
617 wait_queue_set_t wq_set;
618 wait_queue_link_t wql;
619 queue_head_t links_queue_head;
620 queue_t links = &links_queue_head;
621 queue_t q;
622 spl_t s;
623
624 if (!wait_queue_is_queue(wq)) {
625 return KERN_INVALID_ARGUMENT;
626 }
627
628 queue_init(links);
629
630 s = splsched();
631 wait_queue_lock(wq);
632
633 q = &wq->wq_queue;
634
635 wq_element = (wait_queue_element_t) queue_first(q);
636 while (!queue_end(q, (queue_entry_t)wq_element)) {
637 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
638 wq_next_element = (wait_queue_element_t)
639 queue_next((queue_t) wq_element);
640
641 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
642 wql = (wait_queue_link_t)wq_element;
643 wq_set = wql->wql_setqueue;
644 wqs_lock(wq_set);
645 wait_queue_unlink_locked(wq, wq_set, wql);
646 wqs_unlock(wq_set);
647 enqueue(links, &wql->wql_links);
648 }
649 wq_element = wq_next_element;
650 }
651 wait_queue_unlock(wq);
652 splx(s);
653
654 while(!queue_empty(links)) {
655 wql = (wait_queue_link_t) dequeue(links);
656 kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
657 }
658
659 return(KERN_SUCCESS);
660 }
661
662 /*
663 * Routine: wait_queue_set_unlink_all_nofree
664 * Purpose:
665 * Remove the linkage between a set wait queue and all its
666 * member wait queues. The link structures are not freed, nor
667 * returned. It is the caller's responsibility to track and free
668 * them.
669 * Conditions:
670 * The wait queue must be a set queue; nothing of interest locked
671 */
672 kern_return_t
673 wait_queue_set_unlink_all_nofree(
674 wait_queue_set_t wq_set)
675 {
676 wait_queue_link_t wql;
677 wait_queue_t wq;
678 queue_t q;
679 kern_return_t kret;
680 spl_t s;
681
682 if (!wait_queue_is_set(wq_set)) {
683 return KERN_INVALID_ARGUMENT;
684 }
685
686 retry:
687 s = splsched();
688 wqs_lock(wq_set);
689
690 q = &wq_set->wqs_setlinks;
691
692 wql = (wait_queue_link_t)queue_first(q);
693 while (!queue_end(q, (queue_entry_t)wql)) {
694 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
695 wq = wql->wql_queue;
696 if (wait_queue_lock_try(wq)) {
697 wait_queue_unlink_locked(wq, wq_set, wql);
698 wait_queue_unlock(wq);
699 wql = (wait_queue_link_t)queue_first(q);
700 } else {
701 wqs_unlock(wq_set);
702 splx(s);
703 delay(1);
704 goto retry;
705 }
706 }
707 wqs_unlock(wq_set);
708 splx(s);
709
710 return(KERN_SUCCESS);
711 }
712
713 /* legacy interface naming */
714 kern_return_t
715 wait_subqueue_unlink_all(
716 wait_queue_set_t wq_set)
717 {
718 return wait_queue_set_unlink_all_nofree(wq_set);
719 }
720
721
722 /*
723 * Routine: wait_queue_set_unlink_all
724 * Purpose:
725 * Remove the linkage between a set wait queue and all its
726 * member wait queues. The link structures are freed.
727 * Conditions:
728 * The wait queue must be a set
729 */
730 kern_return_t
731 wait_queue_set_unlink_all(
732 wait_queue_set_t wq_set)
733 {
734 wait_queue_link_t wql;
735 wait_queue_t wq;
736 queue_t q;
737 queue_head_t links_queue_head;
738 queue_t links = &links_queue_head;
739 kern_return_t kret;
740 spl_t s;
741
742 if (!wait_queue_is_set(wq_set)) {
743 return KERN_INVALID_ARGUMENT;
744 }
745
746 queue_init(links);
747
748 retry:
749 s = splsched();
750 wqs_lock(wq_set);
751
752 q = &wq_set->wqs_setlinks;
753
754 wql = (wait_queue_link_t)queue_first(q);
755 while (!queue_end(q, (queue_entry_t)wql)) {
756 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
757 wq = wql->wql_queue;
758 if (wait_queue_lock_try(wq)) {
759 wait_queue_unlink_locked(wq, wq_set, wql);
760 wait_queue_unlock(wq);
761 enqueue(links, &wql->wql_links);
762 wql = (wait_queue_link_t)queue_first(q);
763 } else {
764 wqs_unlock(wq_set);
765 splx(s);
766 delay(1);
767 goto retry;
768 }
769 }
770 wqs_unlock(wq_set);
771 splx(s);
772
773 while (!queue_empty (links)) {
774 wql = (wait_queue_link_t) dequeue(links);
775 kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
776 }
777 return(KERN_SUCCESS);
778 }
779
780
781 /*
782 * Routine: wait_queue_unlink_one
783 * Purpose:
784 * Find and unlink one set wait queue
785 * Conditions:
786 * Nothing of interest locked.
787 */
788 void
789 wait_queue_unlink_one(
790 wait_queue_t wq,
791 wait_queue_set_t *wq_setp)
792 {
793 wait_queue_element_t wq_element;
794 queue_t q;
795 spl_t s;
796
797 s = splsched();
798 wait_queue_lock(wq);
799
800 q = &wq->wq_queue;
801
802 wq_element = (wait_queue_element_t) queue_first(q);
803 while (!queue_end(q, (queue_entry_t)wq_element)) {
804
805 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
806 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
807 wait_queue_set_t wq_set = wql->wql_setqueue;
808
809 wqs_lock(wq_set);
810 wait_queue_unlink_locked(wq, wq_set, wql);
811 wqs_unlock(wq_set);
812 wait_queue_unlock(wq);
813 splx(s);
814 kfree((vm_offset_t)wql,sizeof(struct wait_queue_link));
815 *wq_setp = wq_set;
816 return;
817 }
818
819 wq_element = (wait_queue_element_t)
820 queue_next((queue_t) wq_element);
821 }
822 wait_queue_unlock(wq);
823 splx(s);
824 *wq_setp = WAIT_QUEUE_SET_NULL;
825 }
826
827
828 /*
829 * Routine: wait_queue_assert_wait64_locked
830 * Purpose:
831 * Insert the current thread into the supplied wait queue
832 * waiting for a particular event to be posted to that queue.
833 *
834 * Conditions:
835 * The wait queue is assumed locked.
836 *
837 */
838 __private_extern__ wait_result_t
839 wait_queue_assert_wait64_locked(
840 wait_queue_t wq,
841 event64_t event,
842 wait_interrupt_t interruptible,
843 boolean_t unlock)
844 {
845 thread_t thread;
846 wait_result_t wait_result;
847
848 if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
849 wait_queue_set_t wqs = (wait_queue_set_t)wq;
850 if (wqs->wqs_isprepost && wqs->wqs_refcount > 0) {
851 if (unlock)
852 wait_queue_unlock(wq);
853 return(THREAD_AWAKENED);
854 }
855 }
856
857 /*
858 * This is the extent to which we currently take scheduling attributes
859 * into account. If the thread is vm privileged, we stick it at
860 * the front of the queue. Later, these queues will honor the policy
861 * value set at wait_queue_init time.
862 */
863 thread = current_thread();
864 thread_lock(thread);
865 wait_result = thread_mark_wait_locked(thread, interruptible);
866 if (wait_result == THREAD_WAITING) {
867 if (thread->vm_privilege)
868 enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
869 else
870 enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
871 thread->wait_event = event;
872 thread->wait_queue = wq;
873 }
874 thread_unlock(thread);
875 if (unlock)
876 wait_queue_unlock(wq);
877 return(wait_result);
878 }
879
880 /*
881 * Routine: wait_queue_assert_wait
882 * Purpose:
883 * Insert the current thread into the supplied wait queue
884 * waiting for a particular event to be posted to that queue.
885 *
886 * Conditions:
887 * nothing of interest locked.
888 */
889 wait_result_t
890 wait_queue_assert_wait(
891 wait_queue_t wq,
892 event_t event,
893 wait_interrupt_t interruptible)
894 {
895 spl_t s;
896 wait_result_t ret;
897
898 /* If it is an invalid wait queue, you can't wait on it */
899 if (!wait_queue_is_valid(wq)) {
900 thread_t thread = current_thread();
901 return (thread->wait_result = THREAD_RESTART);
902 }
903
904 s = splsched();
905 wait_queue_lock(wq);
906 ret = wait_queue_assert_wait64_locked(
907 wq, (event64_t)((uint32_t)event),
908 interruptible, TRUE);
909 /* wait queue unlocked */
910 splx(s);
911 return(ret);
912 }
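/*
 * Illustrative sketch (editor's addition): the usual wait pattern built on
 * wait_queue_assert_wait().  The wait is asserted while the caller still
 * holds its own state lock, the lock is dropped, and only then does the
 * thread block; the wakeup side posts the same event.  my_lock and
 * my_event are hypothetical, and the thread_block() call assumes the
 * no-continuation form of that interface.
 *
 *	wait_result_t wr;
 *
 *	wr = wait_queue_assert_wait(wq, (event_t)my_event, THREAD_INTERRUPTIBLE);
 *	simple_unlock(&my_lock);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	if (wr != THREAD_AWAKENED) {
 *		... THREAD_INTERRUPTED / THREAD_RESTART handling ...
 *	}
 */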
913
914 /*
915 * Routine: wait_queue_assert_wait64
916 * Purpose:
917 * Insert the current thread into the supplied wait queue
918 * waiting for a particular event to be posted to that queue.
919 * Conditions:
920 * nothing of interest locked.
921 */
922 wait_result_t
923 wait_queue_assert_wait64(
924 wait_queue_t wq,
925 event64_t event,
926 wait_interrupt_t interruptible)
927 {
928 spl_t s;
929 wait_result_t ret;
930
931 /* If it is an invalid wait queue, you can't wait on it */
932 if (!wait_queue_is_valid(wq)) {
933 thread_t thread = current_thread();
934 return (thread->wait_result = THREAD_RESTART);
935 }
936
937 s = splsched();
938 wait_queue_lock(wq);
939 ret = wait_queue_assert_wait64_locked(wq, event, interruptible, TRUE);
940 /* wait queue unlocked */
941 splx(s);
942 return(ret);
943 }
944
945
946 /*
947 * Routine: _wait_queue_select64_all
948 * Purpose:
949 * Select all threads off a wait queue that meet the
950 * supplied criteria.
951 * Conditions:
952 * at splsched
953 * wait queue locked
954 * wake_queue initialized and ready for insertion
955 * possibly recursive
956 * Returns:
957 * a queue of locked threads
958 */
959 static void
960 _wait_queue_select64_all(
961 wait_queue_t wq,
962 event64_t event,
963 queue_t wake_queue)
964 {
965 wait_queue_element_t wq_element;
966 wait_queue_element_t wqe_next;
967 queue_t q;
968
969 q = &wq->wq_queue;
970
971 wq_element = (wait_queue_element_t) queue_first(q);
972 while (!queue_end(q, (queue_entry_t)wq_element)) {
973 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
974 wqe_next = (wait_queue_element_t)
975 queue_next((queue_t) wq_element);
976
977 /*
978 * We may have to recurse if this is a compound wait queue.
979 */
980 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
981 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
982 wait_queue_t set_queue;
983
984 /*
985 * We have to check the set wait queue.
986 */
987 set_queue = (wait_queue_t)wql->wql_setqueue;
988 wait_queue_lock(set_queue);
989 if (set_queue->wq_isprepost) {
990 wait_queue_set_t wqs = (wait_queue_set_t)set_queue;
991
992 /*
993 * Preposting applies only to sets, and the wait queue
994 * is the first element of the set structure.
995 */
996 wqs->wqs_refcount++;
997 }
998 if (! wait_queue_empty(set_queue))
999 _wait_queue_select64_all(set_queue, event, wake_queue);
1000 wait_queue_unlock(set_queue);
1001 } else {
1002
1003 /*
1004 * Otherwise, it's a thread. If it is waiting on
1005 * the event we are posting to this queue, pull
1006 * it off the queue and stick it in our wake_queue.
1007 */
1008 thread_t t = (thread_t)wq_element;
1009
1010 if (t->wait_event == event) {
1011 thread_lock(t);
1012 remqueue(q, (queue_entry_t) t);
1013 enqueue (wake_queue, (queue_entry_t) t);
1014 t->wait_queue = WAIT_QUEUE_NULL;
1015 t->wait_event = NO_EVENT64;
1016 t->at_safe_point = FALSE;
1017 /* returned locked */
1018 }
1019 }
1020 wq_element = wqe_next;
1021 }
1022 }
1023
1024 /*
1025 * Routine: wait_queue_wakeup64_all_locked
1026 * Purpose:
1027 * Wakeup some number of threads that are in the specified
1028 * wait queue and waiting on the specified event.
1029 * Conditions:
1030 * wait queue already locked (may be released).
1031 * Returns:
1032 * KERN_SUCCESS - Threads were woken up
1033 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1034 */
1035 __private_extern__ kern_return_t
1036 wait_queue_wakeup64_all_locked(
1037 wait_queue_t wq,
1038 event64_t event,
1039 wait_result_t result,
1040 boolean_t unlock)
1041 {
1042 queue_head_t wake_queue_head;
1043 queue_t q = &wake_queue_head;
1044 kern_return_t res;
1045
1046 assert(wait_queue_held(wq));
1047 queue_init(q);
1048
1049 /*
1050 * Select the threads that we will wake up. The threads
1051 * are returned to us locked and cleanly removed from the
1052 * wait queue.
1053 */
1054 _wait_queue_select64_all(wq, event, q);
1055 if (unlock)
1056 wait_queue_unlock(wq);
1057
1058 /*
1059 * For each thread, set it running.
1060 */
1061 res = KERN_NOT_WAITING;
1062 while (!queue_empty (q)) {
1063 thread_t thread = (thread_t) dequeue(q);
1064 res = thread_go_locked(thread, result);
1065 assert(res == KERN_SUCCESS);
1066 thread_unlock(thread);
1067 }
1068 return res;
1069 }
1070
1071
1072 /*
1073 * Routine: wait_queue_wakeup_all
1074 * Purpose:
1075 * Wakeup some number of threads that are in the specified
1076 * wait queue and waiting on the specified event.
1077 * Conditions:
1078 * Nothing locked
1079 * Returns:
1080 * KERN_SUCCESS - Threads were woken up
1081 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1082 */
1083 kern_return_t
1084 wait_queue_wakeup_all(
1085 wait_queue_t wq,
1086 event_t event,
1087 wait_result_t result)
1088 {
1089 kern_return_t ret;
1090 spl_t s;
1091
1092 if (!wait_queue_is_valid(wq)) {
1093 return KERN_INVALID_ARGUMENT;
1094 }
1095
1096 s = splsched();
1097 wait_queue_lock(wq);
1098 ret = wait_queue_wakeup64_all_locked(
1099 wq, (event64_t)((uint32_t)event),
1100 result, TRUE);
1101 /* lock released */
1102 splx(s);
1103 return ret;
1104 }
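/*
 * Illustrative sketch (editor's addition): the wakeup side of the wait
 * pattern sketched earlier.  The shared state is changed under the
 * caller's own lock, then every thread waiting on that event is awakened.
 * The lock, object and event names are hypothetical.
 *
 *	simple_lock(&my_lock);
 *	obj->state = 1;
 *	simple_unlock(&my_lock);
 *	(void) wait_queue_wakeup_all(wq, (event_t)my_event, THREAD_AWAKENED);
 *
 * The return value is KERN_NOT_WAITING if nobody was waiting on that
 * event, which callers commonly ignore.
 */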
1105
1106 /*
1107 * Routine: wait_queue_wakeup64_all
1108 * Purpose:
1109 * Wakeup some number of threads that are in the specified
1110 * wait queue and waiting on the specified event.
1111 * Conditions:
1112 * Nothing locked
1113 * Returns:
1114 * KERN_SUCCESS - Threads were woken up
1115 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1116 */
1117 kern_return_t
1118 wait_queue_wakeup64_all(
1119 wait_queue_t wq,
1120 event64_t event,
1121 wait_result_t result)
1122 {
1123 kern_return_t ret;
1124 spl_t s;
1125
1126 if (!wait_queue_is_valid(wq)) {
1127 return KERN_INVALID_ARGUMENT;
1128 }
1129
1130 s = splsched();
1131 wait_queue_lock(wq);
1132 ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1133 /* lock released */
1134 splx(s);
1135 return ret;
1136 }
1137
1138 /*
1139 * Routine: _wait_queue_select64_one
1140 * Purpose:
1141 * Select the best thread off a wait queue that meets the
1142 * supplied criteria.
1143 * Conditions:
1144 * at splsched
1145 * wait queue locked
1146 * possibly recursive
1147 * Returns:
1148 * a locked thread - if one found
1149 * Note:
1150 * This is where the sync policy of the wait queue comes
1151 * into effect. For now, we just assume FIFO.
1152 */
1153 static thread_t
1154 _wait_queue_select64_one(
1155 wait_queue_t wq,
1156 event64_t event)
1157 {
1158 wait_queue_element_t wq_element;
1159 wait_queue_element_t wqe_next;
1160 thread_t t = THREAD_NULL;
1161 queue_t q;
1162
1163 assert(wq->wq_fifo);
1164
1165 q = &wq->wq_queue;
1166
1167 wq_element = (wait_queue_element_t) queue_first(q);
1168 while (!queue_end(q, (queue_entry_t)wq_element)) {
1169 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1170 wqe_next = (wait_queue_element_t)
1171 queue_next((queue_t) wq_element);
1172
1173 /*
1174 * We may have to recurse if this is a compound wait queue.
1175 */
1176 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1177 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1178 wait_queue_t set_queue;
1179
1180 /*
1181 * We have to check the set wait queue.
1182 */
1183 set_queue = (wait_queue_t)wql->wql_setqueue;
1184 wait_queue_lock(set_queue);
1185 if (! wait_queue_empty(set_queue)) {
1186 t = _wait_queue_select64_one(set_queue, event);
1187 }
1188 wait_queue_unlock(set_queue);
1189 if (t != THREAD_NULL)
1190 return t;
1191 } else {
1192
1193 /*
1194 * Otherwise, it's a thread. If it is waiting on
1195 * the event we are posting to this queue, pull
1196 * it off the queue and return it locked.
1197 */
1198 thread_t t = (thread_t)wq_element;
1199
1200 if (t->wait_event == event) {
1201 thread_lock(t);
1202 remqueue(q, (queue_entry_t) t);
1203 t->wait_queue = WAIT_QUEUE_NULL;
1204 t->wait_event = NO_EVENT64;
1205 t->at_safe_point = FALSE;
1206 return t; /* still locked */
1207 }
1208 }
1209 wq_element = wqe_next;
1210 }
1211 return THREAD_NULL;
1212 }
1213
1214 /*
1215 * Routine: wait_queue_peek64_locked
1216 * Purpose:
1217 * Select the best thread from a wait queue that meets the
1218 * supplied criteria, but leave it on the queue it was
1219 * found on. The thread, and the actual wait_queue the
1220 * thread was found on are identified.
1221 * Conditions:
1222 * at splsched
1223 * wait queue locked
1224 * possibly recursive
1225 * Returns:
1226 * a locked thread - if one found
1227 * a locked waitq - the one the thread was found on
1228 * Note:
1229 * Both the waitq the thread was actually found on, and
1230 * the supplied wait queue, are locked after this.
1231 */
1232 __private_extern__ void
1233 wait_queue_peek64_locked(
1234 wait_queue_t wq,
1235 event64_t event,
1236 thread_t *tp,
1237 wait_queue_t *wqp)
1238 {
1239 wait_queue_element_t wq_element;
1240 wait_queue_element_t wqe_next;
1241 thread_t t;
1242 queue_t q;
1243
1244 assert(wq->wq_fifo);
1245
1246 *tp = THREAD_NULL;
1247
1248 q = &wq->wq_queue;
1249
1250 wq_element = (wait_queue_element_t) queue_first(q);
1251 while (!queue_end(q, (queue_entry_t)wq_element)) {
1252 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1253 wqe_next = (wait_queue_element_t)
1254 queue_next((queue_t) wq_element);
1255
1256 /*
1257 * We may have to recurse if this is a compound wait queue.
1258 */
1259 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1260 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1261 wait_queue_t set_queue;
1262
1263 /*
1264 * We have to check the set wait queue.
1265 */
1266 set_queue = (wait_queue_t)wql->wql_setqueue;
1267 wait_queue_lock(set_queue);
1268 if (! wait_queue_empty(set_queue)) {
1269 wait_queue_peek64_locked(set_queue, event, tp, wqp);
1270 }
1271 if (*tp != THREAD_NULL) {
1272 if (*wqp != set_queue)
1273 wait_queue_unlock(set_queue);
1274 return; /* thread and its waitq locked */
1275 }
1276
1277 wait_queue_unlock(set_queue);
1278 } else {
1279
1280 /*
1281 * Otherwise, it's a thread. If it is waiting on
1282 * the event we are posting to this queue, return
1283 * it locked, but leave it on the queue.
1284 */
1285 thread_t t = (thread_t)wq_element;
1286
1287 if (t->wait_event == event) {
1288 thread_lock(t);
1289 *tp = t;
1290 *wqp = wq;
1291 return;
1292 }
1293 }
1294 wq_element = wqe_next;
1295 }
1296 }
1297
1298 /*
1299 * Routine: wait_queue_pull_thread_locked
1300 * Purpose:
1301 * Pull a thread that was previously "peeked" off the wait
1302 * queue and (possibly) unlock the waitq.
1303 * Conditions:
1304 * at splsched
1305 * wait queue locked
1306 * thread locked
1307 * Returns:
1308 * with the thread still locked.
1309 */
1310 void
1311 wait_queue_pull_thread_locked(
1312 wait_queue_t waitq,
1313 thread_t thread,
1314 boolean_t unlock)
1315 {
1316
1317 assert(thread->wait_queue == waitq);
1318
1319 remqueue(&waitq->wq_queue, (queue_entry_t)thread );
1320 thread->wait_queue = WAIT_QUEUE_NULL;
1321 thread->wait_event = NO_EVENT64;
1322 thread->at_safe_point = FALSE;
1323 if (unlock)
1324 wait_queue_unlock(waitq);
1325 }
1326
1327
1328 /*
1329 * Routine: wait_queue_select64_thread
1330 * Purpose:
1331 * Look for a thread and remove it from the queues, if
1332 * (and only if) the thread is waiting on the supplied
1333 * <wait_queue, event> pair.
1334 * Conditions:
1335 * at splsched
1336 * wait queue locked
1337 * possibly recursive
1338 * Returns:
1339 * KERN_NOT_WAITING: Thread is not waiting here.
1340 * KERN_SUCCESS: It was, and is now removed (returned locked)
1341 */
1342 static kern_return_t
1343 _wait_queue_select64_thread(
1344 wait_queue_t wq,
1345 event64_t event,
1346 thread_t thread)
1347 {
1348 wait_queue_element_t wq_element;
1349 wait_queue_element_t wqe_next;
1350 kern_return_t res = KERN_NOT_WAITING;
1351 queue_t q = &wq->wq_queue;
1352
1353 thread_lock(thread);
1354 if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1355 remqueue(q, (queue_entry_t) thread);
1356 thread->at_safe_point = FALSE;
1357 thread->wait_event = NO_EVENT64;
1358 thread->wait_queue = WAIT_QUEUE_NULL;
1359 /* thread still locked */
1360 return KERN_SUCCESS;
1361 }
1362 thread_unlock(thread);
1363
1364 /*
1365 * The wait_queue associated with the thread may be one of this
1366 * wait queue's sets. Go see. If so, removing it from
1367 * there is like removing it from here.
1368 */
1369 wq_element = (wait_queue_element_t) queue_first(q);
1370 while (!queue_end(q, (queue_entry_t)wq_element)) {
1371 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1372 wqe_next = (wait_queue_element_t)
1373 queue_next((queue_t) wq_element);
1374
1375 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1376 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1377 wait_queue_t set_queue;
1378
1379 set_queue = (wait_queue_t)wql->wql_setqueue;
1380 wait_queue_lock(set_queue);
1381 if (! wait_queue_empty(set_queue)) {
1382 res = _wait_queue_select64_thread(set_queue,
1383 event,
1384 thread);
1385 }
1386 wait_queue_unlock(set_queue);
1387 if (res == KERN_SUCCESS)
1388 return KERN_SUCCESS;
1389 }
1390 wq_element = wqe_next;
1391 }
1392 return res;
1393 }
1394
1395
1396 /*
1397 * Routine: wait_queue_wakeup64_identity_locked
1398 * Purpose:
1399 * Select a single thread that is most eligible to run and
1400 * set it running, but return the thread locked.
1401 *
1402 * Conditions:
1403 * at splsched
1404 * wait queue locked
1405 * possibly recursive
1406 * Returns:
1407 * a pointer to the locked thread that was awakened
1408 */
1409 __private_extern__ thread_t
1410 wait_queue_wakeup64_identity_locked(
1411 wait_queue_t wq,
1412 event64_t event,
1413 wait_result_t result,
1414 boolean_t unlock)
1415 {
1416 kern_return_t res;
1417 thread_t thread;
1418
1419 assert(wait_queue_held(wq));
1420
1421
1422 thread = _wait_queue_select64_one(wq, event);
1423 if (unlock)
1424 wait_queue_unlock(wq);
1425
1426 if (thread) {
1427 res = thread_go_locked(thread, result);
1428 assert(res == KERN_SUCCESS);
1429 }
1430 return thread; /* still locked if not NULL */
1431 }
1432
1433
1434 /*
1435 * Routine: wait_queue_wakeup64_one_locked
1436 * Purpose:
1437 * Select a single thread that is most eligible to run and
1438 * set it running.
1439 *
1440 * Conditions:
1441 * at splsched
1442 * wait queue locked
1443 * possibly recursive
1444 * Returns:
1445 * KERN_SUCCESS - A thread was found waiting and awakened
1446 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1447 */
1448 __private_extern__ kern_return_t
1449 wait_queue_wakeup64_one_locked(
1450 wait_queue_t wq,
1451 event64_t event,
1452 wait_result_t result,
1453 boolean_t unlock)
1454 {
1455 thread_t thread;
1456
1457 assert(wait_queue_held(wq));
1458
1459 thread = _wait_queue_select64_one(wq, event);
1460 if (unlock)
1461 wait_queue_unlock(wq);
1462
1463 if (thread) {
1464 kern_return_t res;
1465
1466 res = thread_go_locked(thread, result);
1467 assert(res == KERN_SUCCESS);
1468 thread_unlock(thread);
1469 return res;
1470 }
1471
1472 return KERN_NOT_WAITING;
1473 }
1474
1475 /*
1476 * Routine: wait_queue_wakeup_one
1477 * Purpose:
1478 * Wakeup the most appropriate thread that is in the specified
1479 * wait queue for the specified event.
1480 * Conditions:
1481 * Nothing locked
1482 * Returns:
1483 * KERN_SUCCESS - Thread was woken up
1484 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1485 */
1486 kern_return_t
1487 wait_queue_wakeup_one(
1488 wait_queue_t wq,
1489 event_t event,
1490 wait_result_t result)
1491 {
1492 thread_t thread;
1493 spl_t s;
1494
1495 if (!wait_queue_is_valid(wq)) {
1496 return KERN_INVALID_ARGUMENT;
1497 }
1498
1499 s = splsched();
1500 wait_queue_lock(wq);
1501 thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
1502 wait_queue_unlock(wq);
1503
1504 if (thread) {
1505 kern_return_t res;
1506
1507 res = thread_go_locked(thread, result);
1508 assert(res == KERN_SUCCESS);
1509 thread_unlock(thread);
1510 splx(s);
1511 return res;
1512 }
1513
1514 splx(s);
1515 return KERN_NOT_WAITING;
1516 }
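/*
 * Illustrative sketch (editor's addition): when each posted item can be
 * consumed by exactly one waiter, wake a single thread instead of the
 * whole queue.  Names outside the wait-queue API are hypothetical.
 *
 *	enqueue(&pool->work, (queue_entry_t)item);
 *	(void) wait_queue_wakeup_one(&pool->wq, (event_t)&pool->work,
 *				     THREAD_AWAKENED);
 */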
1517
1518 /*
1519 * Routine: wait_queue_wakeup64_one
1520 * Purpose:
1521 * Wakeup the most appropriate thread that is in the specified
1522 * wait queue for the specified event.
1523 * Conditions:
1524 * Nothing locked
1525 * Returns:
1526 * KERN_SUCCESS - Thread was woken up
1527 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1528 */
1529 kern_return_t
1530 wait_queue_wakeup64_one(
1531 wait_queue_t wq,
1532 event64_t event,
1533 wait_result_t result)
1534 {
1535 thread_t thread;
1536 spl_t s;
1537
1538 if (!wait_queue_is_valid(wq)) {
1539 return KERN_INVALID_ARGUMENT;
1540 }
1541 s = splsched();
1542 wait_queue_lock(wq);
1543 thread = _wait_queue_select64_one(wq, event);
1544 wait_queue_unlock(wq);
1545
1546 if (thread) {
1547 kern_return_t res;
1548
1549 res = thread_go_locked(thread, result);
1550 assert(res == KERN_SUCCESS);
1551 thread_unlock(thread);
1552 splx(s);
1553 return res;
1554 }
1555
1556 splx(s);
1557 return KERN_NOT_WAITING;
1558 }
1559
1560
1561 /*
1562 * Routine: wait_queue_wakeup64_thread_locked
1563 * Purpose:
1564 * Wakeup the particular thread that was specified if and only
1565 * if it was in this wait queue (or one of its set queues)
1566 * and waiting on the specified event.
1567 *
1568 * This is much safer than just removing the thread from
1569 * whatever wait queue it happens to be on. For instance, it
1570 * may have already been awoken from the wait you intended to
1571 * interrupt and waited on something else (like another
1572 * semaphore).
1573 * Conditions:
1574 * at splsched
1575 * wait queue already locked (may be released).
1576 * Returns:
1577 * KERN_SUCCESS - the thread was found waiting and awakened
1578 * KERN_NOT_WAITING - the thread was not waiting here
1579 */
1580 __private_extern__ kern_return_t
1581 wait_queue_wakeup64_thread_locked(
1582 wait_queue_t wq,
1583 event64_t event,
1584 thread_t thread,
1585 wait_result_t result,
1586 boolean_t unlock)
1587 {
1588 kern_return_t res;
1589
1590 assert(wait_queue_held(wq));
1591
1592 /*
1593 * See if the thread was still waiting there. If so, it got
1594 * dequeued and returned locked.
1595 */
1596 res = _wait_queue_select64_thread(wq, event, thread);
1597 if (unlock)
1598 wait_queue_unlock(wq);
1599
1600 if (res != KERN_SUCCESS)
1601 return KERN_NOT_WAITING;
1602
1603 res = thread_go_locked(thread, result);
1604 assert(res == KERN_SUCCESS);
1605 thread_unlock(thread);
1606 return res;
1607 }
1608
1609 /*
1610 * Routine: wait_queue_wakeup_thread
1611 * Purpose:
1612 * Wakeup the particular thread that was specified if and only
1613 * if it was in this wait queue (or one of its set queues)
1614 * and waiting on the specified event.
1615 *
1616 * This is much safer than just removing the thread from
1617 * whatever wait queue it happens to be on. For instance, it
1618 * may have already been awoken from the wait you intended to
1619 * interrupt and waited on something else (like another
1620 * semaphore).
1621 * Conditions:
1622 * nothing of interest locked
1623 * spl is raised internally (callers need not be at splsched)
1624 * Returns:
1625 * KERN_SUCCESS - the thread was found waiting and awakened
1626 * KERN_NOT_WAITING - the thread was not waiting here
1627 */
1628 kern_return_t
1629 wait_queue_wakeup_thread(
1630 wait_queue_t wq,
1631 event_t event,
1632 thread_t thread,
1633 wait_result_t result)
1634 {
1635 kern_return_t res;
1636 spl_t s;
1637
1638 if (!wait_queue_is_valid(wq)) {
1639 return KERN_INVALID_ARGUMENT;
1640 }
1641
1642 s = splsched();
1643 wait_queue_lock(wq);
1644 res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
1645 wait_queue_unlock(wq);
1646
1647 if (res == KERN_SUCCESS) {
1648 res = thread_go_locked(thread, result);
1649 assert(res == KERN_SUCCESS);
1650 thread_unlock(thread);
1651 splx(s);
1652 return res;
1653 }
1654 splx(s);
1655 return KERN_NOT_WAITING;
1656 }
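/*
 * Illustrative sketch (editor's addition): waking one specific thread, for
 * example to cancel its wait, without disturbing other waiters.  As the
 * comment above explains, this succeeds only if the thread is still
 * waiting on this particular <wait queue, event> pair.  my_event and
 * victim_thread are hypothetical.
 *
 *	kern_return_t kr;
 *
 *	kr = wait_queue_wakeup_thread(wq, (event_t)my_event, victim_thread,
 *				      THREAD_INTERRUPTED);
 *	if (kr == KERN_NOT_WAITING) {
 *		... the thread was already awakened by someone else ...
 *	}
 */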
1657
1658 /*
1659 * Routine: wait_queue_wakeup64_thread
1660 * Purpose:
1661 * Wakeup the particular thread that was specified if and only
1662 * if it was in this wait queue (or one of its set queues)
1663 * and waiting on the specified event.
1664 *
1665 * This is much safer than just removing the thread from
1666 * whatever wait queue it happens to be on. For instance, it
1667 * may have already been awoken from the wait you intended to
1668 * interrupt and waited on something else (like another
1669 * semaphore).
1670 * Conditions:
1671 * nothing of interest locked
1672 * spl is raised internally (callers need not be at splsched)
1673 * Returns:
1674 * KERN_SUCCESS - the thread was found waiting and awakened
1675 * KERN_NOT_WAITING - the thread was not waiting here
1676 */
1677 kern_return_t
1678 wait_queue_wakeup64_thread(
1679 wait_queue_t wq,
1680 event64_t event,
1681 thread_t thread,
1682 wait_result_t result)
1683 {
1684 kern_return_t res;
1685 spl_t s;
1686
1687 if (!wait_queue_is_valid(wq)) {
1688 return KERN_INVALID_ARGUMENT;
1689 }
1690
1691 s = splsched();
1692 wait_queue_lock(wq);
1693 res = _wait_queue_select64_thread(wq, event, thread);
1694 wait_queue_unlock(wq);
1695
1696 if (res == KERN_SUCCESS) {
1697 res = thread_go_locked(thread, result);
1698 assert(res == KERN_SUCCESS);
1699 thread_unlock(thread);
1700 splx(s);
1701 return res;
1702 }
1703 splx(s);
1704 return KERN_NOT_WAITING;
1705 }