/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *      File:   wait_queue.c (adapted from sched_prim.c)
 *      Author: Avadis Tevanian, Jr.
 *      Date:   1986
 *
 *      Primitives for manipulating wait queues: either global
 *      ones from sched_prim.c, or private ones associated with
 *      particular structures (ports, semaphores, etc.).
 */

#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>

/* forward declarations */
static boolean_t wait_queue_member_locked(
                        wait_queue_t wq,
                        wait_queue_set_t wq_set);

void wait_queue_unlink_one(
                        wait_queue_t wq,
                        wait_queue_set_t *wq_setp);

kern_return_t wait_queue_set_unlink_all_nofree(
                        wait_queue_set_t wq_set);

/*
 *      Routine:        wait_queue_init
 *      Purpose:
 *              Initialize a previously allocated wait queue.
 *      Returns:
 *              KERN_SUCCESS - The wait_queue_t was initialized
 *              KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
        wait_queue_t wq,
        int policy)
{
        if ((policy & SYNC_POLICY_ORDER_MASK) != SYNC_POLICY_FIFO)
                return KERN_INVALID_ARGUMENT;

        wq->wq_fifo = TRUE;
        wq->wq_type = _WAIT_QUEUE_inited;
        queue_init(&wq->wq_queue);
        hw_lock_init(&wq->wq_interlock);
        return KERN_SUCCESS;
}

/*
 *      Routine:        wait_queue_alloc
 *      Purpose:
 *              Allocate and initialize a wait queue for use outside
 *              of the Mach part of the kernel.
 *      Conditions:
 *              Nothing locked - can block.
 *      Returns:
 *              The allocated and initialized wait queue
 *              WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
        int policy)
{
        wait_queue_t wq;
        kern_return_t ret;

        wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
        if (wq != WAIT_QUEUE_NULL) {
                ret = wait_queue_init(wq, policy);
                if (ret != KERN_SUCCESS) {
                        kfree(wq, sizeof(struct wait_queue));
                        wq = WAIT_QUEUE_NULL;
                }
        }
        return wq;
}

/*
 *      Routine:        wait_queue_free
 *      Purpose:
 *              Free an allocated wait queue.
 *      Conditions:
 *              May block.
 */
kern_return_t
wait_queue_free(
        wait_queue_t wq)
{
        if (!wait_queue_is_queue(wq))
                return KERN_INVALID_ARGUMENT;
        if (!queue_empty(&wq->wq_queue))
                return KERN_FAILURE;
        kfree(wq, sizeof(struct wait_queue));
        return KERN_SUCCESS;
}

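/*
 * A minimal usage sketch (not part of the original file): the
 * allocate/free pair above is meant for callers outside the Mach
 * core.  Illustrative only; assumes a context that may block.
 */
#if 0   /* illustrative sketch only */
static void
wait_queue_lifecycle_example(void)
{
        /* allocate a FIFO-ordered wait queue; may block in kalloc() */
        wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);

        if (wq == WAIT_QUEUE_NULL)
                return;         /* resource shortage */

        /* ... wait on and wake up the queue here ... */

        /* freeing fails with KERN_FAILURE if threads are still queued */
        (void) wait_queue_free(wq);
}
#endif
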
/*
 *      Routine:        wait_queue_set_init
 *      Purpose:
 *              Initialize a previously allocated wait queue set.
 *      Returns:
 *              KERN_SUCCESS - The wait_queue_set_t was initialized
 *              KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
        wait_queue_set_t wqset,
        int policy)
{
        kern_return_t ret;

        ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
        if (ret != KERN_SUCCESS)
                return ret;

        wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
        if (policy & SYNC_POLICY_PREPOST)
                wqset->wqs_wait_queue.wq_isprepost = TRUE;
        else
                wqset->wqs_wait_queue.wq_isprepost = FALSE;
        queue_init(&wqset->wqs_setlinks);
        wqset->wqs_refcount = 0;
        return KERN_SUCCESS;
}

kern_return_t
wait_queue_sub_init(
        wait_queue_set_t wqset,
        int policy)
{
        return wait_queue_set_init(wqset, policy);
}

kern_return_t
wait_queue_sub_clearrefs(
        wait_queue_set_t wq_set)
{
        if (!wait_queue_is_set(wq_set))
                return KERN_INVALID_ARGUMENT;

        wqs_lock(wq_set);
        wq_set->wqs_refcount = 0;
        wqs_unlock(wq_set);
        return KERN_SUCCESS;
}

/*
 *      Routine:        wait_queue_set_alloc
 *      Purpose:
 *              Allocate and initialize a wait queue set for
 *              use outside of the Mach part of the kernel.
 *      Conditions:
 *              May block.
 *      Returns:
 *              The allocated and initialized wait queue set
 *              WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
        int policy)
{
        wait_queue_set_t wq_set;

        wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
        if (wq_set != WAIT_QUEUE_SET_NULL) {
                kern_return_t ret;

                ret = wait_queue_set_init(wq_set, policy);
                if (ret != KERN_SUCCESS) {
                        kfree(wq_set, sizeof(struct wait_queue_set));
                        wq_set = WAIT_QUEUE_SET_NULL;
                }
        }
        return wq_set;
}

/*
 *      Routine:        wait_queue_set_free
 *      Purpose:
 *              Free an allocated wait queue set
 *      Conditions:
 *              May block.
 */
kern_return_t
wait_queue_set_free(
        wait_queue_set_t wq_set)
{
        if (!wait_queue_is_set(wq_set))
                return KERN_INVALID_ARGUMENT;

        if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
                return KERN_FAILURE;

        kfree(wq_set, sizeof(struct wait_queue_set));
        return KERN_SUCCESS;
}

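/*
 * A minimal sketch (not part of the original file) of building a
 * prepost wait-queue set.  SYNC_POLICY_PREPOST makes wakeups posted
 * to an empty member queue stick in wqs_refcount, as the selection
 * code later in this file shows.
 */
#if 0   /* illustrative sketch only */
static wait_queue_set_t
make_prepost_set_example(void)
{
        /* may return WAIT_QUEUE_SET_NULL on a resource shortage */
        return wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
}
#endif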

/*
 *
 *      Routine:        wait_queue_set_size
 *      Routine:        wait_queue_link_size
 *      Purpose:
 *              Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }

/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)

#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
        WQASSERT(((wqe)->wqe_queue == (wq) && \
            queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
            "wait queue element list corruption: wq=%#x, wqe=%#x", \
            (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
        ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
        (queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
        ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
        (queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
        WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
            ((wql)->wql_setqueue == (wqs)) && \
            ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
            (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
            "wait queue set links corruption: wqs=%#x, wql=%#x", \
            (wqs), (wql))

#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
        queue_t q2 = &(wq)->wq_queue; \
        wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
        while (!queue_end(q2, (queue_entry_t)wqe2)) { \
                WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
                wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
        } \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
        queue_t q2 = &(wqs)->wqs_setlinks; \
        wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
        while (!queue_end(q2, (queue_entry_t)wql2)) { \
                WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
                wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
        } \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */

/*
 *      Routine:        wait_queue_member_locked
 *      Purpose:
 *              Indicate if this wait queue is a member of the given set queue
 *      Conditions:
 *              The wait queue is locked
 *              The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
        wait_queue_t wq,
        wait_queue_set_t wq_set)
{
        wait_queue_element_t wq_element;
        queue_t q;

        assert(wait_queue_held(wq));
        assert(wait_queue_is_set(wq_set));

        q = &wq->wq_queue;

        wq_element = (wait_queue_element_t) queue_first(q);
        while (!queue_end(q, (queue_entry_t)wq_element)) {
                WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
                if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
                        wait_queue_link_t wql = (wait_queue_link_t)wq_element;

                        if (wql->wql_setqueue == wq_set)
                                return TRUE;
                }
                wq_element = (wait_queue_element_t)
                        queue_next((queue_t) wq_element);
        }
        return FALSE;
}


/*
 *      Routine:        wait_queue_member
 *      Purpose:
 *              Indicate if this wait queue is a member of the given set queue
 *      Conditions:
 *              The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
        wait_queue_t wq,
        wait_queue_set_t wq_set)
{
        boolean_t ret;
        spl_t s;

        if (!wait_queue_is_set(wq_set))
                return FALSE;

        s = splsched();
        wait_queue_lock(wq);
        ret = wait_queue_member_locked(wq, wq_set);
        wait_queue_unlock(wq);
        splx(s);

        return ret;
}

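/*
 * Illustrative sketch (not in the original file): checking whether a
 * wait queue has been linked into a set before unlinking it.
 */
#if 0   /* illustrative sketch only */
static void
member_check_example(wait_queue_t wq, wait_queue_set_t wq_set)
{
        if (wait_queue_member(wq, wq_set))
                (void) wait_queue_unlink(wq, wq_set);
}
#endif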

/*
 *      Routine:        wait_queue_link_noalloc
 *      Purpose:
 *              Insert a set wait queue into a wait queue.  This
 *              requires us to link the two together using a
 *              wait_queue_link structure supplied by the caller.
 *      Conditions:
 *              The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
        wait_queue_t wq,
        wait_queue_set_t wq_set,
        wait_queue_link_t wql)
{
        wait_queue_element_t wq_element;
        queue_t q;
        spl_t s;

        if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
                return KERN_INVALID_ARGUMENT;

        /*
         * There are probably fewer threads and sets associated with
         * the wait queue than there are wait queues associated with
         * the set.  So let's validate it that way.
         */
        s = splsched();
        wait_queue_lock(wq);
        q = &wq->wq_queue;
        wq_element = (wait_queue_element_t) queue_first(q);
        while (!queue_end(q, (queue_entry_t)wq_element)) {
                WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
                if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
                    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
                        wait_queue_unlock(wq);
                        splx(s);
                        return KERN_ALREADY_IN_SET;
                }
                wq_element = (wait_queue_element_t)
                        queue_next((queue_t) wq_element);
        }

        /*
         * Not already a member, so we can add it.
         */
        wqs_lock(wq_set);

        WAIT_QUEUE_SET_CHECK(wq_set);

        wql->wql_queue = wq;
        queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
        wql->wql_setqueue = wq_set;
        queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
        wql->wql_type = WAIT_QUEUE_LINK;

        wqs_unlock(wq_set);
        wait_queue_unlock(wq);
        splx(s);

        return KERN_SUCCESS;
}

/*
 *      Routine:        wait_queue_link
 *      Purpose:
 *              Insert a set wait queue into a wait queue.  This
 *              requires us to link the two together using a wait_queue_link
 *              structure that we allocate.
 *      Conditions:
 *              The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
        wait_queue_t wq,
        wait_queue_set_t wq_set)
{
        wait_queue_link_t wql;
        kern_return_t ret;

        wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
        if (wql == WAIT_QUEUE_LINK_NULL)
                return KERN_RESOURCE_SHORTAGE;

        ret = wait_queue_link_noalloc(wq, wq_set, wql);
        if (ret != KERN_SUCCESS)
                kfree(wql, sizeof(struct wait_queue_link));

        return ret;
}

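/*
 * A hedged usage sketch (not part of the original file): linking a
 * wait queue into a set with the allocating variant.  The noalloc
 * variant above exists for callers that cannot block in kalloc().
 */
#if 0   /* illustrative sketch only */
static kern_return_t
link_example(wait_queue_t wq, wait_queue_set_t wq_set)
{
        kern_return_t kr;

        /* blocking variant: allocates the link structure itself */
        kr = wait_queue_link(wq, wq_set);
        if (kr == KERN_ALREADY_IN_SET)
                return KERN_SUCCESS;    /* someone linked it first */
        return kr;      /* KERN_SUCCESS or KERN_RESOURCE_SHORTAGE */
}
#endif
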
/*
 *      Routine:        wait_queue_unlink_locked
 *      Purpose:
 *              Undo the linkage between a wait queue and a set.
 *      Conditions:
 *              Both the wait queue and the set are locked.
 */
static void
wait_queue_unlink_locked(
        wait_queue_t wq,
        wait_queue_set_t wq_set,
        wait_queue_link_t wql)
{
        assert(wait_queue_held(wq));
        assert(wait_queue_held(&wq_set->wqs_wait_queue));

        wql->wql_queue = WAIT_QUEUE_NULL;
        queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
        wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
        queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
        wql->wql_type = WAIT_QUEUE_UNLINKED;

        WAIT_QUEUE_CHECK(wq);
        WAIT_QUEUE_SET_CHECK(wq_set);
}

/*
 *      Routine:        wait_queue_unlink
 *      Purpose:
 *              Remove the linkage between a wait queue and a set,
 *              freeing the linkage structure.
 *      Conditions:
 *              The set queue must be a member of the wait queue
 */
kern_return_t
wait_queue_unlink(
        wait_queue_t wq,
        wait_queue_set_t wq_set)
{
        wait_queue_element_t wq_element;
        wait_queue_link_t wql;
        queue_t q;
        spl_t s;

        if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
                return KERN_INVALID_ARGUMENT;
        }
        s = splsched();
        wait_queue_lock(wq);

        q = &wq->wq_queue;
        wq_element = (wait_queue_element_t) queue_first(q);
        while (!queue_end(q, (queue_entry_t)wq_element)) {
                WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
                if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
                        wql = (wait_queue_link_t)wq_element;

                        if (wql->wql_setqueue == wq_set) {
                                wqs_lock(wq_set);
                                wait_queue_unlink_locked(wq, wq_set, wql);
                                wqs_unlock(wq_set);
                                wait_queue_unlock(wq);
                                splx(s);
                                kfree(wql, sizeof(struct wait_queue_link));
                                return KERN_SUCCESS;
                        }
                }
                wq_element = (wait_queue_element_t)
                        queue_next((queue_t) wq_element);
        }
        wait_queue_unlock(wq);
        splx(s);
        return KERN_NOT_IN_SET;
}


/*
 *      Routine:        wait_queue_unlinkall_nofree
 *      Purpose:
 *              Remove the linkage between a wait queue and all its
 *              sets.  The caller is responsible for freeing
 *              the wait queue link structures.
 */

kern_return_t
wait_queue_unlinkall_nofree(
        wait_queue_t wq)
{
        wait_queue_element_t wq_element;
        wait_queue_element_t wq_next_element;
        wait_queue_set_t wq_set;
        wait_queue_link_t wql;
        queue_t q;
        spl_t s;

        if (!wait_queue_is_queue(wq)) {
                return KERN_INVALID_ARGUMENT;
        }

        s = splsched();
        wait_queue_lock(wq);

        q = &wq->wq_queue;

        wq_element = (wait_queue_element_t) queue_first(q);
        while (!queue_end(q, (queue_entry_t)wq_element)) {
                WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
                wq_next_element = (wait_queue_element_t)
                        queue_next((queue_t) wq_element);

                if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
                        wql = (wait_queue_link_t)wq_element;
                        wq_set = wql->wql_setqueue;
                        wqs_lock(wq_set);
                        wait_queue_unlink_locked(wq, wq_set, wql);
                        wqs_unlock(wq_set);
                }
                wq_element = wq_next_element;
        }
        wait_queue_unlock(wq);
        splx(s);
        return(KERN_SUCCESS);
}


/*
 *      Routine:        wait_queue_unlink_all
 *      Purpose:
 *              Remove the linkage between a wait queue and all its sets.
 *              All the linkage structures are freed.
 *      Conditions:
 *              Nothing of interest locked.
 */

kern_return_t
wait_queue_unlink_all(
        wait_queue_t wq)
{
        wait_queue_element_t wq_element;
        wait_queue_element_t wq_next_element;
        wait_queue_set_t wq_set;
        wait_queue_link_t wql;
        queue_head_t links_queue_head;
        queue_t links = &links_queue_head;
        queue_t q;
        spl_t s;

        if (!wait_queue_is_queue(wq)) {
                return KERN_INVALID_ARGUMENT;
        }

        queue_init(links);

        s = splsched();
        wait_queue_lock(wq);

        q = &wq->wq_queue;

        wq_element = (wait_queue_element_t) queue_first(q);
        while (!queue_end(q, (queue_entry_t)wq_element)) {
                WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
                wq_next_element = (wait_queue_element_t)
                        queue_next((queue_t) wq_element);

                if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
                        wql = (wait_queue_link_t)wq_element;
                        wq_set = wql->wql_setqueue;
                        wqs_lock(wq_set);
                        wait_queue_unlink_locked(wq, wq_set, wql);
                        wqs_unlock(wq_set);
                        enqueue(links, &wql->wql_links);
                }
                wq_element = wq_next_element;
        }
        wait_queue_unlock(wq);
        splx(s);

        while (!queue_empty(links)) {
                wql = (wait_queue_link_t) dequeue(links);
                kfree(wql, sizeof(struct wait_queue_link));
        }

        return(KERN_SUCCESS);
}

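/*
 * Illustrative teardown sketch (not in the original file): before a
 * wait queue embedded in some object is destroyed, all of its set
 * linkages must be severed, then the queue itself freed.
 */
#if 0   /* illustrative sketch only */
static void
teardown_example(wait_queue_t wq)
{
        /* unlink from every set, freeing the link structures */
        (void) wait_queue_unlink_all(wq);

        /* now safe to free, provided no threads remain queued */
        (void) wait_queue_free(wq);
}
#endif
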
/*
 *      Routine:        wait_queue_set_unlink_all_nofree
 *      Purpose:
 *              Remove the linkage between a set wait queue and all its
 *              member wait queues.  The link structures are not freed, nor
 *              returned.  It is the caller's responsibility to track and free
 *              them.
 *      Conditions:
 *              The wait queue must be a set queue
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
        wait_queue_set_t wq_set)
{
        wait_queue_link_t wql;
        wait_queue_t wq;
        queue_t q;
        spl_t s;

        if (!wait_queue_is_set(wq_set)) {
                return KERN_INVALID_ARGUMENT;
        }

retry:
        s = splsched();
        wqs_lock(wq_set);

        q = &wq_set->wqs_setlinks;

        wql = (wait_queue_link_t)queue_first(q);
        while (!queue_end(q, (queue_entry_t)wql)) {
                WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
                wq = wql->wql_queue;
                if (wait_queue_lock_try(wq)) {
                        wait_queue_unlink_locked(wq, wq_set, wql);
                        wait_queue_unlock(wq);
                        wql = (wait_queue_link_t)queue_first(q);
                } else {
                        wqs_unlock(wq_set);
                        splx(s);
                        delay(1);
                        goto retry;
                }
        }
        wqs_unlock(wq_set);
        splx(s);

        return(KERN_SUCCESS);
}

/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
        wait_queue_set_t wq_set)
{
        return wait_queue_set_unlink_all_nofree(wq_set);
}


/*
 *      Routine:        wait_queue_set_unlink_all
 *      Purpose:
 *              Remove the linkage between a set wait queue and all its
 *              member wait queues.  The link structures are freed.
 *      Conditions:
 *              The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
        wait_queue_set_t wq_set)
{
        wait_queue_link_t wql;
        wait_queue_t wq;
        queue_t q;
        queue_head_t links_queue_head;
        queue_t links = &links_queue_head;
        spl_t s;

        if (!wait_queue_is_set(wq_set)) {
                return KERN_INVALID_ARGUMENT;
        }

        queue_init(links);

retry:
        s = splsched();
        wqs_lock(wq_set);

        q = &wq_set->wqs_setlinks;

        wql = (wait_queue_link_t)queue_first(q);
        while (!queue_end(q, (queue_entry_t)wql)) {
                WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
                wq = wql->wql_queue;
                if (wait_queue_lock_try(wq)) {
                        wait_queue_unlink_locked(wq, wq_set, wql);
                        wait_queue_unlock(wq);
                        enqueue(links, &wql->wql_links);
                        wql = (wait_queue_link_t)queue_first(q);
                } else {
                        wqs_unlock(wq_set);
                        splx(s);
                        delay(1);
                        goto retry;
                }
        }
        wqs_unlock(wq_set);
        splx(s);

        while (!queue_empty(links)) {
                wql = (wait_queue_link_t) dequeue(links);
                kfree(wql, sizeof(struct wait_queue_link));
        }
        return(KERN_SUCCESS);
}


/*
 *      Routine:        wait_queue_unlink_one
 *      Purpose:
 *              Find and unlink one set wait queue
 *      Conditions:
 *              Nothing of interest locked.
 */
void
wait_queue_unlink_one(
        wait_queue_t wq,
        wait_queue_set_t *wq_setp)
{
        wait_queue_element_t wq_element;
        queue_t q;
        spl_t s;

        s = splsched();
        wait_queue_lock(wq);

        q = &wq->wq_queue;

        wq_element = (wait_queue_element_t) queue_first(q);
        while (!queue_end(q, (queue_entry_t)wq_element)) {

                if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
                        wait_queue_link_t wql = (wait_queue_link_t)wq_element;
                        wait_queue_set_t wq_set = wql->wql_setqueue;

                        wqs_lock(wq_set);
                        wait_queue_unlink_locked(wq, wq_set, wql);
                        wqs_unlock(wq_set);
                        wait_queue_unlock(wq);
                        splx(s);
                        kfree(wql, sizeof(struct wait_queue_link));
                        *wq_setp = wq_set;
                        return;
                }

                wq_element = (wait_queue_element_t)
                        queue_next((queue_t) wq_element);
        }
        wait_queue_unlock(wq);
        splx(s);
        *wq_setp = WAIT_QUEUE_SET_NULL;
}

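/*
 * Illustrative sketch (not in the original file): draining every set
 * linkage one at a time, letting the caller observe which sets a
 * queue belonged to while unlinking.
 */
#if 0   /* illustrative sketch only */
static void
drain_links_example(wait_queue_t wq)
{
        wait_queue_set_t wq_set;

        for (;;) {
                wait_queue_unlink_one(wq, &wq_set);
                if (wq_set == WAIT_QUEUE_SET_NULL)
                        break;          /* no links remain */
                /* ... note that wq was a member of wq_set ... */
        }
}
#endif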

/*
 *      Routine:        wait_queue_assert_wait64_locked
 *      Purpose:
 *              Insert the current thread into the supplied wait queue
 *              waiting for a particular event to be posted to that queue.
 *
 *      Conditions:
 *              The wait queue is assumed locked.
 *              The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
        wait_queue_t wq,
        event64_t event,
        wait_interrupt_t interruptible,
        uint64_t deadline,
        thread_t thread)
{
        wait_result_t wait_result;

        if (!wait_queue_assert_possible(thread))
                panic("wait_queue_assert_wait64_locked");

        if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
                wait_queue_set_t wqs = (wait_queue_set_t)wq;

                if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
                        return(THREAD_AWAKENED);
        }

        /*
         * This is the extent to which we currently take scheduling attributes
         * into account.  If the thread is vm privileged, we stick it at
         * the front of the queue.  Later, these queues will honor the policy
         * value set at wait_queue_init time.
         */
        wait_result = thread_mark_wait_locked(thread, interruptible);
        if (wait_result == THREAD_WAITING) {
                if (thread->options & TH_OPT_VMPRIV)
                        enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
                else
                        enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

                thread->wait_event = event;
                thread->wait_queue = wq;

                if (deadline != 0) {
                        if (!timer_call_enter(&thread->wait_timer, deadline))
                                thread->wait_timer_active++;
                        thread->wait_timer_is_set = TRUE;
                }
        }
        return(wait_result);
}

/*
 *      Routine:        wait_queue_assert_wait
 *      Purpose:
 *              Insert the current thread into the supplied wait queue
 *              waiting for a particular event to be posted to that queue.
 *
 *      Conditions:
 *              nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
        wait_queue_t wq,
        event_t event,
        wait_interrupt_t interruptible,
        uint64_t deadline)
{
        spl_t s;
        wait_result_t ret;
        thread_t thread = current_thread();

        /* If it is an invalid wait queue, you can't wait on it */
        if (!wait_queue_is_valid(wq))
                return (thread->wait_result = THREAD_RESTART);

        s = splsched();
        wait_queue_lock(wq);
        thread_lock(thread);
        ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
                                              interruptible, deadline, thread);
        thread_unlock(thread);
        wait_queue_unlock(wq);
        splx(s);
        return(ret);
}

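/*
 * A hedged wait/wakeup sketch (not part of the original file): one
 * thread asserts a wait on a queue for an event and then blocks via
 * thread_block(); another thread later posts the event.  The `done'
 * flag and both callers are hypothetical.
 */
#if 0   /* illustrative sketch only */
static void
wait_example(wait_queue_t wq, volatile boolean_t *done)
{
        wait_result_t wr;

        wr = wait_queue_assert_wait(wq, (event_t)done,
                                    THREAD_UNINT, 0 /* no deadline */);
        if (wr == THREAD_WAITING)
                wr = thread_block(THREAD_CONTINUE_NULL);
        /* wr is THREAD_AWAKENED here (THREAD_RESTART on a bad queue) */
}

static void
wake_example(wait_queue_t wq, volatile boolean_t *done)
{
        *done = TRUE;
        (void) wait_queue_wakeup_all(wq, (event_t)done, THREAD_AWAKENED);
}
#endif
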
/*
 *      Routine:        wait_queue_assert_wait64
 *      Purpose:
 *              Insert the current thread into the supplied wait queue
 *              waiting for a particular event to be posted to that queue.
 *      Conditions:
 *              nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
        wait_queue_t wq,
        event64_t event,
        wait_interrupt_t interruptible,
        uint64_t deadline)
{
        spl_t s;
        wait_result_t ret;
        thread_t thread = current_thread();

        /* If it is an invalid wait queue, you can't wait on it */
        if (!wait_queue_is_valid(wq))
                return (thread->wait_result = THREAD_RESTART);

        s = splsched();
        wait_queue_lock(wq);
        thread_lock(thread);
        ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
        thread_unlock(thread);
        wait_queue_unlock(wq);
        splx(s);
        return(ret);
}

/*
 *      Routine:        _wait_queue_select64_all
 *      Purpose:
 *              Select all threads off a wait queue that meet the
 *              supplied criteria.
 *      Conditions:
 *              at splsched
 *              wait queue locked
 *              wake_queue initialized and ready for insertion
 *              possibly recursive
 *      Returns:
 *              a queue of locked threads
 */
static void
_wait_queue_select64_all(
        wait_queue_t wq,
        event64_t event,
        queue_t wake_queue)
{
        wait_queue_element_t wq_element;
        wait_queue_element_t wqe_next;
        queue_t q;

        q = &wq->wq_queue;

        wq_element = (wait_queue_element_t) queue_first(q);
        while (!queue_end(q, (queue_entry_t)wq_element)) {
                WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
                wqe_next = (wait_queue_element_t)
                        queue_next((queue_t) wq_element);

                /*
                 * We may have to recurse if this is a compound wait queue.
                 */
                if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
                        wait_queue_link_t wql = (wait_queue_link_t)wq_element;
                        wait_queue_t set_queue;

                        /*
                         * We have to check the set wait queue.
                         */
                        set_queue = (wait_queue_t)wql->wql_setqueue;
                        wait_queue_lock(set_queue);
                        if (set_queue->wq_isprepost) {
                                wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

                                /*
                                 * Preposting is only for sets; the wait queue
                                 * is the first element of the set.
                                 */
                                wqs->wqs_refcount++;
                        }
                        if (! wait_queue_empty(set_queue))
                                _wait_queue_select64_all(set_queue, event, wake_queue);
                        wait_queue_unlock(set_queue);
                } else {

                        /*
                         * Otherwise, it's a thread.  If it is waiting on
                         * the event we are posting to this queue, pull
                         * it off the queue and stick it in our wake_queue.
                         */
                        thread_t t = (thread_t)wq_element;

                        if (t->wait_event == event) {
                                thread_lock(t);
                                remqueue(q, (queue_entry_t) t);
                                enqueue(wake_queue, (queue_entry_t) t);
                                t->wait_queue = WAIT_QUEUE_NULL;
                                t->wait_event = NO_EVENT64;
                                t->at_safe_point = FALSE;
                                /* returned locked */
                        }
                }
                wq_element = wqe_next;
        }
}

/*
 *      Routine:        wait_queue_wakeup64_all_locked
 *      Purpose:
 *              Wakeup some number of threads that are in the specified
 *              wait queue and waiting on the specified event.
 *      Conditions:
 *              wait queue already locked (may be released).
 *      Returns:
 *              KERN_SUCCESS - Threads were woken up
 *              KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
        wait_queue_t wq,
        event64_t event,
        wait_result_t result,
        boolean_t unlock)
{
        queue_head_t wake_queue_head;
        queue_t q = &wake_queue_head;
        kern_return_t res;

        assert(wait_queue_held(wq));
        queue_init(q);

        /*
         * Select the threads that we will wake up.  The threads
         * are returned to us locked and cleanly removed from the
         * wait queue.
         */
        _wait_queue_select64_all(wq, event, q);
        if (unlock)
                wait_queue_unlock(wq);

        /*
         * For each thread, set it running.
         */
        res = KERN_NOT_WAITING;
        while (!queue_empty(q)) {
                thread_t thread = (thread_t) dequeue(q);
                res = thread_go(thread, result);
                assert(res == KERN_SUCCESS);
                thread_unlock(thread);
        }
        return res;
}


/*
 *      Routine:        wait_queue_wakeup_all
 *      Purpose:
 *              Wakeup some number of threads that are in the specified
 *              wait queue and waiting on the specified event.
 *      Conditions:
 *              Nothing locked
 *      Returns:
 *              KERN_SUCCESS - Threads were woken up
 *              KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
        wait_queue_t wq,
        event_t event,
        wait_result_t result)
{
        kern_return_t ret;
        spl_t s;

        if (!wait_queue_is_valid(wq)) {
                return KERN_INVALID_ARGUMENT;
        }

        s = splsched();
        wait_queue_lock(wq);
        ret = wait_queue_wakeup64_all_locked(
                        wq, (event64_t)((uint32_t)event),
                        result, TRUE);
        /* lock released */
        splx(s);
        return ret;
}

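/*
 * Illustrative sketch (not in the original file): broadcast versus
 * single wakeup.  wait_queue_wakeup_all() releases every waiter on
 * the event; wait_queue_wakeup_one() releases only the first eligible
 * (FIFO) waiter and reports KERN_NOT_WAITING if there was none.
 */
#if 0   /* illustrative sketch only */
static void
wakeup_one_example(wait_queue_t wq, event_t event)
{
        if (wait_queue_wakeup_one(wq, event, THREAD_AWAKENED) ==
            KERN_NOT_WAITING) {
                /* nobody was waiting on <wq, event> */
        }
}
#endif
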
/*
 *      Routine:        wait_queue_wakeup64_all
 *      Purpose:
 *              Wakeup some number of threads that are in the specified
 *              wait queue and waiting on the specified event.
 *      Conditions:
 *              Nothing locked
 *      Returns:
 *              KERN_SUCCESS - Threads were woken up
 *              KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
        wait_queue_t wq,
        event64_t event,
        wait_result_t result)
{
        kern_return_t ret;
        spl_t s;

        if (!wait_queue_is_valid(wq)) {
                return KERN_INVALID_ARGUMENT;
        }

        s = splsched();
        wait_queue_lock(wq);
        ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
        /* lock released */
        splx(s);
        return ret;
}

/*
 *      Routine:        _wait_queue_select64_one
 *      Purpose:
 *              Select the best thread off a wait queue that meets the
 *              supplied criteria.
 *      Conditions:
 *              at splsched
 *              wait queue locked
 *              possibly recursive
 *      Returns:
 *              a locked thread - if one found
 *      Note:
 *              This is where the sync policy of the wait queue comes
 *              into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
        wait_queue_t wq,
        event64_t event)
{
        wait_queue_element_t wq_element;
        wait_queue_element_t wqe_next;
        thread_t t = THREAD_NULL;
        queue_t q;

        assert(wq->wq_fifo);

        q = &wq->wq_queue;

        wq_element = (wait_queue_element_t) queue_first(q);
        while (!queue_end(q, (queue_entry_t)wq_element)) {
                WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
                wqe_next = (wait_queue_element_t)
                        queue_next((queue_t) wq_element);

                /*
                 * We may have to recurse if this is a compound wait queue.
                 */
                if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
                        wait_queue_link_t wql = (wait_queue_link_t)wq_element;
                        wait_queue_t set_queue;

                        /*
                         * We have to check the set wait queue.
                         */
                        set_queue = (wait_queue_t)wql->wql_setqueue;
                        wait_queue_lock(set_queue);
                        if (! wait_queue_empty(set_queue)) {
                                t = _wait_queue_select64_one(set_queue, event);
                        }
                        wait_queue_unlock(set_queue);
                        if (t != THREAD_NULL)
                                return t;
                } else {

                        /*
                         * Otherwise, it's a thread.  If it is waiting on
                         * the event we are posting to this queue, pull
                         * it off the queue and return it locked.
                         */
                        t = (thread_t)wq_element;
                        if (t->wait_event == event) {
                                thread_lock(t);
                                remqueue(q, (queue_entry_t) t);
                                t->wait_queue = WAIT_QUEUE_NULL;
                                t->wait_event = NO_EVENT64;
                                t->at_safe_point = FALSE;
                                return t;       /* still locked */
                        }

                        t = THREAD_NULL;
                }
                wq_element = wqe_next;
        }
        return THREAD_NULL;
}

/*
 *      Routine:        wait_queue_peek64_locked
 *      Purpose:
 *              Select the best thread from a wait queue that meets the
 *              supplied criteria, but leave it on the queue it was
 *              found on.  The thread, and the actual wait_queue the
 *              thread was found on are identified.
 *      Conditions:
 *              at splsched
 *              wait queue locked
 *              possibly recursive
 *      Returns:
 *              a locked thread - if one found
 *              a locked waitq - the one the thread was found on
 *      Note:
 *              Both the waitq the thread was actually found on, and
 *              the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
        wait_queue_t wq,
        event64_t event,
        thread_t *tp,
        wait_queue_t *wqp)
{
        wait_queue_element_t wq_element;
        wait_queue_element_t wqe_next;
        queue_t q;

        assert(wq->wq_fifo);

        *tp = THREAD_NULL;

        q = &wq->wq_queue;

        wq_element = (wait_queue_element_t) queue_first(q);
        while (!queue_end(q, (queue_entry_t)wq_element)) {
                WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
                wqe_next = (wait_queue_element_t)
                        queue_next((queue_t) wq_element);

                /*
                 * We may have to recurse if this is a compound wait queue.
                 */
                if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
                        wait_queue_link_t wql = (wait_queue_link_t)wq_element;
                        wait_queue_t set_queue;

                        /*
                         * We have to check the set wait queue.
                         */
                        set_queue = (wait_queue_t)wql->wql_setqueue;
                        wait_queue_lock(set_queue);
                        if (! wait_queue_empty(set_queue)) {
                                wait_queue_peek64_locked(set_queue, event, tp, wqp);
                        }
                        if (*tp != THREAD_NULL) {
                                if (*wqp != set_queue)
                                        wait_queue_unlock(set_queue);
                                return; /* thread and its waitq locked */
                        }

                        wait_queue_unlock(set_queue);
                } else {

                        /*
                         * Otherwise, it's a thread.  If it is waiting on
                         * the event we are posting to this queue, return
                         * it locked, but leave it on the queue.
                         */
                        thread_t t = (thread_t)wq_element;

                        if (t->wait_event == event) {
                                thread_lock(t);
                                *tp = t;
                                *wqp = wq;
                                return;
                        }
                }
                wq_element = wqe_next;
        }
}

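/*
 * Illustrative sketch (not in the original file): the peek/pull pair
 * lets a caller examine the next eligible waiter before committing to
 * dequeueing it.  Assumes the caller is at splsched with wq locked.
 */
#if 0   /* illustrative sketch only */
static void
peek_then_pull_example(wait_queue_t wq, event64_t event)
{
        thread_t thread;
        wait_queue_t found_wq = WAIT_QUEUE_NULL;

        wait_queue_peek64_locked(wq, event, &thread, &found_wq);
        if (thread == THREAD_NULL)
                return;                 /* wq is still locked */

        /* thread (locked) was found on found_wq (also locked) */
        wait_queue_pull_thread_locked(found_wq, thread, TRUE);
        if (found_wq != wq)
                wait_queue_unlock(wq);
        /* ... set the thread running here ... */
        thread_unlock(thread);
}
#endif
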
/*
 *      Routine:        wait_queue_pull_thread_locked
 *      Purpose:
 *              Pull a thread that was previously "peeked" off the wait
 *              queue and (possibly) unlock the waitq.
 *      Conditions:
 *              at splsched
 *              wait queue locked
 *              thread locked
 *      Returns:
 *              with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
        wait_queue_t waitq,
        thread_t thread,
        boolean_t unlock)
{
        assert(thread->wait_queue == waitq);

        remqueue(&waitq->wq_queue, (queue_entry_t)thread);
        thread->wait_queue = WAIT_QUEUE_NULL;
        thread->wait_event = NO_EVENT64;
        thread->at_safe_point = FALSE;
        if (unlock)
                wait_queue_unlock(waitq);
}


/*
 *      Routine:        _wait_queue_select64_thread
 *      Purpose:
 *              Look for a thread and remove it from the queues, if
 *              (and only if) the thread is waiting on the supplied
 *              <wait_queue, event> pair.
 *      Conditions:
 *              at splsched
 *              wait queue locked
 *              possibly recursive
 *      Returns:
 *              KERN_NOT_WAITING: Thread is not waiting here.
 *              KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
        wait_queue_t wq,
        event64_t event,
        thread_t thread)
{
        wait_queue_element_t wq_element;
        wait_queue_element_t wqe_next;
        kern_return_t res = KERN_NOT_WAITING;
        queue_t q = &wq->wq_queue;

        thread_lock(thread);
        if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
                remqueue(q, (queue_entry_t) thread);
                thread->at_safe_point = FALSE;
                thread->wait_event = NO_EVENT64;
                thread->wait_queue = WAIT_QUEUE_NULL;
                /* thread still locked */
                return KERN_SUCCESS;
        }
        thread_unlock(thread);

        /*
         * The wait_queue associated with the thread may be one of this
         * wait queue's sets.  Go see.  If so, removing it from
         * there is like removing it from here.
         */
        wq_element = (wait_queue_element_t) queue_first(q);
        while (!queue_end(q, (queue_entry_t)wq_element)) {
                WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
                wqe_next = (wait_queue_element_t)
                        queue_next((queue_t) wq_element);

                if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
                        wait_queue_link_t wql = (wait_queue_link_t)wq_element;
                        wait_queue_t set_queue;

                        set_queue = (wait_queue_t)wql->wql_setqueue;
                        wait_queue_lock(set_queue);
                        if (! wait_queue_empty(set_queue)) {
                                res = _wait_queue_select64_thread(set_queue,
                                                                  event,
                                                                  thread);
                        }
                        wait_queue_unlock(set_queue);
                        if (res == KERN_SUCCESS)
                                return KERN_SUCCESS;
                }
                wq_element = wqe_next;
        }
        return res;
}


/*
 *      Routine:        wait_queue_wakeup64_identity_locked
 *      Purpose:
 *              Select a single thread that is most-eligible to run and
 *              set it running, but return the thread locked.
 *
 *      Conditions:
 *              at splsched
 *              wait queue locked
 *              possibly recursive
 *      Returns:
 *              a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
        wait_queue_t wq,
        event64_t event,
        wait_result_t result,
        boolean_t unlock)
{
        kern_return_t res;
        thread_t thread;

        assert(wait_queue_held(wq));

        thread = _wait_queue_select64_one(wq, event);
        if (unlock)
                wait_queue_unlock(wq);

        if (thread) {
                res = thread_go(thread, result);
                assert(res == KERN_SUCCESS);
        }
        return thread;  /* still locked if not NULL */
}


/*
 *      Routine:        wait_queue_wakeup64_one_locked
 *      Purpose:
 *              Select a single thread that is most-eligible to run and
 *              set it running.
 *
 *      Conditions:
 *              at splsched
 *              wait queue locked
 *              possibly recursive
 *      Returns:
 *              KERN_SUCCESS - the thread was found waiting and awakened
 *              KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
        wait_queue_t wq,
        event64_t event,
        wait_result_t result,
        boolean_t unlock)
{
        thread_t thread;

        assert(wait_queue_held(wq));

        thread = _wait_queue_select64_one(wq, event);
        if (unlock)
                wait_queue_unlock(wq);

        if (thread) {
                kern_return_t res;

                res = thread_go(thread, result);
                assert(res == KERN_SUCCESS);
                thread_unlock(thread);
                return res;
        }

        return KERN_NOT_WAITING;
}

/*
 *      Routine:        wait_queue_wakeup_one
 *      Purpose:
 *              Wakeup the most appropriate thread that is in the specified
 *              wait queue for the specified event.
 *      Conditions:
 *              Nothing locked
 *      Returns:
 *              KERN_SUCCESS - Thread was woken up
 *              KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
        wait_queue_t wq,
        event_t event,
        wait_result_t result)
{
        thread_t thread;
        spl_t s;

        if (!wait_queue_is_valid(wq)) {
                return KERN_INVALID_ARGUMENT;
        }

        s = splsched();
        wait_queue_lock(wq);
        thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
        wait_queue_unlock(wq);

        if (thread) {
                kern_return_t res;

                res = thread_go(thread, result);
                assert(res == KERN_SUCCESS);
                thread_unlock(thread);
                splx(s);
                return res;
        }

        splx(s);
        return KERN_NOT_WAITING;
}

/*
 *      Routine:        wait_queue_wakeup64_one
 *      Purpose:
 *              Wakeup the most appropriate thread that is in the specified
 *              wait queue for the specified event.
 *      Conditions:
 *              Nothing locked
 *      Returns:
 *              KERN_SUCCESS - Thread was woken up
 *              KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
        wait_queue_t wq,
        event64_t event,
        wait_result_t result)
{
        thread_t thread;
        spl_t s;

        if (!wait_queue_is_valid(wq)) {
                return KERN_INVALID_ARGUMENT;
        }
        s = splsched();
        wait_queue_lock(wq);
        thread = _wait_queue_select64_one(wq, event);
        wait_queue_unlock(wq);

        if (thread) {
                kern_return_t res;

                res = thread_go(thread, result);
                assert(res == KERN_SUCCESS);
                thread_unlock(thread);
                splx(s);
                return res;
        }

        splx(s);
        return KERN_NOT_WAITING;
}


/*
 *      Routine:        wait_queue_wakeup64_thread_locked
 *      Purpose:
 *              Wakeup the particular thread that was specified if and only
 *              if it was in this wait queue (or one of its set queues)
 *              and waiting on the specified event.
 *
 *              This is much safer than just removing the thread from
 *              whatever wait queue it happens to be on.  For instance, it
 *              may have already been awoken from the wait you intended to
 *              interrupt and waited on something else (like another
 *              semaphore).
 *      Conditions:
 *              at splsched
 *              wait queue already locked (may be released).
 *      Returns:
 *              KERN_SUCCESS - the thread was found waiting and awakened
 *              KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
        wait_queue_t wq,
        event64_t event,
        thread_t thread,
        wait_result_t result,
        boolean_t unlock)
{
        kern_return_t res;

        assert(wait_queue_held(wq));

        /*
         * See if the thread was still waiting there.  If so, it got
         * dequeued and returned locked.
         */
        res = _wait_queue_select64_thread(wq, event, thread);
        if (unlock)
                wait_queue_unlock(wq);

        if (res != KERN_SUCCESS)
                return KERN_NOT_WAITING;

        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        return res;
}

/*
 *      Routine:        wait_queue_wakeup_thread
 *      Purpose:
 *              Wakeup the particular thread that was specified if and only
 *              if it was in this wait queue (or one of its set queues)
 *              and waiting on the specified event.
 *
 *              This is much safer than just removing the thread from
 *              whatever wait queue it happens to be on.  For instance, it
 *              may have already been awoken from the wait you intended to
 *              interrupt and waited on something else (like another
 *              semaphore).
 *      Conditions:
 *              nothing of interest locked
 *              spl is assumed to need raising (it is raised here)
 *      Returns:
 *              KERN_SUCCESS - the thread was found waiting and awakened
 *              KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
        wait_queue_t wq,
        event_t event,
        thread_t thread,
        wait_result_t result)
{
        kern_return_t res;
        spl_t s;

        if (!wait_queue_is_valid(wq)) {
                return KERN_INVALID_ARGUMENT;
        }

        s = splsched();
        wait_queue_lock(wq);
        res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
        wait_queue_unlock(wq);

        if (res == KERN_SUCCESS) {
                res = thread_go(thread, result);
                assert(res == KERN_SUCCESS);
                thread_unlock(thread);
                splx(s);
                return res;
        }
        splx(s);
        return KERN_NOT_WAITING;
}

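/*
 * A hedged sketch (not part of the original file): interrupting a
 * specific thread's wait.  The targeted form only succeeds if the
 * thread is still waiting on this exact <wq, event> pair, which is
 * what makes it safer than yanking the thread off an arbitrary queue.
 */
#if 0   /* illustrative sketch only */
static kern_return_t
interrupt_waiter_example(wait_queue_t wq, event_t event, thread_t thread)
{
        /* KERN_NOT_WAITING means it already woke or waits elsewhere */
        return wait_queue_wakeup_thread(wq, event, thread,
                                        THREAD_INTERRUPTED);
}
#endif
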
/*
 *      Routine:        wait_queue_wakeup64_thread
 *      Purpose:
 *              Wakeup the particular thread that was specified if and only
 *              if it was in this wait queue (or one of its set queues)
 *              and waiting on the specified event.
 *
 *              This is much safer than just removing the thread from
 *              whatever wait queue it happens to be on.  For instance, it
 *              may have already been awoken from the wait you intended to
 *              interrupt and waited on something else (like another
 *              semaphore).
 *      Conditions:
 *              nothing of interest locked
 *              spl is assumed to need raising (it is raised here)
 *      Returns:
 *              KERN_SUCCESS - the thread was found waiting and awakened
 *              KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
        wait_queue_t wq,
        event64_t event,
        thread_t thread,
        wait_result_t result)
{
        kern_return_t res;
        spl_t s;

        if (!wait_queue_is_valid(wq)) {
                return KERN_INVALID_ARGUMENT;
        }

        s = splsched();
        wait_queue_lock(wq);
        res = _wait_queue_select64_thread(wq, event, thread);
        wait_queue_unlock(wq);

        if (res == KERN_SUCCESS) {
                res = thread_go(thread, result);
                assert(res == KERN_SUCCESS);
                thread_unlock(thread);
                splx(s);
                return res;
        }
        splx(s);
        return KERN_NOT_WAITING;
}