1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_FREE_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55 /*
56 * File: wait_queue.c (adapted from sched_prim.c)
57 * Author: Avadis Tevanian, Jr.
58 * Date: 1986
59 *
60 * Primitives for manipulating wait queues: either global
61 * ones from sched_prim.c, or private ones associated with
62 * particular structures (ports, semaphores, etc.).
63 */
64
65 #include <kern/kern_types.h>
66 #include <kern/simple_lock.h>
67 #include <kern/kalloc.h>
68 #include <kern/queue.h>
69 #include <kern/spl.h>
70 #include <mach/sync_policy.h>
71 #include <kern/sched_prim.h>
72
73 #include <kern/wait_queue.h>
74
75 /*
76 * Routine: wait_queue_init
77 * Purpose:
78 * Initialize a previously allocated wait queue.
79 * Returns:
80 * KERN_SUCCESS - The wait_queue_t was initialized
81 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
82 */
83 kern_return_t
84 wait_queue_init(
85 wait_queue_t wq,
86 int policy)
87 {
88 if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
89 return KERN_INVALID_ARGUMENT;
90
91 wq->wq_fifo = TRUE;
92 wq->wq_type = _WAIT_QUEUE_inited;
93 queue_init(&wq->wq_queue);
94 hw_lock_init(&wq->wq_interlock);
95 return KERN_SUCCESS;
96 }
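/*
 * Illustrative sketch (not part of the original source): embedding a
 * wait queue inside a private kernel structure and initializing it,
 * as the file header describes for ports, semaphores, etc.  The
 * containing structure "my_object" is hypothetical.
 */
#if 0
struct my_object {
	struct wait_queue	mo_wait_queue;	/* threads blocked on this object */
	int			mo_state;
};

static kern_return_t
my_object_setup(struct my_object *mo)
{
	/* only SYNC_POLICY_FIFO ordering is accepted here */
	return wait_queue_init(&mo->mo_wait_queue, SYNC_POLICY_FIFO);
}
#endif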
97
98 /*
99 * Routine: wait_queue_alloc
100 * Purpose:
101 * Allocate and initialize a wait queue for use outside
102 * of the mach part of the kernel.
103 * Conditions:
104 * Nothing locked - can block.
105 * Returns:
106 * The allocated and initialized wait queue
107 * WAIT_QUEUE_NULL if there is a resource shortage
108 */
109 wait_queue_t
110 wait_queue_alloc(
111 int policy)
112 {
113 wait_queue_t wq;
114 kern_return_t ret;
115
116 wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
117 if (wq != WAIT_QUEUE_NULL) {
118 ret = wait_queue_init(wq, policy);
119 if (ret != KERN_SUCCESS) {
120 kfree((vm_offset_t)wq, sizeof(struct wait_queue));
121 wq = WAIT_QUEUE_NULL;
122 }
123 }
124 return wq;
125 }
126
127 /*
128 * Routine: wait_queue_free
129 * Purpose:
130 * Free an allocated wait queue.
131 * Conditions:
132 * May block.
133 */
134 kern_return_t
135 wait_queue_free(
136 wait_queue_t wq)
137 {
138 if (!wait_queue_is_queue(wq))
139 return KERN_INVALID_ARGUMENT;
140 if (!queue_empty(&wq->wq_queue))
141 return KERN_FAILURE;
142 kfree((vm_offset_t)wq, sizeof(struct wait_queue));
143 return KERN_SUCCESS;
144 }
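/*
 * Illustrative sketch (not part of the original source): the dynamic
 * allocation path.  wait_queue_alloc() may return WAIT_QUEUE_NULL on a
 * resource shortage, and wait_queue_free() refuses to free a queue
 * that still has elements queued on it.
 */
#if 0
static void
wait_queue_alloc_free_example(void)
{
	wait_queue_t wq;

	wq = wait_queue_alloc(SYNC_POLICY_FIFO);
	if (wq == WAIT_QUEUE_NULL)
		return;					/* resource shortage */

	/* ... assert waits and post wakeups on wq ... */

	if (wait_queue_free(wq) != KERN_SUCCESS) {
		/* KERN_FAILURE: threads or set links are still queued */
	}
}
#endif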
145
146 /*
147 * Routine: wait_queue_set_init
148 * Purpose:
149 * Initialize a previously allocated wait queue set.
150 * Returns:
151 * KERN_SUCCESS - The wait_queue_set_t was initialized
152 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
153 */
154 kern_return_t
155 wait_queue_set_init(
156 wait_queue_set_t wqset,
157 int policy)
158 {
159 kern_return_t ret;
160
161 ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
162 if (ret != KERN_SUCCESS)
163 return ret;
164
165 wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
166 if (policy & SYNC_POLICY_PREPOST)
167 wqset->wqs_wait_queue.wq_isprepost = TRUE;
168 else
169 wqset->wqs_wait_queue.wq_isprepost = FALSE;
170 queue_init(&wqset->wqs_setlinks);
171 wqset->wqs_refcount = 0;
172 return KERN_SUCCESS;
173 }
174
175 /* legacy API */
176 kern_return_t
177 wait_queue_sub_init(
178 wait_queue_set_t wqset,
179 int policy)
180 {
181 return wait_queue_set_init(wqset, policy);
182 }
183
184 /*
185 * Routine: wait_queue_set_alloc
186 * Purpose:
187 * Allocate and initialize a wait queue set for
188 * use outside of the mach part of the kernel.
189 * Conditions:
190 * May block.
191 * Returns:
192 * The allocated and initialized wait queue set
193 * WAIT_QUEUE_SET_NULL if there is a resource shortage
194 */
195 wait_queue_set_t
196 wait_queue_set_alloc(
197 int policy)
198 {
199 wait_queue_set_t wq_set;
200
201 wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
202 if (wq_set != WAIT_QUEUE_SET_NULL) {
203 kern_return_t ret;
204
205 ret = wait_queue_set_init(wq_set, policy);
206 if (ret != KERN_SUCCESS) {
207 kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
208 wq_set = WAIT_QUEUE_SET_NULL;
209 }
210 }
211 return wq_set;
212 }
213
214 /*
215 * Routine: wait_queue_set_free
216 * Purpose:
217 * Free an allocated wait queue set
218 * Conditions:
219 * May block.
220 */
221 kern_return_t
222 wait_queue_set_free(
223 wait_queue_set_t wq_set)
224 {
225 if (!wait_queue_is_set(wq_set))
226 return KERN_INVALID_ARGUMENT;
227
228 if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
229 return KERN_FAILURE;
230
231 kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
232 return KERN_SUCCESS;
233 }
234
235 kern_return_t
236 wait_queue_sub_clearrefs(
237 wait_queue_set_t wq_set)
238 {
239 if (!wait_queue_is_set(wq_set))
240 return KERN_INVALID_ARGUMENT;
241
242 wqs_lock(wq_set);
243 wq_set->wqs_refcount = 0;
244 wqs_unlock(wq_set);
245 return KERN_SUCCESS;
246 }
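/*
 * Illustrative sketch (not part of the original source): allocating a
 * preposting wait queue set and clearing its accumulated prepost
 * references.  With SYNC_POLICY_PREPOST, a later assert_wait on the set
 * returns THREAD_AWAKENED immediately while wqs_refcount is non-zero.
 */
#if 0
static void
wait_queue_set_example(void)
{
	wait_queue_set_t wq_set;

	wq_set = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
	if (wq_set == WAIT_QUEUE_SET_NULL)
		return;					/* resource shortage */

	/* ... link member wait queues and wait on the set ... */

	wait_queue_sub_clearrefs(wq_set);	/* forget pending preposts */
	wait_queue_set_free(wq_set);
}
#endif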
247
248 /*
249 *
250 * Routine: wait_queue_set_size
251 * Routine: wait_queue_link_size
252 * Purpose:
253 * Return the size of opaque wait queue structures
254 */
255 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
256 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
257
258 /* declare a unique type for wait queue link structures */
259 static unsigned int _wait_queue_link;
260 static unsigned int _wait_queue_unlinked;
261
262 #define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
263 #define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
264
265 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
266 WQASSERT(((wqe)->wqe_queue == (wq) && \
267 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
268 "wait queue element list corruption: wq=%#x, wqe=%#x", \
269 (wq), (wqe))
270
271 #define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
272 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
273 (queue_t)(wql) : &(wql)->wql_setlinks)))
274
275 #define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
276 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
277 (queue_t)(wql) : &(wql)->wql_setlinks)))
278
279 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
280 WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
281 ((wql)->wql_setqueue == (wqs)) && \
282 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
283 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
284 "wait queue set links corruption: wqs=%#x, wql=%#x", \
285 (wqs), (wql))
286
287 #if defined(_WAIT_QUEUE_DEBUG_)
288
289 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
290
291 #define WAIT_QUEUE_CHECK(wq) \
292 MACRO_BEGIN \
293 queue_t q2 = &(wq)->wq_queue; \
294 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
295 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
296 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
297 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
298 } \
299 MACRO_END
300
301 #define WAIT_QUEUE_SET_CHECK(wqs) \
302 MACRO_BEGIN \
303 queue_t q2 = &(wqs)->wqs_setlinks; \
304 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
305 while (!queue_end(q2, (queue_entry_t)wql2)) { \
306 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
307 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
308 } \
309 MACRO_END
310
311 #else /* !_WAIT_QUEUE_DEBUG_ */
312
313 #define WQASSERT(e, s, p0, p1) assert(e)
314
315 #define WAIT_QUEUE_CHECK(wq)
316 #define WAIT_QUEUE_SET_CHECK(wqs)
317
318 #endif /* !_WAIT_QUEUE_DEBUG_ */
319
320 /*
321 * Routine: wait_queue_member_locked
322 * Purpose:
323 * Indicate if this set queue is a member of the queue
324 * Conditions:
325 * The wait queue is locked
326 * The set queue is just that, a set queue
327 */
328 __private_extern__ boolean_t
329 wait_queue_member_locked(
330 wait_queue_t wq,
331 wait_queue_set_t wq_set)
332 {
333 wait_queue_element_t wq_element;
334 queue_t q;
335
336 assert(wait_queue_held(wq));
337 assert(wait_queue_is_set(wq_set));
338
339 q = &wq->wq_queue;
340
341 wq_element = (wait_queue_element_t) queue_first(q);
342 while (!queue_end(q, (queue_entry_t)wq_element)) {
343 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
344 if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
345 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
346
347 if (wql->wql_setqueue == wq_set)
348 return TRUE;
349 }
350 wq_element = (wait_queue_element_t)
351 queue_next((queue_t) wq_element);
352 }
353 return FALSE;
354 }
355
356
357 /*
358 * Routine: wait_queue_member
359 * Purpose:
360 * Indicate if this set queue is a member of the queue
361 * Conditions:
362 * The set queue is just that, a set queue
363 */
364 boolean_t
365 wait_queue_member(
366 wait_queue_t wq,
367 wait_queue_set_t wq_set)
368 {
369 boolean_t ret;
370 spl_t s;
371
372 if (!wait_queue_is_set(wq_set))
373 return FALSE;
374
375 s = splsched();
376 wait_queue_lock(wq);
377 ret = wait_queue_member_locked(wq, wq_set);
378 wait_queue_unlock(wq);
379 splx(s);
380
381 return ret;
382 }
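/*
 * Illustrative sketch (not part of the original source): a caller can
 * use wait_queue_member() to make linking idempotent, instead of
 * relying on the KERN_ALREADY_IN_SET return from wait_queue_link().
 */
#if 0
static kern_return_t
wait_queue_link_if_needed(wait_queue_t wq, wait_queue_set_t wq_set)
{
	if (wait_queue_member(wq, wq_set))
		return KERN_SUCCESS;			/* already linked */
	return wait_queue_link(wq, wq_set);
}
#endif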
383
384
385 /*
386 * Routine: wait_queue_link_noalloc
387 * Purpose:
388 * Insert a set wait queue into a wait queue. This
389 * requires us to link the two together using a wait_queue_link
390 * structure supplied by the caller (rather than one we allocate).
391 * Conditions:
392 * The wait queue being inserted must be inited as a set queue
393 */
394 kern_return_t
395 wait_queue_link_noalloc(
396 wait_queue_t wq,
397 wait_queue_set_t wq_set,
398 wait_queue_link_t wql)
399 {
400 wait_queue_element_t wq_element;
401 queue_t q;
402 spl_t s;
403
404 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
405 return KERN_INVALID_ARGUMENT;
406
407 /*
408 * There are probably fewer threads and sets associated with
409 * the wait queue than there are wait queues associated with
410 * the set. So let's validate it that way.
411 */
412 s = splsched();
413 wait_queue_lock(wq);
414 q = &wq->wq_queue;
415 wq_element = (wait_queue_element_t) queue_first(q);
416 while (!queue_end(q, (queue_entry_t)wq_element)) {
417 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
418 if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
419 ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
420 wait_queue_unlock(wq);
421 splx(s);
422 return KERN_ALREADY_IN_SET;
423 }
424 wq_element = (wait_queue_element_t)
425 queue_next((queue_t) wq_element);
426 }
427
428 /*
429 * Not already a member, so we can add it.
430 */
431 wqs_lock(wq_set);
432
433 WAIT_QUEUE_SET_CHECK(wq_set);
434
435 wql->wql_queue = wq;
436 queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
437 wql->wql_setqueue = wq_set;
438 queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
439 wql->wql_type = WAIT_QUEUE_LINK;
440
441 wqs_unlock(wq_set);
442 wait_queue_unlock(wq);
443 splx(s);
444
445 return KERN_SUCCESS;
446 }
447
448 /*
449 * Routine: wait_queue_link
450 * Purpose:
451 * Insert a set wait queue into a wait queue. This
452 * requires us to link the two together using a wait_queue_link
453 * structure that we allocate.
454 * Conditions:
455 * The wait queue being inserted must be inited as a set queue
456 */
457 kern_return_t
458 wait_queue_link(
459 wait_queue_t wq,
460 wait_queue_set_t wq_set)
461 {
462 wait_queue_link_t wql;
463 kern_return_t ret;
464
465 wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
466 if (wql == WAIT_QUEUE_LINK_NULL)
467 return KERN_RESOURCE_SHORTAGE;
468
469 ret = wait_queue_link_noalloc(wq, wq_set, wql);
470 if (ret != KERN_SUCCESS)
471 kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
472
473 return ret;
474 }
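/*
 * Illustrative sketch (not part of the original source): the two linking
 * styles.  wait_queue_link() allocates the link structure itself, while
 * wait_queue_link_noalloc() uses one the caller obtained elsewhere (for
 * example, a buffer of wait_queue_link_size() bytes).
 */
#if 0
/* simple form: the link structure is kalloc'd on our behalf */
static kern_return_t
link_simple_example(wait_queue_t wq, wait_queue_set_t wq_set)
{
	return wait_queue_link(wq, wq_set);	/* may be KERN_ALREADY_IN_SET */
}

/* no-alloc form: the caller supplies (and, on failure, frees) the link */
static kern_return_t
link_preallocated_example(wait_queue_t wq, wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t kr;

	wql = (wait_queue_link_t) kalloc(wait_queue_link_size());
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;
	kr = wait_queue_link_noalloc(wq, wq_set, wql);
	if (kr != KERN_SUCCESS)
		kfree((vm_offset_t)wql, wait_queue_link_size());
	return kr;
}
#endif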
475
476
477 /*
478 * Routine: wait_queue_unlink_locked
479 * Purpose:
480 * Undo the linkage between a wait queue and a set.
481 */
482 static void
483 wait_queue_unlink_locked(
484 wait_queue_t wq,
485 wait_queue_set_t wq_set,
486 wait_queue_link_t wql)
487 {
488 assert(wait_queue_held(wq));
489 assert(wait_queue_held(&wq_set->wqs_wait_queue));
490
491 wql->wql_queue = WAIT_QUEUE_NULL;
492 queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
493 wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
494 queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
495 wql->wql_type = WAIT_QUEUE_UNLINKED;
496
497 WAIT_QUEUE_CHECK(wq);
498 WAIT_QUEUE_SET_CHECK(wq_set);
499 }
500
501 /*
502 * Routine: wait_queue_unlink
503 * Purpose:
504 * Remove the linkage between a wait queue and a set,
505 * freeing the linkage structure.
506 * Conditions:
507 * The set queue must actually be linked into the wait queue
508 */
509 kern_return_t
510 wait_queue_unlink(
511 wait_queue_t wq,
512 wait_queue_set_t wq_set)
513 {
514 wait_queue_element_t wq_element;
515 wait_queue_link_t wql;
516 queue_t q;
517 spl_t s;
518
519 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
520 return KERN_INVALID_ARGUMENT;
521 }
522 s = splsched();
523 wait_queue_lock(wq);
524
525 q = &wq->wq_queue;
526 wq_element = (wait_queue_element_t) queue_first(q);
527 while (!queue_end(q, (queue_entry_t)wq_element)) {
528 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
529 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
530 wql = (wait_queue_link_t)wq_element;
531
532 if (wql->wql_setqueue == wq_set) {
533 wqs_lock(wq_set);
534 wait_queue_unlink_locked(wq, wq_set, wql);
535 wqs_unlock(wq_set);
536 wait_queue_unlock(wq);
537 splx(s);
538 kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
539 return KERN_SUCCESS;
540 }
541 }
542 wq_element = (wait_queue_element_t)
543 queue_next((queue_t) wq_element);
544 }
545 wait_queue_unlock(wq);
546 splx(s);
547 return KERN_NOT_IN_SET;
548 }
549
550
551 /*
552 * Routine: wait_queue_unlinkall_nofree
553 * Purpose:
554 * Remove the linkage between a wait queue and all its
555 * sets. The caller is responsible for freeing
556 * the wait queue link structures.
557 */
558
559 kern_return_t
560 wait_queue_unlinkall_nofree(
561 wait_queue_t wq)
562 {
563 wait_queue_element_t wq_element;
564 wait_queue_element_t wq_next_element;
565 wait_queue_set_t wq_set;
566 wait_queue_link_t wql;
567 queue_head_t links_queue_head;
568 queue_t links = &links_queue_head;
569 queue_t q;
570 spl_t s;
571
572 if (!wait_queue_is_queue(wq)) {
573 return KERN_INVALID_ARGUMENT;
574 }
575
576 queue_init(links);
577
578 s = splsched();
579 wait_queue_lock(wq);
580
581 q = &wq->wq_queue;
582
583 wq_element = (wait_queue_element_t) queue_first(q);
584 while (!queue_end(q, (queue_entry_t)wq_element)) {
585 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
586 wq_next_element = (wait_queue_element_t)
587 queue_next((queue_t) wq_element);
588
589 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
590 wql = (wait_queue_link_t)wq_element;
591 wq_set = wql->wql_setqueue;
592 wqs_lock(wq_set);
593 wait_queue_unlink_locked(wq, wq_set, wql);
594 wqs_unlock(wq_set);
595 }
596 wq_element = wq_next_element;
597 }
598 wait_queue_unlock(wq);
599 splx(s);
600 return(KERN_SUCCESS);
601 }
602
603
604 /*
605 * Routine: wait_queue_unlink_all
606 * Purpose:
607 * Remove the linkage between a wait queue and all its sets.
608 * All the linkage structures are freed.
609 * Conditions:
610 * Nothing of interest locked.
611 */
612
613 kern_return_t
614 wait_queue_unlink_all(
615 wait_queue_t wq)
616 {
617 wait_queue_element_t wq_element;
618 wait_queue_element_t wq_next_element;
619 wait_queue_set_t wq_set;
620 wait_queue_link_t wql;
621 queue_head_t links_queue_head;
622 queue_t links = &links_queue_head;
623 queue_t q;
624 spl_t s;
625
626 if (!wait_queue_is_queue(wq)) {
627 return KERN_INVALID_ARGUMENT;
628 }
629
630 queue_init(links);
631
632 s = splsched();
633 wait_queue_lock(wq);
634
635 q = &wq->wq_queue;
636
637 wq_element = (wait_queue_element_t) queue_first(q);
638 while (!queue_end(q, (queue_entry_t)wq_element)) {
639 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
640 wq_next_element = (wait_queue_element_t)
641 queue_next((queue_t) wq_element);
642
643 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
644 wql = (wait_queue_link_t)wq_element;
645 wq_set = wql->wql_setqueue;
646 wqs_lock(wq_set);
647 wait_queue_unlink_locked(wq, wq_set, wql);
648 wqs_unlock(wq_set);
649 enqueue(links, &wql->wql_links);
650 }
651 wq_element = wq_next_element;
652 }
653 wait_queue_unlock(wq);
654 splx(s);
655
656 while(!queue_empty(links)) {
657 wql = (wait_queue_link_t) dequeue(links);
658 kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
659 }
660
661 return(KERN_SUCCESS);
662 }
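/*
 * Illustrative sketch (not part of the original source): tearing down a
 * dynamically allocated wait queue that may still be linked into sets.
 * wait_queue_free() fails while anything remains queued, so drop the
 * links first.
 */
#if 0
static void
wait_queue_teardown_example(wait_queue_t wq)
{
	(void) wait_queue_unlink_all(wq);	/* also frees the link structures */
	if (wait_queue_free(wq) != KERN_SUCCESS) {
		/* threads are still waiting; wake them before freeing */
	}
}
#endif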
663
664 /*
665 * Routine: wait_queue_set_unlink_all_nofree
666 * Purpose:
667 * Remove the linkage between a set wait queue and all its
668 * member wait queues. The link structures are not freed, nor
669 * returned. It is the caller's responsibility to track and free
670 * them.
671 * Conditions:
672 * The wait queue must be a set queue
673 */
674 kern_return_t
675 wait_queue_set_unlink_all_nofree(
676 wait_queue_set_t wq_set)
677 {
678 wait_queue_link_t wql;
679 wait_queue_t wq;
680 queue_t q;
681 kern_return_t kret;
682 spl_t s;
683
684 if (!wait_queue_is_set(wq_set)) {
685 return KERN_INVALID_ARGUMENT;
686 }
687
688 retry:
689 s = splsched();
690 wqs_lock(wq_set);
691
692 q = &wq_set->wqs_setlinks;
693
694 wql = (wait_queue_link_t)queue_first(q);
695 while (!queue_end(q, (queue_entry_t)wql)) {
696 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
697 wq = wql->wql_queue;
698 if (wait_queue_lock_try(wq)) {
699 wait_queue_unlink_locked(wq, wq_set, wql);
700 wait_queue_unlock(wq);
701 wql = (wait_queue_link_t)queue_first(q);
702 } else {
703 wqs_unlock(wq_set);
704 splx(s);
705 delay(1);
706 goto retry;
707 }
708 }
709 wqs_unlock(wq_set);
710 splx(s);
711
712 return(KERN_SUCCESS);
713 }
714
715 /* legacy interface naming */
716 kern_return_t
717 wait_subqueue_unlink_all(
718 wait_queue_set_t wq_set)
719 {
720 return wait_queue_set_unlink_all_nofree(wq_set);
721 }
722
723
724 /*
725 * Routine: wait_queue_set_unlink_all
726 * Purpose:
727 * Remove the linkage between a set wait queue and all its
728 * member wait queues. The link structures are freed.
729 * Conditions:
730 * The wait queue must be a set
731 */
732 kern_return_t
733 wait_queue_set_unlink_all(
734 wait_queue_set_t wq_set)
735 {
736 wait_queue_link_t wql;
737 wait_queue_t wq;
738 queue_t q;
739 queue_head_t links_queue_head;
740 queue_t links = &links_queue_head;
741 kern_return_t kret;
742 spl_t s;
743
744 if (!wait_queue_is_set(wq_set)) {
745 return KERN_INVALID_ARGUMENT;
746 }
747
748 queue_init(links);
749
750 retry:
751 s = splsched();
752 wqs_lock(wq_set);
753
754 q = &wq_set->wqs_setlinks;
755
756 wql = (wait_queue_link_t)queue_first(q);
757 while (!queue_end(q, (queue_entry_t)wql)) {
758 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
759 wq = wql->wql_queue;
760 if (wait_queue_lock_try(wq)) {
761 wait_queue_unlink_locked(wq, wq_set, wql);
762 wait_queue_unlock(wq);
763 enqueue(links, &wql->wql_links);
764 wql = (wait_queue_link_t)queue_first(q);
765 } else {
766 wqs_unlock(wq_set);
767 splx(s);
768 delay(1);
769 goto retry;
770 }
771 }
772 wqs_unlock(wq_set);
773 splx(s);
774
775 while (!queue_empty (links)) {
776 wql = (wait_queue_link_t) dequeue(links);
777 kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
778 }
779 return(KERN_SUCCESS);
780 }
781
782
783 /*
784 * Routine: wait_queue_unlink_one
785 * Purpose:
786 * Find and unlink one set wait queue
787 * Conditions:
788 * Nothing of interest locked.
789 */
790 void
791 wait_queue_unlink_one(
792 wait_queue_t wq,
793 wait_queue_set_t *wq_setp)
794 {
795 wait_queue_element_t wq_element;
796 queue_t q;
797 spl_t s;
798
799 s = splsched();
800 wait_queue_lock(wq);
801
802 q = &wq->wq_queue;
803
804 wq_element = (wait_queue_element_t) queue_first(q);
805 while (!queue_end(q, (queue_entry_t)wq_element)) {
806
807 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
808 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
809 wait_queue_set_t wq_set = wql->wql_setqueue;
810
811 wqs_lock(wq_set);
812 wait_queue_unlink_locked(wq, wq_set, wql);
813 wqs_unlock(wq_set);
814 wait_queue_unlock(wq);
815 splx(s);
816 kfree((vm_offset_t)wql,sizeof(struct wait_queue_link));
817 *wq_setp = wq_set;
818 return;
819 }
820
821 wq_element = (wait_queue_element_t)
822 queue_next((queue_t) wq_element);
823 }
824 wait_queue_unlock(wq);
825 splx(s);
826 *wq_setp = WAIT_QUEUE_SET_NULL;
827 }
828
829
830 /*
831 * Routine: wait_queue_assert_wait64_locked
832 * Purpose:
833 * Insert the current thread into the supplied wait queue
834 * waiting for a particular event to be posted to that queue.
835 *
836 * Conditions:
837 * The wait queue is assumed locked.
838 * The waiting thread is assumed locked.
839 *
840 */
841 __private_extern__ wait_result_t
842 wait_queue_assert_wait64_locked(
843 wait_queue_t wq,
844 event64_t event,
845 wait_interrupt_t interruptible,
846 thread_t thread)
847 {
848 wait_result_t wait_result;
849
850 if (!wait_queue_assert_possible(thread))
851 panic("wait_queue_assert_wait64_locked");
852
853 if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
854 wait_queue_set_t wqs = (wait_queue_set_t)wq;
855
856 if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
857 return(THREAD_AWAKENED);
858 }
859
860 /*
861 * This is the extent to which we currently take scheduling attributes
862 * into account. If the thread is vm privileged, we stick it at
863 * the front of the queue. Later, these queues will honor the policy
864 * value set at wait_queue_init time.
865 */
866 wait_result = thread_mark_wait_locked(thread, interruptible);
867 if (wait_result == THREAD_WAITING) {
868 if (thread->vm_privilege)
869 enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
870 else
871 enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
872 thread->wait_event = event;
873 thread->wait_queue = wq;
874 }
875 return(wait_result);
876 }
877
878 /*
879 * Routine: wait_queue_assert_wait
880 * Purpose:
881 * Insert the current thread into the supplied wait queue
882 * waiting for a particular event to be posted to that queue.
883 *
884 * Conditions:
885 * nothing of interest locked.
886 */
887 wait_result_t
888 wait_queue_assert_wait(
889 wait_queue_t wq,
890 event_t event,
891 wait_interrupt_t interruptible)
892 {
893 spl_t s;
894 wait_result_t ret;
895 thread_t cur_thread = current_thread();
896
897 /* If it is an invalid wait queue, you can't wait on it */
898 if (!wait_queue_is_valid(wq)) {
899 thread_t thread = current_thread();
900 return (thread->wait_result = THREAD_RESTART);
901 }
902
903 s = splsched();
904 wait_queue_lock(wq);
905 thread_lock(cur_thread);
906 ret = wait_queue_assert_wait64_locked(
907 wq, (event64_t)((uint32_t)event),
908 interruptible, cur_thread);
909 thread_unlock(cur_thread);
910 wait_queue_unlock(wq);
911 splx(s);
912 return(ret);
913 }
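/*
 * Illustrative sketch (not part of the original source): the usual
 * wait-side pattern.  Asserting the wait only marks the current thread;
 * the caller still has to block, here via thread_block() and
 * THREAD_CONTINUE_NULL from sched_prim (assumed available), and should
 * cope with an immediate THREAD_AWAKENED from a preposted set.
 */
#if 0
static wait_result_t
wait_for_event_example(wait_queue_t wq, event_t event)
{
	wait_result_t wr;

	wr = wait_queue_assert_wait(wq, event, THREAD_UNINT);
	if (wr == THREAD_WAITING)
		wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;		/* THREAD_AWAKENED, THREAD_RESTART, ... */
}
#endif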
914
915 /*
916 * Routine: wait_queue_assert_wait64
917 * Purpose:
918 * Insert the current thread into the supplied wait queue
919 * waiting for a particular event to be posted to that queue.
920 * Conditions:
921 * nothing of interest locked.
922 */
923 wait_result_t
924 wait_queue_assert_wait64(
925 wait_queue_t wq,
926 event64_t event,
927 wait_interrupt_t interruptible)
928 {
929 spl_t s;
930 wait_result_t ret;
931 thread_t cur_thread = current_thread();
932
933 /* If it is an invalid wait queue, you can't wait on it */
934 if (!wait_queue_is_valid(wq)) {
935 thread_t thread = current_thread();
936 return (thread->wait_result = THREAD_RESTART);
937 }
938
939 s = splsched();
940 wait_queue_lock(wq);
941 thread_lock(cur_thread);
942 ret = wait_queue_assert_wait64_locked(wq, event, interruptible, cur_thread);
943 thread_unlock(cur_thread);
944 wait_queue_unlock(wq);
945 splx(s);
946 return(ret);
947 }
948
949
950 /*
951 * Routine: _wait_queue_select64_all
952 * Purpose:
953 * Select all threads off a wait queue that meet the
954 * supplied criteria.
955 * Conditions:
956 * at splsched
957 * wait queue locked
958 * wake_queue initialized and ready for insertion
959 * possibly recursive
960 * Returns:
961 * a queue of locked threads
962 */
963 static void
964 _wait_queue_select64_all(
965 wait_queue_t wq,
966 event64_t event,
967 queue_t wake_queue)
968 {
969 wait_queue_element_t wq_element;
970 wait_queue_element_t wqe_next;
971 queue_t q;
972
973 q = &wq->wq_queue;
974
975 wq_element = (wait_queue_element_t) queue_first(q);
976 while (!queue_end(q, (queue_entry_t)wq_element)) {
977 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
978 wqe_next = (wait_queue_element_t)
979 queue_next((queue_t) wq_element);
980
981 /*
982 * We may have to recurse if this is a compound wait queue.
983 */
984 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
985 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
986 wait_queue_t set_queue;
987
988 /*
989 * We have to check the set wait queue.
990 */
991 set_queue = (wait_queue_t)wql->wql_setqueue;
992 wait_queue_lock(set_queue);
993 if (set_queue->wq_isprepost) {
994 wait_queue_set_t wqs = (wait_queue_set_t)set_queue;
995
996 /*
997 * Preposting is only for sets, and the wait queue
998 * is the first element of the set.
999 */
1000 wqs->wqs_refcount++;
1001 }
1002 if (! wait_queue_empty(set_queue))
1003 _wait_queue_select64_all(set_queue, event, wake_queue);
1004 wait_queue_unlock(set_queue);
1005 } else {
1006
1007 /*
1008 * Otherwise, it's a thread. If it is waiting on
1009 * the event we are posting to this queue, pull
1010 * it off the queue and stick it in our wake_queue.
1011 */
1012 thread_t t = (thread_t)wq_element;
1013
1014 if (t->wait_event == event) {
1015 thread_lock(t);
1016 remqueue(q, (queue_entry_t) t);
1017 enqueue (wake_queue, (queue_entry_t) t);
1018 t->wait_queue = WAIT_QUEUE_NULL;
1019 t->wait_event = NO_EVENT64;
1020 t->at_safe_point = FALSE;
1021 /* returned locked */
1022 }
1023 }
1024 wq_element = wqe_next;
1025 }
1026 }
1027
1028 /*
1029 * Routine: wait_queue_wakeup64_all_locked
1030 * Purpose:
1031 * Wakeup some number of threads that are in the specified
1032 * wait queue and waiting on the specified event.
1033 * Conditions:
1034 * wait queue already locked (may be released).
1035 * Returns:
1036 * KERN_SUCCESS - Threads were woken up
1037 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1038 */
1039 __private_extern__ kern_return_t
1040 wait_queue_wakeup64_all_locked(
1041 wait_queue_t wq,
1042 event64_t event,
1043 wait_result_t result,
1044 boolean_t unlock)
1045 {
1046 queue_head_t wake_queue_head;
1047 queue_t q = &wake_queue_head;
1048 kern_return_t res;
1049
1050 assert(wait_queue_held(wq));
1051 queue_init(q);
1052
1053 /*
1054 * Select the threads that we will wake up. The threads
1055 * are returned to us locked and cleanly removed from the
1056 * wait queue.
1057 */
1058 _wait_queue_select64_all(wq, event, q);
1059 if (unlock)
1060 wait_queue_unlock(wq);
1061
1062 /*
1063 * For each thread, set it running.
1064 */
1065 res = KERN_NOT_WAITING;
1066 while (!queue_empty (q)) {
1067 thread_t thread = (thread_t) dequeue(q);
1068 res = thread_go_locked(thread, result);
1069 assert(res == KERN_SUCCESS);
1070 thread_unlock(thread);
1071 }
1072 return res;
1073 }
1074
1075
1076 /*
1077 * Routine: wait_queue_wakeup_all
1078 * Purpose:
1079 * Wakeup some number of threads that are in the specified
1080 * wait queue and waiting on the specified event.
1081 * Conditions:
1082 * Nothing locked
1083 * Returns:
1084 * KERN_SUCCESS - Threads were woken up
1085 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1086 */
1087 kern_return_t
1088 wait_queue_wakeup_all(
1089 wait_queue_t wq,
1090 event_t event,
1091 wait_result_t result)
1092 {
1093 kern_return_t ret;
1094 spl_t s;
1095
1096 if (!wait_queue_is_valid(wq)) {
1097 return KERN_INVALID_ARGUMENT;
1098 }
1099
1100 s = splsched();
1101 wait_queue_lock(wq);
1102 ret = wait_queue_wakeup64_all_locked(
1103 wq, (event64_t)((uint32_t)event),
1104 result, TRUE);
1105 /* lock released */
1106 splx(s);
1107 return ret;
1108 }
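/*
 * Illustrative sketch (not part of the original source): the wake-side
 * counterpart of the wait above.  Events are simply values the waiter
 * and waker agree on; using the address of the object being waited on
 * is a common convention.
 */
#if 0
static void
post_event_example(wait_queue_t wq, event_t event)
{
	kern_return_t kr;

	kr = wait_queue_wakeup_all(wq, event, THREAD_AWAKENED);
	if (kr == KERN_NOT_WAITING) {
		/* nobody was waiting on this <wq,event> pair */
	}
}
#endif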
1109
1110 /*
1111 * Routine: wait_queue_wakeup64_all
1112 * Purpose:
1113 * Wakeup some number of threads that are in the specified
1114 * wait queue and waiting on the specified event.
1115 * Conditions:
1116 * Nothing locked
1117 * Returns:
1118 * KERN_SUCCESS - Threads were woken up
1119 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1120 */
1121 kern_return_t
1122 wait_queue_wakeup64_all(
1123 wait_queue_t wq,
1124 event64_t event,
1125 wait_result_t result)
1126 {
1127 kern_return_t ret;
1128 spl_t s;
1129
1130 if (!wait_queue_is_valid(wq)) {
1131 return KERN_INVALID_ARGUMENT;
1132 }
1133
1134 s = splsched();
1135 wait_queue_lock(wq);
1136 ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1137 /* lock released */
1138 splx(s);
1139 return ret;
1140 }
1141
1142 /*
1143 * Routine: _wait_queue_select64_one
1144 * Purpose:
1145 * Select the best thread off a wait queue that meets the
1146 * supplied criteria.
1147 * Conditions:
1148 * at splsched
1149 * wait queue locked
1150 * possibly recursive
1151 * Returns:
1152 * a locked thread - if one found
1153 * Note:
1154 * This is where the sync policy of the wait queue comes
1155 * into effect. For now, we just assume FIFO.
1156 */
1157 static thread_t
1158 _wait_queue_select64_one(
1159 wait_queue_t wq,
1160 event64_t event)
1161 {
1162 wait_queue_element_t wq_element;
1163 wait_queue_element_t wqe_next;
1164 thread_t t = THREAD_NULL;
1165 queue_t q;
1166
1167 assert(wq->wq_fifo);
1168
1169 q = &wq->wq_queue;
1170
1171 wq_element = (wait_queue_element_t) queue_first(q);
1172 while (!queue_end(q, (queue_entry_t)wq_element)) {
1173 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1174 wqe_next = (wait_queue_element_t)
1175 queue_next((queue_t) wq_element);
1176
1177 /*
1178 * We may have to recurse if this is a compound wait queue.
1179 */
1180 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1181 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1182 wait_queue_t set_queue;
1183
1184 /*
1185 * We have to check the set wait queue.
1186 */
1187 set_queue = (wait_queue_t)wql->wql_setqueue;
1188 wait_queue_lock(set_queue);
1189 if (! wait_queue_empty(set_queue)) {
1190 t = _wait_queue_select64_one(set_queue, event);
1191 }
1192 wait_queue_unlock(set_queue);
1193 if (t != THREAD_NULL)
1194 return t;
1195 } else {
1196
1197 * Otherwise, it's a thread. If it is waiting on
1198 * the event we are posting to this queue, pull
1199 * it off the queue and return it (still locked).
1200 * it off the queue and stick it in out wake_queue.
1201 */
1202 thread_t t = (thread_t)wq_element;
1203
1204 if (t->wait_event == event) {
1205 thread_lock(t);
1206 remqueue(q, (queue_entry_t) t);
1207 t->wait_queue = WAIT_QUEUE_NULL;
1208 t->wait_event = NO_EVENT64;
1209 t->at_safe_point = FALSE;
1210 return t; /* still locked */
1211 }
1212 }
1213 wq_element = wqe_next;
1214 }
1215 return THREAD_NULL;
1216 }
1217
1218 /*
1219 * Routine: wait_queue_peek64_locked
1220 * Purpose:
1221 * Select the best thread from a wait queue that meets the
1222 * supplied criteria, but leave it on the queue it was
1223 * found on. The thread, and the actual wait_queue the
1224 * thread was found on are identified.
1225 * Conditions:
1226 * at splsched
1227 * wait queue locked
1228 * possibly recursive
1229 * Returns:
1230 * a locked thread - if one found
1231 * a locked waitq - the one the thread was found on
1232 * Note:
1233 * Both the waitq the thread was actually found on, and
1234 * the supplied wait queue, are locked after this.
1235 */
1236 __private_extern__ void
1237 wait_queue_peek64_locked(
1238 wait_queue_t wq,
1239 event64_t event,
1240 thread_t *tp,
1241 wait_queue_t *wqp)
1242 {
1243 wait_queue_element_t wq_element;
1244 wait_queue_element_t wqe_next;
1245 thread_t t;
1246 queue_t q;
1247
1248 assert(wq->wq_fifo);
1249
1250 *tp = THREAD_NULL;
1251
1252 q = &wq->wq_queue;
1253
1254 wq_element = (wait_queue_element_t) queue_first(q);
1255 while (!queue_end(q, (queue_entry_t)wq_element)) {
1256 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1257 wqe_next = (wait_queue_element_t)
1258 queue_next((queue_t) wq_element);
1259
1260 /*
1261 * We may have to recurse if this is a compound wait queue.
1262 */
1263 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1264 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1265 wait_queue_t set_queue;
1266
1267 /*
1268 * We have to check the set wait queue.
1269 */
1270 set_queue = (wait_queue_t)wql->wql_setqueue;
1271 wait_queue_lock(set_queue);
1272 if (! wait_queue_empty(set_queue)) {
1273 wait_queue_peek64_locked(set_queue, event, tp, wqp);
1274 }
1275 if (*tp != THREAD_NULL) {
1276 if (*wqp != set_queue)
1277 wait_queue_unlock(set_queue);
1278 return; /* thread and its waitq locked */
1279 }
1280
1281 wait_queue_unlock(set_queue);
1282 } else {
1283
1284 * Otherwise, it's a thread. If it is waiting on
1285 * Otherwise, its a thread. If it is waiting on
1286 * the event we are posting to this queue, return
1287 * it locked, but leave it on the queue.
1288 */
1289 thread_t t = (thread_t)wq_element;
1290
1291 if (t->wait_event == event) {
1292 thread_lock(t);
1293 *tp = t;
1294 *wqp = wq;
1295 return;
1296 }
1297 }
1298 wq_element = wqe_next;
1299 }
1300 }
1301
1302 /*
1303 * Routine: wait_queue_pull_thread_locked
1304 * Purpose:
1305 * Pull a thread that was previously "peeked" off the wait
1306 * queue and (possibly) unlock the waitq.
1307 * Conditions:
1308 * at splsched
1309 * wait queue locked
1310 * thread locked
1311 * Returns:
1312 * with the thread still locked.
1313 */
1314 void
1315 wait_queue_pull_thread_locked(
1316 wait_queue_t waitq,
1317 thread_t thread,
1318 boolean_t unlock)
1319 {
1320
1321 assert(thread->wait_queue == waitq);
1322
1323 remqueue(&waitq->wq_queue, (queue_entry_t)thread );
1324 thread->wait_queue = WAIT_QUEUE_NULL;
1325 thread->wait_event = NO_EVENT64;
1326 thread->at_safe_point = FALSE;
1327 if (unlock)
1328 wait_queue_unlock(waitq);
1329 }
1330
1331
1332 /*
1333 * Routine: wait_queue_select64_thread
1334 * Purpose:
1335 * Look for a thread and remove it from the queues, if
1336 * (and only if) the thread is waiting on the supplied
1337 * <wait_queue, event> pair.
1338 * Conditions:
1339 * at splsched
1340 * wait queue locked
1341 * possibly recursive
1342 * Returns:
1343 * KERN_NOT_WAITING: Thread is not waiting here.
1344 * KERN_SUCCESS: It was, and is now removed (returned locked)
1345 */
1346 static kern_return_t
1347 _wait_queue_select64_thread(
1348 wait_queue_t wq,
1349 event64_t event,
1350 thread_t thread)
1351 {
1352 wait_queue_element_t wq_element;
1353 wait_queue_element_t wqe_next;
1354 kern_return_t res = KERN_NOT_WAITING;
1355 queue_t q = &wq->wq_queue;
1356
1357 thread_lock(thread);
1358 if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1359 remqueue(q, (queue_entry_t) thread);
1360 thread->at_safe_point = FALSE;
1361 thread->wait_event = NO_EVENT64;
1362 thread->wait_queue = WAIT_QUEUE_NULL;
1363 /* thread still locked */
1364 return KERN_SUCCESS;
1365 }
1366 thread_unlock(thread);
1367
1368 /*
1369 * The wait_queue associated with the thread may be one of this
1370 * wait queue's sets. Go see. If so, removing it from
1371 * there is like removing it from here.
1372 */
1373 wq_element = (wait_queue_element_t) queue_first(q);
1374 while (!queue_end(q, (queue_entry_t)wq_element)) {
1375 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1376 wqe_next = (wait_queue_element_t)
1377 queue_next((queue_t) wq_element);
1378
1379 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1380 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1381 wait_queue_t set_queue;
1382
1383 set_queue = (wait_queue_t)wql->wql_setqueue;
1384 wait_queue_lock(set_queue);
1385 if (! wait_queue_empty(set_queue)) {
1386 res = _wait_queue_select64_thread(set_queue,
1387 event,
1388 thread);
1389 }
1390 wait_queue_unlock(set_queue);
1391 if (res == KERN_SUCCESS)
1392 return KERN_SUCCESS;
1393 }
1394 wq_element = wqe_next;
1395 }
1396 return res;
1397 }
1398
1399
1400 /*
1401 * Routine: wait_queue_wakeup64_identity_locked
1402 * Purpose:
1403 * Select a single thread that is most eligible to run and
1404 * set it running, but return the thread locked.
1405 *
1406 * Conditions:
1407 * at splsched
1408 * wait queue locked
1409 * possibly recursive
1410 * Returns:
1411 * a pointer to the locked thread that was awakened
1412 */
1413 __private_extern__ thread_t
1414 wait_queue_wakeup64_identity_locked(
1415 wait_queue_t wq,
1416 event64_t event,
1417 wait_result_t result,
1418 boolean_t unlock)
1419 {
1420 kern_return_t res;
1421 thread_t thread;
1422
1423 assert(wait_queue_held(wq));
1424
1425
1426 thread = _wait_queue_select64_one(wq, event);
1427 if (unlock)
1428 wait_queue_unlock(wq);
1429
1430 if (thread) {
1431 res = thread_go_locked(thread, result);
1432 assert(res == KERN_SUCCESS);
1433 }
1434 return thread; /* still locked if not NULL */
1435 }
1436
1437
1438 /*
1439 * Routine: wait_queue_wakeup64_one_locked
1440 * Purpose:
1441 * Select a single thread that is most eligible to run and
1442 * set it running.
1443 *
1444 * Conditions:
1445 * at splsched
1446 * wait queue locked
1447 * possibly recursive
1448 * Returns:
1449 * KERN_SUCCESS - a thread was found waiting and awakened
1450 * KERN_NOT_WAITING - no thread was waiting on the <wq,event> pair
1451 */
1452 __private_extern__ kern_return_t
1453 wait_queue_wakeup64_one_locked(
1454 wait_queue_t wq,
1455 event64_t event,
1456 wait_result_t result,
1457 boolean_t unlock)
1458 {
1459 thread_t thread;
1460
1461 assert(wait_queue_held(wq));
1462
1463 thread = _wait_queue_select64_one(wq, event);
1464 if (unlock)
1465 wait_queue_unlock(wq);
1466
1467 if (thread) {
1468 kern_return_t res;
1469
1470 res = thread_go_locked(thread, result);
1471 assert(res == KERN_SUCCESS);
1472 thread_unlock(thread);
1473 return res;
1474 }
1475
1476 return KERN_NOT_WAITING;
1477 }
1478
1479 /*
1480 * Routine: wait_queue_wakeup_one
1481 * Purpose:
1482 * Wakeup the most appropriate thread that is in the specified
1483 * wait queue for the specified event.
1484 * Conditions:
1485 * Nothing locked
1486 * Returns:
1487 * KERN_SUCCESS - Thread was woken up
1488 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1489 */
1490 kern_return_t
1491 wait_queue_wakeup_one(
1492 wait_queue_t wq,
1493 event_t event,
1494 wait_result_t result)
1495 {
1496 thread_t thread;
1497 spl_t s;
1498
1499 if (!wait_queue_is_valid(wq)) {
1500 return KERN_INVALID_ARGUMENT;
1501 }
1502
1503 s = splsched();
1504 wait_queue_lock(wq);
1505 thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
1506 wait_queue_unlock(wq);
1507
1508 if (thread) {
1509 kern_return_t res;
1510
1511 res = thread_go_locked(thread, result);
1512 assert(res == KERN_SUCCESS);
1513 thread_unlock(thread);
1514 splx(s);
1515 return res;
1516 }
1517
1518 splx(s);
1519 return KERN_NOT_WAITING;
1520 }
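/*
 * Illustrative sketch (not part of the original source): waking a single
 * waiter, e.g. when handing off one unit of a resource.  On
 * KERN_NOT_WAITING the caller would typically leave the unit available
 * instead.
 */
#if 0
static boolean_t
hand_off_one_example(wait_queue_t wq, event_t event)
{
	return (wait_queue_wakeup_one(wq, event, THREAD_AWAKENED)
						== KERN_SUCCESS);
}
#endif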
1521
1522 /*
1523 * Routine: wait_queue_wakeup64_one
1524 * Purpose:
1525 * Wakeup the most appropriate thread that is in the specified
1526 * wait queue for the specified event.
1527 * Conditions:
1528 * Nothing locked
1529 * Returns:
1530 * KERN_SUCCESS - Thread was woken up
1531 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1532 */
1533 kern_return_t
1534 wait_queue_wakeup64_one(
1535 wait_queue_t wq,
1536 event64_t event,
1537 wait_result_t result)
1538 {
1539 thread_t thread;
1540 spl_t s;
1541
1542 if (!wait_queue_is_valid(wq)) {
1543 return KERN_INVALID_ARGUMENT;
1544 }
1545 s = splsched();
1546 wait_queue_lock(wq);
1547 thread = _wait_queue_select64_one(wq, event);
1548 wait_queue_unlock(wq);
1549
1550 if (thread) {
1551 kern_return_t res;
1552
1553 res = thread_go_locked(thread, result);
1554 assert(res == KERN_SUCCESS);
1555 thread_unlock(thread);
1556 splx(s);
1557 return res;
1558 }
1559
1560 splx(s);
1561 return KERN_NOT_WAITING;
1562 }
1563
1564
1565 /*
1566 * Routine: wait_queue_wakeup64_thread_locked
1567 * Purpose:
1568 * Wakeup the particular thread that was specified if and only
1569 * if it was in this wait queue (or one of its set queues)
1570 * and waiting on the specified event.
1571 *
1572 * This is much safer than just removing the thread from
1573 * whatever wait queue it happens to be on. For instance, it
1574 * may have already been awoken from the wait you intended to
1575 * interrupt and waited on something else (like another
1576 * semaphore).
1577 * Conditions:
1578 * at splsched
1579 * wait queue already locked (may be released).
1580 * Returns:
1581 * KERN_SUCCESS - the thread was found waiting and awakened
1582 * KERN_NOT_WAITING - the thread was not waiting here
1583 */
1584 __private_extern__ kern_return_t
1585 wait_queue_wakeup64_thread_locked(
1586 wait_queue_t wq,
1587 event64_t event,
1588 thread_t thread,
1589 wait_result_t result,
1590 boolean_t unlock)
1591 {
1592 kern_return_t res;
1593
1594 assert(wait_queue_held(wq));
1595
1596 /*
1597 * See if the thread was still waiting there. If so, it got
1598 * dequeued and returned locked.
1599 */
1600 res = _wait_queue_select64_thread(wq, event, thread);
1601 if (unlock)
1602 wait_queue_unlock(wq);
1603
1604 if (res != KERN_SUCCESS)
1605 return KERN_NOT_WAITING;
1606
1607 res = thread_go_locked(thread, result);
1608 assert(res == KERN_SUCCESS);
1609 thread_unlock(thread);
1610 return res;
1611 }
1612
1613 /*
1614 * Routine: wait_queue_wakeup_thread
1615 * Purpose:
1616 * Wakeup the particular thread that was specified if and only
1617 * if it was in this wait queue (or one of its set queues)
1618 * and waiting on the specified event.
1619 *
1620 * This is much safer than just removing the thread from
1621 * whatever wait queue it happens to be on. For instance, it
1622 * may have already been awoken from the wait you intended to
1623 * interrupt and waited on something else (like another
1624 * semaphore).
1625 * Conditions:
1626 * nothing of interest locked
1627 * spl is assumed to be low (this routine raises it)
1628 * Returns:
1629 * KERN_SUCCESS - the thread was found waiting and awakened
1630 * KERN_NOT_WAITING - the thread was not waiting here
1631 */
1632 kern_return_t
1633 wait_queue_wakeup_thread(
1634 wait_queue_t wq,
1635 event_t event,
1636 thread_t thread,
1637 wait_result_t result)
1638 {
1639 kern_return_t res;
1640 spl_t s;
1641
1642 if (!wait_queue_is_valid(wq)) {
1643 return KERN_INVALID_ARGUMENT;
1644 }
1645
1646 s = splsched();
1647 wait_queue_lock(wq);
1648 res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
1649 wait_queue_unlock(wq);
1650
1651 if (res == KERN_SUCCESS) {
1652 res = thread_go_locked(thread, result);
1653 assert(res == KERN_SUCCESS);
1654 thread_unlock(thread);
1655 splx(s);
1656 return res;
1657 }
1658 splx(s);
1659 return KERN_NOT_WAITING;
1660 }
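/*
 * Illustrative sketch (not part of the original source): aborting one
 * specific thread's wait, e.g. on cancellation.  Because the <wq,event>
 * pair is rechecked, a thread that already woke up and blocked elsewhere
 * is left alone (KERN_NOT_WAITING).  THREAD_INTERRUPTED is one of the
 * standard wait_result_t values (assumed available here).
 */
#if 0
static void
cancel_wait_example(wait_queue_t wq, event_t event, thread_t thread)
{
	if (wait_queue_wakeup_thread(wq, event, thread,
				THREAD_INTERRUPTED) != KERN_SUCCESS) {
		/* the thread was not waiting on <wq,event>; nothing to do */
	}
}
#endif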
1661
1662 /*
1663 * Routine: wait_queue_wakeup64_thread
1664 * Purpose:
1665 * Wakeup the particular thread that was specified if and only
1666 * if it was in this wait queue (or one of its set queues)
1667 * and waiting on the specified event.
1668 *
1669 * This is much safer than just removing the thread from
1670 * whatever wait queue it happens to be on. For instance, it
1671 * may have already been awoken from the wait you intended to
1672 * interrupt and waited on something else (like another
1673 * semaphore).
1674 * Conditions:
1675 * nothing of interest locked
1676 * spl is assumed to be low (this routine raises it)
1677 * Returns:
1678 * KERN_SUCCESS - the thread was found waiting and awakened
1679 * KERN_NOT_WAITING - the thread was not waiting here
1680 */
1681 kern_return_t
1682 wait_queue_wakeup64_thread(
1683 wait_queue_t wq,
1684 event64_t event,
1685 thread_t thread,
1686 wait_result_t result)
1687 {
1688 kern_return_t res;
1689 spl_t s;
1690
1691 if (!wait_queue_is_valid(wq)) {
1692 return KERN_INVALID_ARGUMENT;
1693 }
1694
1695 s = splsched();
1696 wait_queue_lock(wq);
1697 res = _wait_queue_select64_thread(wq, event, thread);
1698 wait_queue_unlock(wq);
1699
1700 if (res == KERN_SUCCESS) {
1701 res = thread_go_locked(thread, result);
1702 assert(res == KERN_SUCCESS);
1703 thread_unlock(thread);
1704 splx(s);
1705 return res;
1706 }
1707 splx(s);
1708 return KERN_NOT_WAITING;
1709 }