[apple/xnu.git] / osfmk / kern / wait_queue.c (xnu-344.49)
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_FREE_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55 /*
56 * File: wait_queue.c (adapted from sched_prim.c)
57 * Author: Avadis Tevanian, Jr.
58 * Date: 1986
59 *
60 * Primitives for manipulating wait queues: either global
61 * ones from sched_prim.c, or private ones associated with
62 * particular structures (ports, semaphores, etc.).
63 */
64
65 #include <kern/kern_types.h>
66 #include <kern/simple_lock.h>
67 #include <kern/kalloc.h>
68 #include <kern/queue.h>
69 #include <kern/spl.h>
70 #include <mach/sync_policy.h>
71 #include <kern/sched_prim.h>
72
73 #include <kern/wait_queue.h>
74
75 /*
76 * Routine: wait_queue_init
77 * Purpose:
78 * Initialize a previously allocated wait queue.
79 * Returns:
80 * KERN_SUCCESS - The wait_queue_t was initialized
81 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
82 */
83 kern_return_t
84 wait_queue_init(
85 wait_queue_t wq,
86 int policy)
87 {
88 if ((policy & SYNC_POLICY_ORDER_MASK) != SYNC_POLICY_FIFO)
89 return KERN_INVALID_ARGUMENT;
90
91 wq->wq_fifo = TRUE;
92 wq->wq_type = _WAIT_QUEUE_inited;
93 queue_init(&wq->wq_queue);
94 hw_lock_init(&wq->wq_interlock);
95 return KERN_SUCCESS;
96 }
97
98 /*
99 * Routine: wait_queue_alloc
100 * Purpose:
101 * Allocate and initialize a wait queue for use outside of
102 * the mach part of the kernel.
103 * Conditions:
104 * Nothing locked - can block.
105 * Returns:
106 * The allocated and initialized wait queue
107 * WAIT_QUEUE_NULL if there is a resource shortage
108 */
109 wait_queue_t
110 wait_queue_alloc(
111 int policy)
112 {
113 wait_queue_t wq;
114 kern_return_t ret;
115
116 wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
117 if (wq != WAIT_QUEUE_NULL) {
118 ret = wait_queue_init(wq, policy);
119 if (ret != KERN_SUCCESS) {
120 kfree((vm_offset_t)wq, sizeof(struct wait_queue));
121 wq = WAIT_QUEUE_NULL;
122 }
123 }
124 return wq;
125 }
126
127 /*
128 * Routine: wait_queue_free
129 * Purpose:
130 * Free an allocated wait queue.
131 * Conditions:
132 * May block.
133 */
134 kern_return_t
135 wait_queue_free(
136 wait_queue_t wq)
137 {
138 if (!wait_queue_is_queue(wq))
139 return KERN_INVALID_ARGUMENT;
140 if (!queue_empty(&wq->wq_queue))
141 return KERN_FAILURE;
142 kfree((vm_offset_t)wq, sizeof(struct wait_queue));
143 return KERN_SUCCESS;
144 }
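
/*
 * Editor's sketch (not part of the original source): a minimal lifecycle of
 * an externally allocated wait queue using the routines above. It assumes a
 * kernel context where <kern/wait_queue.h> and <mach/sync_policy.h> are
 * available; the identifier example_wq is hypothetical.
 *
 *	wait_queue_t example_wq;
 *
 *	example_wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (example_wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *
 *	... wait/wakeup traffic on example_wq ...
 *
 *	if (wait_queue_free(example_wq) != KERN_SUCCESS) {
 *		... KERN_FAILURE: threads are still queued on it ...
 *	}
 */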
145
146 /*
147 * Routine: wait_queue_set_init
148 * Purpose:
149 * Initialize a previously allocated wait queue set.
150 * Returns:
151 * KERN_SUCCESS - The wait_queue_set_t was initialized
152 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
153 */
154 kern_return_t
155 wait_queue_set_init(
156 wait_queue_set_t wqset,
157 int policy)
158 {
159 kern_return_t ret;
160
161 ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
162 if (ret != KERN_SUCCESS)
163 return ret;
164
165 wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
166 if (policy & SYNC_POLICY_PREPOST)
167 wqset->wqs_wait_queue.wq_isprepost = TRUE;
168 else
169 wqset->wqs_wait_queue.wq_isprepost = FALSE;
170 queue_init(&wqset->wqs_setlinks);
171 wqset->wqs_refcount = 0;
172 return KERN_SUCCESS;
173 }
174
175 /* legacy API */
176 kern_return_t
177 wait_queue_sub_init(
178 wait_queue_set_t wqset,
179 int policy)
180 {
181 return wait_queue_set_init(wqset, policy);
182 }
183
184 /*
185 * Routine: wait_queue_set_alloc
186 * Purpose:
187 * Allocate and initialize a wait queue set for
188 * use outside of the mach part of the kernel.
189 * Conditions:
190 * May block.
191 * Returns:
192 * The allocated and initialized wait queue set
193 * WAIT_QUEUE_SET_NULL if there is a resource shortage
194 */
195 wait_queue_set_t
196 wait_queue_set_alloc(
197 int policy)
198 {
199 wait_queue_set_t wq_set;
200
201 wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
202 if (wq_set != WAIT_QUEUE_SET_NULL) {
203 kern_return_t ret;
204
205 ret = wait_queue_set_init(wq_set, policy);
206 if (ret != KERN_SUCCESS) {
207 kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
208 wq_set = WAIT_QUEUE_SET_NULL;
209 }
210 }
211 return wq_set;
212 }
213
214 /*
215 * Routine: wait_queue_set_free
216 * Purpose:
217 * Free an allocated wait queue set
218 * Conditions:
219 * May block.
220 */
221 kern_return_t
222 wait_queue_set_free(
223 wait_queue_set_t wq_set)
224 {
225 if (!wait_queue_is_set(wq_set))
226 return KERN_INVALID_ARGUMENT;
227
228 if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
229 return KERN_FAILURE;
230
231 kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
232 return KERN_SUCCESS;
233 }
234
235 kern_return_t
236 wait_queue_sub_clearrefs(
237 wait_queue_set_t wq_set)
238 {
239 if (!wait_queue_is_set(wq_set))
240 return KERN_INVALID_ARGUMENT;
241
242 wqs_lock(wq_set);
243 wq_set->wqs_refcount = 0;
244 wqs_unlock(wq_set);
245 return KERN_SUCCESS;
246 }
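
/*
 * Editor's sketch (not part of the original source): allocating a wait queue
 * set, optionally with preposting, and tearing it down with the routines
 * above. Assumes a kernel context; example_set is a hypothetical identifier.
 *
 *	wait_queue_set_t example_set;
 *
 *	example_set = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
 *	if (example_set == WAIT_QUEUE_SET_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *
 *	... link member queues, assert waits, post wakeups ...
 *
 *	Drop any accumulated preposts, then free the (now empty) set:
 *
 *	(void) wait_queue_sub_clearrefs(example_set);
 *	(void) wait_queue_set_free(example_set);
 */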
247
248 /*
249 *
250 * Routine: wait_queue_set_size
251 * Routine: wait_queue_link_size
252 * Purpose:
253 * Return the size of opaque wait queue structures
254 */
255 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
256 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
257
258 /* declare a unique type for wait queue link structures */
259 static unsigned int _wait_queue_link;
260 static unsigned int _wait_queue_unlinked;
261
262 #define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
263 #define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
264
265 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
266 WQASSERT(((wqe)->wqe_queue == (wq) && \
267 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
268 "wait queue element list corruption: wq=%#x, wqe=%#x", \
269 (wq), (wqe))
270
271 #define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
272 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
273 (queue_t)(wql) : &(wql)->wql_setlinks)))
274
275 #define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
276 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
277 (queue_t)(wql) : &(wql)->wql_setlinks)))
278
279 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
280 WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
281 ((wql)->wql_setqueue == (wqs)) && \
282 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
283 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
284 "wait queue set links corruption: wqs=%#x, wql=%#x", \
285 (wqs), (wql))
286
287 #if defined(_WAIT_QUEUE_DEBUG_)
288
289 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
290
291 #define WAIT_QUEUE_CHECK(wq) \
292 MACRO_BEGIN \
293 queue_t q2 = &(wq)->wq_queue; \
294 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
295 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
296 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
297 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
298 } \
299 MACRO_END
300
301 #define WAIT_QUEUE_SET_CHECK(wqs) \
302 MACRO_BEGIN \
303 queue_t q2 = &(wqs)->wqs_setlinks; \
304 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
305 while (!queue_end(q2, (queue_entry_t)wql2)) { \
306 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
307 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
308 } \
309 MACRO_END
310
311 #else /* !_WAIT_QUEUE_DEBUG_ */
312
313 #define WQASSERT(e, s, p0, p1) assert(e)
314
315 #define WAIT_QUEUE_CHECK(wq)
316 #define WAIT_QUEUE_SET_CHECK(wqs)
317
318 #endif /* !_WAIT_QUEUE_DEBUG_ */
319
320 /*
321 * Routine: wait_queue_member_locked
322 * Purpose:
323 * Indicate if this set queue is a member of the queue
324 * Conditions:
325 * The wait queue is locked
326 * The set queue is just that, a set queue
327 */
328 __private_extern__ boolean_t
329 wait_queue_member_locked(
330 wait_queue_t wq,
331 wait_queue_set_t wq_set)
332 {
333 wait_queue_element_t wq_element;
334 queue_t q;
335
336 assert(wait_queue_held(wq));
337 assert(wait_queue_is_set(wq_set));
338
339 q = &wq->wq_queue;
340
341 wq_element = (wait_queue_element_t) queue_first(q);
342 while (!queue_end(q, (queue_entry_t)wq_element)) {
343 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
344 if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
345 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
346
347 if (wql->wql_setqueue == wq_set)
348 return TRUE;
349 }
350 wq_element = (wait_queue_element_t)
351 queue_next((queue_t) wq_element);
352 }
353 return FALSE;
354 }
355
356
357 /*
358 * Routine: wait_queue_member
359 * Purpose:
360 * Indicate if this set queue is a member of the queue
361 * Conditions:
362 * The set queue is just that, a set queue
363 */
364 boolean_t
365 wait_queue_member(
366 wait_queue_t wq,
367 wait_queue_set_t wq_set)
368 {
369 boolean_t ret;
370 spl_t s;
371
372 if (!wait_queue_is_set(wq_set))
373 return FALSE;
374
375 s = splsched();
376 wait_queue_lock(wq);
377 ret = wait_queue_member_locked(wq, wq_set);
378 wait_queue_unlock(wq);
379 splx(s);
380
381 return ret;
382 }
383
384
385 /*
386 * Routine: wait_queue_link_noalloc
387 * Purpose:
388 * Insert a set wait queue into a wait queue. This
389 * requires us to link the two together using a wait_queue_link
390 * structure that the caller provides (no allocation is done here).
391 * Conditions:
392 * The wait queue being inserted must be inited as a set queue
393 */
394 kern_return_t
395 wait_queue_link_noalloc(
396 wait_queue_t wq,
397 wait_queue_set_t wq_set,
398 wait_queue_link_t wql)
399 {
400 wait_queue_element_t wq_element;
401 queue_t q;
402 spl_t s;
403
404 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
405 return KERN_INVALID_ARGUMENT;
406
407 /*
408 * There are probably fewer threads and sets associated with
409 * the wait queue than there are wait queues associated with
410 * the set. So let's validate it that way.
411 */
412 s = splsched();
413 wait_queue_lock(wq);
414 wqs_lock(wq_set);
415 q = &wq->wq_queue;
416 wq_element = (wait_queue_element_t) queue_first(q);
417 while (!queue_end(q, (queue_entry_t)wq_element)) {
418 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
419 if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
420 ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
421 wqs_unlock(wq_set);
422 wait_queue_unlock(wq);
423 splx(s);
424 return KERN_ALREADY_IN_SET;
425 }
426 wq_element = (wait_queue_element_t)
427 queue_next((queue_t) wq_element);
428 }
429
430 /*
431 * Not already a member, so we can add it.
432 */
433
434 WAIT_QUEUE_SET_CHECK(wq_set);
435
436 wql->wql_queue = wq;
437 queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
438 wql->wql_setqueue = wq_set;
439 queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
440 wql->wql_type = WAIT_QUEUE_LINK;
441
442 wqs_unlock(wq_set);
443 wait_queue_unlock(wq);
444 splx(s);
445
446 return KERN_SUCCESS;
447 }
448
449 /*
450 * Routine: wait_queue_link
451 * Purpose:
452 * Insert a set wait queue into a wait queue. This
453 * requires us to link the two together using a wait_queue_link
454 * structure that we allocate.
455 * Conditions:
456 * The wait queue being inserted must be inited as a set queue
457 */
458 kern_return_t
459 wait_queue_link(
460 wait_queue_t wq,
461 wait_queue_set_t wq_set)
462 {
463 wait_queue_link_t wql;
464 kern_return_t ret;
465
466 wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
467 if (wql == WAIT_QUEUE_LINK_NULL)
468 return KERN_RESOURCE_SHORTAGE;
469
470 ret = wait_queue_link_noalloc(wq, wq_set, wql);
471 if (ret != KERN_SUCCESS)
472 kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
473
474 return ret;
475 }
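
/*
 * Editor's sketch (not part of the original source): linking a wait queue
 * into a set, checking membership, and unlinking it again with the routines
 * above. Assumes a kernel context; example_wq and example_set are
 * hypothetical identifiers.
 *
 *	wait_queue_t     example_wq  = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	wait_queue_set_t example_set = wait_queue_set_alloc(SYNC_POLICY_FIFO);
 *	kern_return_t    kr;
 *
 *	kr = wait_queue_link(example_wq, example_set);
 *	if (kr == KERN_SUCCESS) {
 *		assert(wait_queue_member(example_wq, example_set));
 *		...
 *		kr = wait_queue_unlink(example_wq, example_set);
 *	}
 */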
476
477
478 /*
479 * Routine: wait_queue_unlink_locked
480 * Purpose:
481 * Undo the linkage between a wait queue and a set.
482 */
483 static void
484 wait_queue_unlink_locked(
485 wait_queue_t wq,
486 wait_queue_set_t wq_set,
487 wait_queue_link_t wql)
488 {
489 assert(wait_queue_held(wq));
490 assert(wait_queue_held(&wq_set->wqs_wait_queue));
491
492 wql->wql_queue = WAIT_QUEUE_NULL;
493 queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
494 wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
495 queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
496 wql->wql_type = WAIT_QUEUE_UNLINKED;
497
498 WAIT_QUEUE_CHECK(wq);
499 WAIT_QUEUE_SET_CHECK(wq_set);
500 }
501
502 /*
503 * Routine: wait_queue_unlink
504 * Purpose:
505 * Remove the linkage between a wait queue and a set,
506 * freeing the linkage structure.
507 * Conditions:
508 * The set queue must currently be linked to the wait queue
509 */
510 kern_return_t
511 wait_queue_unlink(
512 wait_queue_t wq,
513 wait_queue_set_t wq_set)
514 {
515 wait_queue_element_t wq_element;
516 wait_queue_link_t wql;
517 queue_t q;
518 spl_t s;
519
520 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
521 return KERN_INVALID_ARGUMENT;
522 }
523 s = splsched();
524 wait_queue_lock(wq);
525
526 q = &wq->wq_queue;
527 wq_element = (wait_queue_element_t) queue_first(q);
528 while (!queue_end(q, (queue_entry_t)wq_element)) {
529 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
530 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
531 wql = (wait_queue_link_t)wq_element;
532
533 if (wql->wql_setqueue == wq_set) {
534 wqs_lock(wq_set);
535 wait_queue_unlink_locked(wq, wq_set, wql);
536 wqs_unlock(wq_set);
537 wait_queue_unlock(wq);
538 splx(s);
539 kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
540 return KERN_SUCCESS;
541 }
542 }
543 wq_element = (wait_queue_element_t)
544 queue_next((queue_t) wq_element);
545 }
546 wait_queue_unlock(wq);
547 splx(s);
548 return KERN_NOT_IN_SET;
549 }
550
551
552 /*
553 * Routine: wait_queue_unlinkall_nofree
554 * Purpose:
555 * Remove the linkage between a wait queue and all its
556 * sets. The caller is responsible for freeing
557 * the wait queue link structures.
558 */
559
560 kern_return_t
561 wait_queue_unlinkall_nofree(
562 wait_queue_t wq)
563 {
564 wait_queue_element_t wq_element;
565 wait_queue_element_t wq_next_element;
566 wait_queue_set_t wq_set;
567 wait_queue_link_t wql;
568 queue_head_t links_queue_head;
569 queue_t links = &links_queue_head;
570 queue_t q;
571 spl_t s;
572
573 if (!wait_queue_is_queue(wq)) {
574 return KERN_INVALID_ARGUMENT;
575 }
576
577 queue_init(links);
578
579 s = splsched();
580 wait_queue_lock(wq);
581
582 q = &wq->wq_queue;
583
584 wq_element = (wait_queue_element_t) queue_first(q);
585 while (!queue_end(q, (queue_entry_t)wq_element)) {
586 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
587 wq_next_element = (wait_queue_element_t)
588 queue_next((queue_t) wq_element);
589
590 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
591 wql = (wait_queue_link_t)wq_element;
592 wq_set = wql->wql_setqueue;
593 wqs_lock(wq_set);
594 wait_queue_unlink_locked(wq, wq_set, wql);
595 wqs_unlock(wq_set);
596 }
597 wq_element = wq_next_element;
598 }
599 wait_queue_unlock(wq);
600 splx(s);
601 return(KERN_SUCCESS);
602 }
603
604
605 /*
606 * Routine: wait_queue_unlink_all
607 * Purpose:
608 * Remove the linkage between a wait queue and all its sets.
609 * All the linkage structures are freed.
610 * Conditions:
611 * Nothing of interest locked.
612 */
613
614 kern_return_t
615 wait_queue_unlink_all(
616 wait_queue_t wq)
617 {
618 wait_queue_element_t wq_element;
619 wait_queue_element_t wq_next_element;
620 wait_queue_set_t wq_set;
621 wait_queue_link_t wql;
622 queue_head_t links_queue_head;
623 queue_t links = &links_queue_head;
624 queue_t q;
625 spl_t s;
626
627 if (!wait_queue_is_queue(wq)) {
628 return KERN_INVALID_ARGUMENT;
629 }
630
631 queue_init(links);
632
633 s = splsched();
634 wait_queue_lock(wq);
635
636 q = &wq->wq_queue;
637
638 wq_element = (wait_queue_element_t) queue_first(q);
639 while (!queue_end(q, (queue_entry_t)wq_element)) {
640 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
641 wq_next_element = (wait_queue_element_t)
642 queue_next((queue_t) wq_element);
643
644 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
645 wql = (wait_queue_link_t)wq_element;
646 wq_set = wql->wql_setqueue;
647 wqs_lock(wq_set);
648 wait_queue_unlink_locked(wq, wq_set, wql);
649 wqs_unlock(wq_set);
650 enqueue(links, &wql->wql_links);
651 }
652 wq_element = wq_next_element;
653 }
654 wait_queue_unlock(wq);
655 splx(s);
656
657 while(!queue_empty(links)) {
658 wql = (wait_queue_link_t) dequeue(links);
659 kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
660 }
661
662 return(KERN_SUCCESS);
663 }
664
665 /*
666 * Routine: wait_queue_set_unlink_all_nofree
667 * Purpose:
668 * Remove the linkage between a set wait queue and all its
669 * member wait queues. The link structures are not freed, nor
670 * returned. It is the caller's responsibility to track and free
671 * them.
672 * Conditions:
673 * The wait queue must be a set queue
674 */
675 kern_return_t
676 wait_queue_set_unlink_all_nofree(
677 wait_queue_set_t wq_set)
678 {
679 wait_queue_link_t wql;
680 wait_queue_t wq;
681 queue_t q;
682 kern_return_t kret;
683 spl_t s;
684
685 if (!wait_queue_is_set(wq_set)) {
686 return KERN_INVALID_ARGUMENT;
687 }
688
689 retry:
690 s = splsched();
691 wqs_lock(wq_set);
692
693 q = &wq_set->wqs_setlinks;
694
695 wql = (wait_queue_link_t)queue_first(q);
696 while (!queue_end(q, (queue_entry_t)wql)) {
697 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
698 wq = wql->wql_queue;
699 if (wait_queue_lock_try(wq)) {
700 wait_queue_unlink_locked(wq, wq_set, wql);
701 wait_queue_unlock(wq);
702 wql = (wait_queue_link_t)queue_first(q);
703 } else {
704 wqs_unlock(wq_set);
705 splx(s);
706 delay(1);
707 goto retry;
708 }
709 }
710 wqs_unlock(wq_set);
711 splx(s);
712
713 return(KERN_SUCCESS);
714 }
715
716 /* legacy interface naming */
717 kern_return_t
718 wait_subqueue_unlink_all(
719 wait_queue_set_t wq_set)
720 {
721 return wait_queue_set_unlink_all_nofree(wq_set);
722 }
723
724
725 /*
726 * Routine: wait_queue_set_unlink_all
727 * Purpose:
728 * Remove the linkage between a set wait queue and all its
729 * member wait queues. The link structures are freed.
730 * Conditions:
731 * The wait queue must be a set
732 */
733 kern_return_t
734 wait_queue_set_unlink_all(
735 wait_queue_set_t wq_set)
736 {
737 wait_queue_link_t wql;
738 wait_queue_t wq;
739 queue_t q;
740 queue_head_t links_queue_head;
741 queue_t links = &links_queue_head;
742 kern_return_t kret;
743 spl_t s;
744
745 if (!wait_queue_is_set(wq_set)) {
746 return KERN_INVALID_ARGUMENT;
747 }
748
749 queue_init(links);
750
751 retry:
752 s = splsched();
753 wqs_lock(wq_set);
754
755 q = &wq_set->wqs_setlinks;
756
757 wql = (wait_queue_link_t)queue_first(q);
758 while (!queue_end(q, (queue_entry_t)wql)) {
759 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
760 wq = wql->wql_queue;
761 if (wait_queue_lock_try(wq)) {
762 wait_queue_unlink_locked(wq, wq_set, wql);
763 wait_queue_unlock(wq);
764 enqueue(links, &wql->wql_links);
765 wql = (wait_queue_link_t)queue_first(q);
766 } else {
767 wqs_unlock(wq_set);
768 splx(s);
769 delay(1);
770 goto retry;
771 }
772 }
773 wqs_unlock(wq_set);
774 splx(s);
775
776 while (!queue_empty (links)) {
777 wql = (wait_queue_link_t) dequeue(links);
778 kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
779 }
780 return(KERN_SUCCESS);
781 }
782
783
784 /*
785 * Routine: wait_queue_unlink_one
786 * Purpose:
787 * Find and unlink one set wait queue
788 * Conditions:
789 * Nothing of interest locked.
790 */
791 void
792 wait_queue_unlink_one(
793 wait_queue_t wq,
794 wait_queue_set_t *wq_setp)
795 {
796 wait_queue_element_t wq_element;
797 queue_t q;
798 spl_t s;
799
800 s = splsched();
801 wait_queue_lock(wq);
802
803 q = &wq->wq_queue;
804
805 wq_element = (wait_queue_element_t) queue_first(q);
806 while (!queue_end(q, (queue_entry_t)wq_element)) {
807
808 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
809 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
810 wait_queue_set_t wq_set = wql->wql_setqueue;
811
812 wqs_lock(wq_set);
813 wait_queue_unlink_locked(wq, wq_set, wql);
814 wqs_unlock(wq_set);
815 wait_queue_unlock(wq);
816 splx(s);
817 kfree((vm_offset_t)wql,sizeof(struct wait_queue_link));
818 *wq_setp = wq_set;
819 return;
820 }
821
822 wq_element = (wait_queue_element_t)
823 queue_next((queue_t) wq_element);
824 }
825 wait_queue_unlock(wq);
826 splx(s);
827 *wq_setp = WAIT_QUEUE_SET_NULL;
828 }
829
830
831 /*
832 * Routine: wait_queue_assert_wait64_locked
833 * Purpose:
834 * Insert the current thread into the supplied wait queue
835 * waiting for a particular event to be posted to that queue.
836 *
837 * Conditions:
838 * The wait queue is assumed locked.
839 *
840 */
841 __private_extern__ wait_result_t
842 wait_queue_assert_wait64_locked(
843 wait_queue_t wq,
844 event64_t event,
845 wait_interrupt_t interruptible,
846 boolean_t unlock)
847 {
848 thread_t thread;
849 wait_result_t wait_result;
850
851 if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
852 wait_queue_set_t wqs = (wait_queue_set_t)wq;
853 if (wqs->wqs_isprepost && wqs->wqs_refcount > 0) {
854 if (unlock)
855 wait_queue_unlock(wq);
856 return(THREAD_AWAKENED);
857 }
858 }
859
860 /*
861 * This is the extent to which we currently take scheduling attributes
862 * into account. If the thread is vm privileged, we stick it at
863 * the front of the queue. Later, these queues will honor the policy
864 * value set at wait_queue_init time.
865 */
866 thread = current_thread();
867 thread_lock(thread);
868 wait_result = thread_mark_wait_locked(thread, interruptible);
869 if (wait_result == THREAD_WAITING) {
870 if (thread->vm_privilege)
871 enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
872 else
873 enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
874 thread->wait_event = event;
875 thread->wait_queue = wq;
876 }
877 thread_unlock(thread);
878 if (unlock)
879 wait_queue_unlock(wq);
880 return(wait_result);
881 }
882
883 /*
884 * Routine: wait_queue_assert_wait
885 * Purpose:
886 * Insert the current thread into the supplied wait queue
887 * waiting for a particular event to be posted to that queue.
888 *
889 * Conditions:
890 * nothing of interest locked.
891 */
892 wait_result_t
893 wait_queue_assert_wait(
894 wait_queue_t wq,
895 event_t event,
896 wait_interrupt_t interruptible)
897 {
898 spl_t s;
899 wait_result_t ret;
900
901 /* If it is an invalid wait queue, you can't wait on it */
902 if (!wait_queue_is_valid(wq)) {
903 thread_t thread = current_thread();
904 return (thread->wait_result = THREAD_RESTART);
905 }
906
907 s = splsched();
908 wait_queue_lock(wq);
909 ret = wait_queue_assert_wait64_locked(
910 wq, (event64_t)((uint32_t)event),
911 interruptible, TRUE);
912 /* wait queue unlocked */
913 splx(s);
914 return(ret);
915 }
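
/*
 * Editor's sketch (not part of the original source): the usual wait-side
 * pattern built on wait_queue_assert_wait(). Assumes a kernel context;
 * example_wq and example_object are hypothetical, and THREAD_CONTINUE_NULL
 * is assumed to be the null continuation argument for thread_block() in
 * this tree.
 *
 *	wait_result_t wr;
 *
 *	wr = wait_queue_assert_wait(example_wq,
 *				    (event_t)&example_object,
 *				    THREAD_INTERRUPTIBLE);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	if (wr == THREAD_INTERRUPTED)
 *		... the wait was aborted before the event was posted ...
 */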
916
917 /*
918 * Routine: wait_queue_assert_wait64
919 * Purpose:
920 * Insert the current thread into the supplied wait queue
921 * waiting for a particular event to be posted to that queue.
922 * Conditions:
923 * nothing of interest locked.
924 */
925 wait_result_t
926 wait_queue_assert_wait64(
927 wait_queue_t wq,
928 event64_t event,
929 wait_interrupt_t interruptible)
930 {
931 spl_t s;
932 wait_result_t ret;
933
934 /* If it is an invalid wait queue, you can't wait on it */
935 if (!wait_queue_is_valid(wq)) {
936 thread_t thread = current_thread();
937 return (thread->wait_result = THREAD_RESTART);
938 }
939
940 s = splsched();
941 wait_queue_lock(wq);
942 ret = wait_queue_assert_wait64_locked(wq, event, interruptible, TRUE);
943 /* wait queue unlocked */
944 splx(s);
945 return(ret);
946 }
947
948
949 /*
950 * Routine: _wait_queue_select64_all
951 * Purpose:
952 * Select all threads off a wait queue that meet the
953 * supplied criteria.
954 * Conditions:
955 * at splsched
956 * wait queue locked
957 * wake_queue initialized and ready for insertion
958 * possibly recursive
959 * Returns:
960 * a queue of locked threads
961 */
962 static void
963 _wait_queue_select64_all(
964 wait_queue_t wq,
965 event64_t event,
966 queue_t wake_queue)
967 {
968 wait_queue_element_t wq_element;
969 wait_queue_element_t wqe_next;
970 queue_t q;
971
972 q = &wq->wq_queue;
973
974 wq_element = (wait_queue_element_t) queue_first(q);
975 while (!queue_end(q, (queue_entry_t)wq_element)) {
976 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
977 wqe_next = (wait_queue_element_t)
978 queue_next((queue_t) wq_element);
979
980 /*
981 * We may have to recurse if this is a compound wait queue.
982 */
983 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
984 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
985 wait_queue_t set_queue;
986
987 /*
988 * We have to check the set wait queue.
989 */
990 set_queue = (wait_queue_t)wql->wql_setqueue;
991 wait_queue_lock(set_queue);
992 if (set_queue->wq_isprepost) {
993 wait_queue_set_t wqs = (wait_queue_set_t)set_queue;
994
995 /*
996 * Preposting is only for sets, and the wait queue
997 * is the first element of the set
998 */
999 wqs->wqs_refcount++;
1000 }
1001 if (! wait_queue_empty(set_queue))
1002 _wait_queue_select64_all(set_queue, event, wake_queue);
1003 wait_queue_unlock(set_queue);
1004 } else {
1005
1006 /*
1007 * Otherwise, it's a thread. If it is waiting on
1008 * the event we are posting to this queue, pull
1009 * it off the queue and stick it in our wake_queue.
1010 */
1011 thread_t t = (thread_t)wq_element;
1012
1013 if (t->wait_event == event) {
1014 thread_lock(t);
1015 remqueue(q, (queue_entry_t) t);
1016 enqueue (wake_queue, (queue_entry_t) t);
1017 t->wait_queue = WAIT_QUEUE_NULL;
1018 t->wait_event = NO_EVENT64;
1019 t->at_safe_point = FALSE;
1020 /* returned locked */
1021 }
1022 }
1023 wq_element = wqe_next;
1024 }
1025 }
1026
1027 /*
1028 * Routine: wait_queue_wakeup64_all_locked
1029 * Purpose:
1030 * Wakeup some number of threads that are in the specified
1031 * wait queue and waiting on the specified event.
1032 * Conditions:
1033 * wait queue already locked (may be released).
1034 * Returns:
1035 * KERN_SUCCESS - Threads were woken up
1036 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1037 */
1038 __private_extern__ kern_return_t
1039 wait_queue_wakeup64_all_locked(
1040 wait_queue_t wq,
1041 event64_t event,
1042 wait_result_t result,
1043 boolean_t unlock)
1044 {
1045 queue_head_t wake_queue_head;
1046 queue_t q = &wake_queue_head;
1047 kern_return_t res;
1048
1049 assert(wait_queue_held(wq));
1050 queue_init(q);
1051
1052 /*
1053 * Select the threads that we will wake up. The threads
1054 * are returned to us locked and cleanly removed from the
1055 * wait queue.
1056 */
1057 _wait_queue_select64_all(wq, event, q);
1058 if (unlock)
1059 wait_queue_unlock(wq);
1060
1061 /*
1062 * For each thread, set it running.
1063 */
1064 res = KERN_NOT_WAITING;
1065 while (!queue_empty (q)) {
1066 thread_t thread = (thread_t) dequeue(q);
1067 res = thread_go_locked(thread, result);
1068 assert(res == KERN_SUCCESS);
1069 thread_unlock(thread);
1070 }
1071 return res;
1072 }
1073
1074
1075 /*
1076 * Routine: wait_queue_wakeup_all
1077 * Purpose:
1078 * Wakeup some number of threads that are in the specified
1079 * wait queue and waiting on the specified event.
1080 * Conditions:
1081 * Nothing locked
1082 * Returns:
1083 * KERN_SUCCESS - Threads were woken up
1084 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1085 */
1086 kern_return_t
1087 wait_queue_wakeup_all(
1088 wait_queue_t wq,
1089 event_t event,
1090 wait_result_t result)
1091 {
1092 kern_return_t ret;
1093 spl_t s;
1094
1095 if (!wait_queue_is_valid(wq)) {
1096 return KERN_INVALID_ARGUMENT;
1097 }
1098
1099 s = splsched();
1100 wait_queue_lock(wq);
1101 ret = wait_queue_wakeup64_all_locked(
1102 wq, (event64_t)((uint32_t)event),
1103 result, TRUE);
1104 /* lock released */
1105 splx(s);
1106 return ret;
1107 }
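
/*
 * Editor's sketch (not part of the original source): the wake-side
 * counterpart to the wait sketch above. Posting the same <queue, event>
 * pair makes every thread parked there runnable with the given wait
 * result. example_wq and example_object are hypothetical identifiers.
 *
 *	(void) wait_queue_wakeup_all(example_wq,
 *				     (event_t)&example_object,
 *				     THREAD_AWAKENED);
 */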
1108
1109 /*
1110 * Routine: wait_queue_wakeup64_all
1111 * Purpose:
1112 * Wakeup some number of threads that are in the specified
1113 * wait queue and waiting on the specified event.
1114 * Conditions:
1115 * Nothing locked
1116 * Returns:
1117 * KERN_SUCCESS - Threads were woken up
1118 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1119 */
1120 kern_return_t
1121 wait_queue_wakeup64_all(
1122 wait_queue_t wq,
1123 event64_t event,
1124 wait_result_t result)
1125 {
1126 kern_return_t ret;
1127 spl_t s;
1128
1129 if (!wait_queue_is_valid(wq)) {
1130 return KERN_INVALID_ARGUMENT;
1131 }
1132
1133 s = splsched();
1134 wait_queue_lock(wq);
1135 ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1136 /* lock released */
1137 splx(s);
1138 return ret;
1139 }
1140
1141 /*
1142 * Routine: _wait_queue_select64_one
1143 * Purpose:
1144 * Select the best thread off a wait queue that meets the
1145 * supplied criteria.
1146 * Conditions:
1147 * at splsched
1148 * wait queue locked
1149 * possibly recursive
1150 * Returns:
1151 * a locked thread - if one found
1152 * Note:
1153 * This is where the sync policy of the wait queue comes
1154 * into effect. For now, we just assume FIFO.
1155 */
1156 static thread_t
1157 _wait_queue_select64_one(
1158 wait_queue_t wq,
1159 event64_t event)
1160 {
1161 wait_queue_element_t wq_element;
1162 wait_queue_element_t wqe_next;
1163 thread_t t = THREAD_NULL;
1164 queue_t q;
1165
1166 assert(wq->wq_fifo);
1167
1168 q = &wq->wq_queue;
1169
1170 wq_element = (wait_queue_element_t) queue_first(q);
1171 while (!queue_end(q, (queue_entry_t)wq_element)) {
1172 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1173 wqe_next = (wait_queue_element_t)
1174 queue_next((queue_t) wq_element);
1175
1176 /*
1177 * We may have to recurse if this is a compound wait queue.
1178 */
1179 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1180 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1181 wait_queue_t set_queue;
1182
1183 /*
1184 * We have to check the set wait queue.
1185 */
1186 set_queue = (wait_queue_t)wql->wql_setqueue;
1187 wait_queue_lock(set_queue);
1188 if (! wait_queue_empty(set_queue)) {
1189 t = _wait_queue_select64_one(set_queue, event);
1190 }
1191 wait_queue_unlock(set_queue);
1192 if (t != THREAD_NULL)
1193 return t;
1194 } else {
1195
1196 /*
1197 * Otherwise, it's a thread. If it is waiting on
1198 * the event we are posting to this queue, pull
1199 * it off the queue and return it (still locked).
1200 */
1201 thread_t t = (thread_t)wq_element;
1202
1203 if (t->wait_event == event) {
1204 thread_lock(t);
1205 remqueue(q, (queue_entry_t) t);
1206 t->wait_queue = WAIT_QUEUE_NULL;
1207 t->wait_event = NO_EVENT64;
1208 t->at_safe_point = FALSE;
1209 return t; /* still locked */
1210 }
1211 }
1212 wq_element = wqe_next;
1213 }
1214 return THREAD_NULL;
1215 }
1216
1217 /*
1218 * Routine: wait_queue_peek64_locked
1219 * Purpose:
1220 * Select the best thread from a wait queue that meets the
1221 * supplied criteria, but leave it on the queue it was
1222 * found on. The thread, and the actual wait_queue the
1223 * thread was found on are identified.
1224 * Conditions:
1225 * at splsched
1226 * wait queue locked
1227 * possibly recursive
1228 * Returns:
1229 * a locked thread - if one found
1230 * a locked waitq - the one the thread was found on
1231 * Note:
1232 * Both the waitq the thread was actually found on, and
1233 * the supplied wait queue, are locked after this.
1234 */
1235 __private_extern__ void
1236 wait_queue_peek64_locked(
1237 wait_queue_t wq,
1238 event64_t event,
1239 thread_t *tp,
1240 wait_queue_t *wqp)
1241 {
1242 wait_queue_element_t wq_element;
1243 wait_queue_element_t wqe_next;
1244 thread_t t;
1245 queue_t q;
1246
1247 assert(wq->wq_fifo);
1248
1249 *tp = THREAD_NULL;
1250
1251 q = &wq->wq_queue;
1252
1253 wq_element = (wait_queue_element_t) queue_first(q);
1254 while (!queue_end(q, (queue_entry_t)wq_element)) {
1255 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1256 wqe_next = (wait_queue_element_t)
1257 queue_next((queue_t) wq_element);
1258
1259 /*
1260 * We may have to recurse if this is a compound wait queue.
1261 */
1262 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1263 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1264 wait_queue_t set_queue;
1265
1266 /*
1267 * We have to check the set wait queue.
1268 */
1269 set_queue = (wait_queue_t)wql->wql_setqueue;
1270 wait_queue_lock(set_queue);
1271 if (! wait_queue_empty(set_queue)) {
1272 wait_queue_peek64_locked(set_queue, event, tp, wqp);
1273 }
1274 if (*tp != THREAD_NULL) {
1275 if (*wqp != set_queue)
1276 wait_queue_unlock(set_queue);
1277 return; /* thread and its waitq locked */
1278 }
1279
1280 wait_queue_unlock(set_queue);
1281 } else {
1282
1283 /*
1284 * Otherwise, it's a thread. If it is waiting on
1285 * the event we are posting to this queue, return
1286 * it locked, but leave it on the queue.
1287 */
1288 thread_t t = (thread_t)wq_element;
1289
1290 if (t->wait_event == event) {
1291 thread_lock(t);
1292 *tp = t;
1293 *wqp = wq;
1294 return;
1295 }
1296 }
1297 wq_element = wqe_next;
1298 }
1299 }
1300
1301 /*
1302 * Routine: wait_queue_pull_thread_locked
1303 * Purpose:
1304 * Pull a thread that was previously "peeked" off the wait
1305 * queue and (possibly) unlock the waitq.
1306 * Conditions:
1307 * at splsched
1308 * wait queue locked
1309 * thread locked
1310 * Returns:
1311 * with the thread still locked.
1312 */
1313 void
1314 wait_queue_pull_thread_locked(
1315 wait_queue_t waitq,
1316 thread_t thread,
1317 boolean_t unlock)
1318 {
1319
1320 assert(thread->wait_queue == waitq);
1321
1322 remqueue(&waitq->wq_queue, (queue_entry_t)thread );
1323 thread->wait_queue = WAIT_QUEUE_NULL;
1324 thread->wait_event = NO_EVENT64;
1325 thread->at_safe_point = FALSE;
1326 if (unlock)
1327 wait_queue_unlock(waitq);
1328 }
1329
1330
1331 /*
1332 * Routine: wait_queue_select64_thread
1333 * Purpose:
1334 * Look for a thread and remove it from the queues, if
1335 * (and only if) the thread is waiting on the supplied
1336 * <wait_queue, event> pair.
1337 * Conditions:
1338 * at splsched
1339 * wait queue locked
1340 * possibly recursive
1341 * Returns:
1342 * KERN_NOT_WAITING: Thread is not waiting here.
1343 * KERN_SUCCESS: It was, and is now removed (returned locked)
1344 */
1345 static kern_return_t
1346 _wait_queue_select64_thread(
1347 wait_queue_t wq,
1348 event64_t event,
1349 thread_t thread)
1350 {
1351 wait_queue_element_t wq_element;
1352 wait_queue_element_t wqe_next;
1353 kern_return_t res = KERN_NOT_WAITING;
1354 queue_t q = &wq->wq_queue;
1355
1356 thread_lock(thread);
1357 if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1358 remqueue(q, (queue_entry_t) thread);
1359 thread->at_safe_point = FALSE;
1360 thread->wait_event = NO_EVENT64;
1361 thread->wait_queue = WAIT_QUEUE_NULL;
1362 /* thread still locked */
1363 return KERN_SUCCESS;
1364 }
1365 thread_unlock(thread);
1366
1367 /*
1368 * The wait_queue associated with the thread may be one of this
1369 * wait queue's sets. Go see. If so, removing it from
1370 * there is like removing it from here.
1371 */
1372 wq_element = (wait_queue_element_t) queue_first(q);
1373 while (!queue_end(q, (queue_entry_t)wq_element)) {
1374 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1375 wqe_next = (wait_queue_element_t)
1376 queue_next((queue_t) wq_element);
1377
1378 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1379 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1380 wait_queue_t set_queue;
1381
1382 set_queue = (wait_queue_t)wql->wql_setqueue;
1383 wait_queue_lock(set_queue);
1384 if (! wait_queue_empty(set_queue)) {
1385 res = _wait_queue_select64_thread(set_queue,
1386 event,
1387 thread);
1388 }
1389 wait_queue_unlock(set_queue);
1390 if (res == KERN_SUCCESS)
1391 return KERN_SUCCESS;
1392 }
1393 wq_element = wqe_next;
1394 }
1395 return res;
1396 }
1397
1398
1399 /*
1400 * Routine: wait_queue_wakeup64_identity_locked
1401 * Purpose:
1402 * Select a single thread that is most-eligible to run and
1403 * set it running. But return the thread locked.
1404 *
1405 * Conditions:
1406 * at splsched
1407 * wait queue locked
1408 * possibly recursive
1409 * Returns:
1410 * a pointer to the locked thread that was awakened
1411 */
1412 __private_extern__ thread_t
1413 wait_queue_wakeup64_identity_locked(
1414 wait_queue_t wq,
1415 event64_t event,
1416 wait_result_t result,
1417 boolean_t unlock)
1418 {
1419 kern_return_t res;
1420 thread_t thread;
1421
1422 assert(wait_queue_held(wq));
1423
1424
1425 thread = _wait_queue_select64_one(wq, event);
1426 if (unlock)
1427 wait_queue_unlock(wq);
1428
1429 if (thread) {
1430 res = thread_go_locked(thread, result);
1431 assert(res == KERN_SUCCESS);
1432 }
1433 return thread; /* still locked if not NULL */
1434 }
1435
1436
1437 /*
1438 * Routine: wait_queue_wakeup64_one_locked
1439 * Purpose:
1440 * Select a single thread that is most-eligible to run and
1441 * set it running.
1442 *
1443 * Conditions:
1444 * at splsched
1445 * wait queue locked
1446 * possibly recursive
1447 * Returns:
1448 * KERN_SUCCESS - A waiting thread was found and awakened
1449 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1450 */
1451 __private_extern__ kern_return_t
1452 wait_queue_wakeup64_one_locked(
1453 wait_queue_t wq,
1454 event64_t event,
1455 wait_result_t result,
1456 boolean_t unlock)
1457 {
1458 thread_t thread;
1459
1460 assert(wait_queue_held(wq));
1461
1462 thread = _wait_queue_select64_one(wq, event);
1463 if (unlock)
1464 wait_queue_unlock(wq);
1465
1466 if (thread) {
1467 kern_return_t res;
1468
1469 res = thread_go_locked(thread, result);
1470 assert(res == KERN_SUCCESS);
1471 thread_unlock(thread);
1472 return res;
1473 }
1474
1475 return KERN_NOT_WAITING;
1476 }
1477
1478 /*
1479 * Routine: wait_queue_wakeup_one
1480 * Purpose:
1481 * Wakeup the most appropriate thread that is in the specified
1482 * wait queue for the specified event.
1483 * Conditions:
1484 * Nothing locked
1485 * Returns:
1486 * KERN_SUCCESS - Thread was woken up
1487 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1488 */
1489 kern_return_t
1490 wait_queue_wakeup_one(
1491 wait_queue_t wq,
1492 event_t event,
1493 wait_result_t result)
1494 {
1495 thread_t thread;
1496 spl_t s;
1497
1498 if (!wait_queue_is_valid(wq)) {
1499 return KERN_INVALID_ARGUMENT;
1500 }
1501
1502 s = splsched();
1503 wait_queue_lock(wq);
1504 thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
1505 wait_queue_unlock(wq);
1506
1507 if (thread) {
1508 kern_return_t res;
1509
1510 res = thread_go_locked(thread, result);
1511 assert(res == KERN_SUCCESS);
1512 thread_unlock(thread);
1513 splx(s);
1514 return res;
1515 }
1516
1517 splx(s);
1518 return KERN_NOT_WAITING;
1519 }
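
/*
 * Editor's sketch (not part of the original source): waking exactly one
 * waiter, e.g. handing off a single unit of a resource. A return of
 * KERN_NOT_WAITING tells the caller no thread was parked on the event.
 * example_wq and example_object are hypothetical identifiers.
 *
 *	if (wait_queue_wakeup_one(example_wq,
 *				  (event_t)&example_object,
 *				  THREAD_AWAKENED) == KERN_NOT_WAITING) {
 *		... record the pending wakeup so the next waiter can consume it ...
 *	}
 */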
1520
1521 /*
1522 * Routine: wait_queue_wakeup64_one
1523 * Purpose:
1524 * Wakeup the most appropriate thread that is in the specified
1525 * wait queue for the specified event.
1526 * Conditions:
1527 * Nothing locked
1528 * Returns:
1529 * KERN_SUCCESS - Thread was woken up
1530 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1531 */
1532 kern_return_t
1533 wait_queue_wakeup64_one(
1534 wait_queue_t wq,
1535 event64_t event,
1536 wait_result_t result)
1537 {
1538 thread_t thread;
1539 spl_t s;
1540
1541 if (!wait_queue_is_valid(wq)) {
1542 return KERN_INVALID_ARGUMENT;
1543 }
1544 s = splsched();
1545 wait_queue_lock(wq);
1546 thread = _wait_queue_select64_one(wq, event);
1547 wait_queue_unlock(wq);
1548
1549 if (thread) {
1550 kern_return_t res;
1551
1552 res = thread_go_locked(thread, result);
1553 assert(res == KERN_SUCCESS);
1554 thread_unlock(thread);
1555 splx(s);
1556 return res;
1557 }
1558
1559 splx(s);
1560 return KERN_NOT_WAITING;
1561 }
1562
1563
1564 /*
1565 * Routine: wait_queue_wakeup64_thread_locked
1566 * Purpose:
1567 * Wakeup the particular thread that was specified if and only
1568 * if it was in this wait queue (or one of its set queues)
1569 * and waiting on the specified event.
1570 *
1571 * This is much safer than just removing the thread from
1572 * whatever wait queue it happens to be on. For instance, it
1573 * may have already been awoken from the wait you intended to
1574 * interrupt and waited on something else (like another
1575 * semaphore).
1576 * Conditions:
1577 * at splsched
1578 * wait queue already locked (may be released).
1579 * Returns:
1580 * KERN_SUCCESS - the thread was found waiting and awakened
1581 * KERN_NOT_WAITING - the thread was not waiting here
1582 */
1583 __private_extern__ kern_return_t
1584 wait_queue_wakeup64_thread_locked(
1585 wait_queue_t wq,
1586 event64_t event,
1587 thread_t thread,
1588 wait_result_t result,
1589 boolean_t unlock)
1590 {
1591 kern_return_t res;
1592
1593 assert(wait_queue_held(wq));
1594
1595 /*
1596 * See if the thread was still waiting there. If so, it got
1597 * dequeued and returned locked.
1598 */
1599 res = _wait_queue_select64_thread(wq, event, thread);
1600 if (unlock)
1601 wait_queue_unlock(wq);
1602
1603 if (res != KERN_SUCCESS)
1604 return KERN_NOT_WAITING;
1605
1606 res = thread_go_locked(thread, result);
1607 assert(res == KERN_SUCCESS);
1608 thread_unlock(thread);
1609 return res;
1610 }
1611
1612 /*
1613 * Routine: wait_queue_wakeup_thread
1614 * Purpose:
1615 * Wakeup the particular thread that was specified if and only
1616 * if it was in this wait queue (or one of its set queues)
1617 * and waiting on the specified event.
1618 *
1619 * This is much safer than just removing the thread from
1620 * whatever wait queue it happens to be on. For instance, it
1621 * may have already been awoken from the wait you intended to
1622 * interrupt and waited on something else (like another
1623 * semaphore).
1624 * Conditions:
1625 * nothing of interest locked
1626 * we need to assume spl needs to be raised
1627 * Returns:
1628 * KERN_SUCCESS - the thread was found waiting and awakened
1629 * KERN_NOT_WAITING - the thread was not waiting here
1630 */
1631 kern_return_t
1632 wait_queue_wakeup_thread(
1633 wait_queue_t wq,
1634 event_t event,
1635 thread_t thread,
1636 wait_result_t result)
1637 {
1638 kern_return_t res;
1639 spl_t s;
1640
1641 if (!wait_queue_is_valid(wq)) {
1642 return KERN_INVALID_ARGUMENT;
1643 }
1644
1645 s = splsched();
1646 wait_queue_lock(wq);
1647 res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
1648 wait_queue_unlock(wq);
1649
1650 if (res == KERN_SUCCESS) {
1651 res = thread_go_locked(thread, result);
1652 assert(res == KERN_SUCCESS);
1653 thread_unlock(thread);
1654 splx(s);
1655 return res;
1656 }
1657 splx(s);
1658 return KERN_NOT_WAITING;
1659 }
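
/*
 * Editor's sketch (not part of the original source): waking one specific
 * thread, but only if it is still waiting on this <queue, event> pair,
 * e.g. when aborting a wait on its behalf. example_wq, example_object and
 * example_thread are hypothetical identifiers.
 *
 *	kern_return_t kr;
 *
 *	kr = wait_queue_wakeup_thread(example_wq,
 *				      (event_t)&example_object,
 *				      example_thread,
 *				      THREAD_INTERRUPTED);
 *	if (kr == KERN_NOT_WAITING)
 *		... the thread had already been awakened or moved on ...
 */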
1660
1661 /*
1662 * Routine: wait_queue_wakeup64_thread
1663 * Purpose:
1664 * Wakeup the particular thread that was specified if and only
1665 * if it was in this wait queue (or one of its set queues)
1666 * and waiting on the specified event.
1667 *
1668 * This is much safer than just removing the thread from
1669 * whatever wait queue it happens to be on. For instance, it
1670 * may have already been awoken from the wait you intended to
1671 * interrupt and waited on something else (like another
1672 * semaphore).
1673 * Conditions:
1674 * nothing of interest locked
1675 * we need to assume spl needs to be raised
1676 * Returns:
1677 * KERN_SUCCESS - the thread was found waiting and awakened
1678 * KERN_NOT_WAITING - the thread was not waiting here
1679 */
1680 kern_return_t
1681 wait_queue_wakeup64_thread(
1682 wait_queue_t wq,
1683 event64_t event,
1684 thread_t thread,
1685 wait_result_t result)
1686 {
1687 kern_return_t res;
1688 spl_t s;
1689
1690 if (!wait_queue_is_valid(wq)) {
1691 return KERN_INVALID_ARGUMENT;
1692 }
1693
1694 s = splsched();
1695 wait_queue_lock(wq);
1696 res = _wait_queue_select64_thread(wq, event, thread);
1697 wait_queue_unlock(wq);
1698
1699 if (res == KERN_SUCCESS) {
1700 res = thread_go_locked(thread, result);
1701 assert(res == KERN_SUCCESS);
1702 thread_unlock(thread);
1703 splx(s);
1704 return res;
1705 }
1706 splx(s);
1707 return KERN_NOT_WAITING;
1708 }