1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: wait_queue.c (adapted from sched_prim.c)
54 * Author: Avadis Tevanian, Jr.
55 * Date: 1986
56 *
57 * Primitives for manipulating wait queues: either global
58 * ones from sched_prim.c, or private ones associated with
59 * particular structures (ports, semaphores, etc.).
60 */
61
62 #include <kern/kern_types.h>
63 #include <kern/simple_lock.h>
64 #include <kern/kalloc.h>
65 #include <kern/queue.h>
66 #include <kern/spl.h>
67 #include <mach/sync_policy.h>
68 #include <kern/sched_prim.h>
69
70 #include <kern/wait_queue.h>
71
72 /* forward declarations */
73 static boolean_t wait_queue_member_locked(
74 wait_queue_t wq,
75 wait_queue_set_t wq_set);
76
77 void wait_queue_unlink_one(
78 wait_queue_t wq,
79 wait_queue_set_t *wq_setp);
80
81 kern_return_t wait_queue_set_unlink_all_nofree(
82 wait_queue_set_t wq_set);
83
84 /*
85 * Routine: wait_queue_init
86 * Purpose:
87 * Initialize a previously allocated wait queue.
88 * Returns:
89 * KERN_SUCCESS - The wait_queue_t was initialized
90 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
91 */
92 kern_return_t
93 wait_queue_init(
94 wait_queue_t wq,
95 int policy)
96 {
97 if ((policy & SYNC_POLICY_ORDER_MASK) != SYNC_POLICY_FIFO)
98 return KERN_INVALID_ARGUMENT;
99
100 wq->wq_fifo = TRUE;
101 wq->wq_type = _WAIT_QUEUE_inited;
102 queue_init(&wq->wq_queue);
103 hw_lock_init(&wq->wq_interlock);
104 return KERN_SUCCESS;
105 }
106
107 /*
108 * Routine: wait_queue_alloc
109 * Purpose:
110 * Allocate and initialize a wait queue for use outside of
111 * the Mach part of the kernel.
112 * Conditions:
113 * Nothing locked - can block.
114 * Returns:
115 * The allocated and initialized wait queue
116 * WAIT_QUEUE_NULL if there is a resource shortage
117 */
118 wait_queue_t
119 wait_queue_alloc(
120 int policy)
121 {
122 wait_queue_t wq;
123 kern_return_t ret;
124
125 wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
126 if (wq != WAIT_QUEUE_NULL) {
127 ret = wait_queue_init(wq, policy);
128 if (ret != KERN_SUCCESS) {
129 kfree(wq, sizeof(struct wait_queue));
130 wq = WAIT_QUEUE_NULL;
131 }
132 }
133 return wq;
134 }
135
136 /*
137 * Routine: wait_queue_free
138 * Purpose:
139 * Free an allocated wait queue.
140 * Conditions:
141 * May block.
142 */
143 kern_return_t
144 wait_queue_free(
145 wait_queue_t wq)
146 {
147 if (!wait_queue_is_queue(wq))
148 return KERN_INVALID_ARGUMENT;
149 if (!queue_empty(&wq->wq_queue))
150 return KERN_FAILURE;
151 kfree(wq, sizeof(struct wait_queue));
152 return KERN_SUCCESS;
153 }
154
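/*
 * Illustrative sketch: the minimal lifecycle of a private wait queue.
 * This is an example only; "example_flag" and the uninterruptible wait
 * are assumptions for illustration, not part of this file. Any unique
 * kernel address can serve as the event.
 *
 *	static int example_flag;
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *
 *	-- waiting side --
 *	if (wait_queue_assert_wait(wq, (event_t)&example_flag,
 *				   THREAD_UNINT, 0) == THREAD_WAITING)
 *		(void) thread_block(THREAD_CONTINUE_NULL);
 *
 *	-- waking side --
 *	(void) wait_queue_wakeup_all(wq, (event_t)&example_flag,
 *				     THREAD_AWAKENED);
 *
 *	-- teardown: wait_queue_free returns KERN_FAILURE while threads
 *	-- (or set links) are still queued --
 *	(void) wait_queue_free(wq);
 */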
155 /*
156 * Routine: wait_queue_set_init
157 * Purpose:
158 * Initialize a previously allocated wait queue set.
159 * Returns:
160 * KERN_SUCCESS - The wait_queue_set_t was initialized
161 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
162 */
163 kern_return_t
164 wait_queue_set_init(
165 wait_queue_set_t wqset,
166 int policy)
167 {
168 kern_return_t ret;
169
170 ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
171 if (ret != KERN_SUCCESS)
172 return ret;
173
174 wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
175 if (policy & SYNC_POLICY_PREPOST)
176 wqset->wqs_wait_queue.wq_isprepost = TRUE;
177 else
178 wqset->wqs_wait_queue.wq_isprepost = FALSE;
179 queue_init(&wqset->wqs_setlinks);
180 wqset->wqs_refcount = 0;
181 return KERN_SUCCESS;
182 }
183
184
185 kern_return_t
186 wait_queue_sub_init(
187 wait_queue_set_t wqset,
188 int policy)
189 {
190 return wait_queue_set_init(wqset, policy);
191 }
192
193 kern_return_t
194 wait_queue_sub_clearrefs(
195 wait_queue_set_t wq_set)
196 {
197 if (!wait_queue_is_set(wq_set))
198 return KERN_INVALID_ARGUMENT;
199
200 wqs_lock(wq_set);
201 wq_set->wqs_refcount = 0;
202 wqs_unlock(wq_set);
203 return KERN_SUCCESS;
204 }
205
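/*
 * Illustrative sketch: initializing a set with SYNC_POLICY_PREPOST so
 * that wakeups which find no waiter are remembered (wqs_refcount), and
 * later discarding those remembered wakeups. The embedded "my_set"
 * storage is an assumption for illustration.
 *
 *	struct wait_queue_set my_set;
 *
 *	if (wait_queue_set_init(&my_set,
 *		SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST) == KERN_SUCCESS) {
 *		-- a wakeup posted through a member queue with no waiter
 *		-- bumps wqs_refcount; while it is non-zero a wait on the
 *		-- set returns THREAD_AWAKENED immediately --
 *		(void) wait_queue_sub_clearrefs(&my_set);
 *	}
 */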
206 /*
207 * Routine: wait_queue_set_alloc
208 * Purpose:
209 * Allocate and initialize a wait queue set for
210 * use outside of the mach part of the kernel.
211 * Conditions:
212 * May block.
213 * Returns:
214 * The allocated and initialized wait queue set
215 * WAIT_QUEUE_SET_NULL if there is a resource shortage
216 */
217 wait_queue_set_t
218 wait_queue_set_alloc(
219 int policy)
220 {
221 wait_queue_set_t wq_set;
222
223 wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
224 if (wq_set != WAIT_QUEUE_SET_NULL) {
225 kern_return_t ret;
226
227 ret = wait_queue_set_init(wq_set, policy);
228 if (ret != KERN_SUCCESS) {
229 kfree(wq_set, sizeof(struct wait_queue_set));
230 wq_set = WAIT_QUEUE_SET_NULL;
231 }
232 }
233 return wq_set;
234 }
235
236 /*
237 * Routine: wait_queue_set_free
238 * Purpose:
239 * Free an allocated wait queue set
240 * Conditions:
241 * May block.
242 */
243 kern_return_t
244 wait_queue_set_free(
245 wait_queue_set_t wq_set)
246 {
247 if (!wait_queue_is_set(wq_set))
248 return KERN_INVALID_ARGUMENT;
249
250 if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
251 return KERN_FAILURE;
252
253 kfree(wq_set, sizeof(struct wait_queue_set));
254 return KERN_SUCCESS;
255 }
256
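/*
 * Illustrative sketch: pairing a dynamically allocated set with a member
 * wait queue. The ordering matters: members must be unlinked before
 * either side can be freed. Error handling is elided for brevity.
 *
 *	wait_queue_set_t wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO);
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *
 *	(void) wait_queue_link(wq, wqs);
 *	assert(wait_queue_member(wq, wqs));
 *
 *	(void) wait_queue_unlink(wq, wqs);
 *	(void) wait_queue_free(wq);
 *	(void) wait_queue_set_free(wqs);
 */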
257
258 /*
259 *
260 * Routine: wait_queue_set_size
261 * Routine: wait_queue_link_size
262 * Purpose:
263 * Return the size of opaque wait queue structures
264 */
265 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
266 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
267
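/*
 * Illustrative sketch: the size routines above let code outside osfmk
 * allocate storage for the otherwise opaque structures, for use with
 * the _noalloc linking variant below. A hypothetical caller:
 *
 *	void *storage = kalloc(wait_queue_link_size());
 *
 *	if (storage != NULL &&
 *	    wait_queue_link_noalloc(wq, wqs,
 *			(wait_queue_link_t)storage) != KERN_SUCCESS)
 *		kfree(storage, wait_queue_link_size());
 */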
268 /* declare a unique type for wait queue link structures */
269 static unsigned int _wait_queue_link;
270 static unsigned int _wait_queue_unlinked;
271
272 #define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
273 #define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
274
275 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
276 WQASSERT(((wqe)->wqe_queue == (wq) && \
277 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
278 "wait queue element list corruption: wq=%#x, wqe=%#x", \
279 (wq), (wqe))
280
281 #define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
282 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
283 (queue_t)(wql) : &(wql)->wql_setlinks)))
284
285 #define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
286 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
287 (queue_t)(wql) : &(wql)->wql_setlinks)))
288
289 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
290 WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
291 ((wql)->wql_setqueue == (wqs)) && \
292 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
293 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
294 "wait queue set links corruption: wqs=%#x, wql=%#x", \
295 (wqs), (wql))
296
297 #if defined(_WAIT_QUEUE_DEBUG_)
298
299 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
300
301 #define WAIT_QUEUE_CHECK(wq) \
302 MACRO_BEGIN \
303 queue_t q2 = &(wq)->wq_queue; \
304 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
305 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
306 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
307 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
308 } \
309 MACRO_END
310
311 #define WAIT_QUEUE_SET_CHECK(wqs) \
312 MACRO_BEGIN \
313 queue_t q2 = &(wqs)->wqs_setlinks; \
314 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
315 while (!queue_end(q2, (queue_entry_t)wql2)) { \
316 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
317 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
318 } \
319 MACRO_END
320
321 #else /* !_WAIT_QUEUE_DEBUG_ */
322
323 #define WQASSERT(e, s, p0, p1) assert(e)
324
325 #define WAIT_QUEUE_CHECK(wq)
326 #define WAIT_QUEUE_SET_CHECK(wqs)
327
328 #endif /* !_WAIT_QUEUE_DEBUG_ */
329
330 /*
331 * Routine: wait_queue_member_locked
332 * Purpose:
333 * Indicate whether the wait queue is a member of the given set queue
334 * Conditions:
335 * The wait queue is locked
336 * The set queue is just that, a set queue
337 */
338 static boolean_t
339 wait_queue_member_locked(
340 wait_queue_t wq,
341 wait_queue_set_t wq_set)
342 {
343 wait_queue_element_t wq_element;
344 queue_t q;
345
346 assert(wait_queue_held(wq));
347 assert(wait_queue_is_set(wq_set));
348
349 q = &wq->wq_queue;
350
351 wq_element = (wait_queue_element_t) queue_first(q);
352 while (!queue_end(q, (queue_entry_t)wq_element)) {
353 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
354 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
355 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
356
357 if (wql->wql_setqueue == wq_set)
358 return TRUE;
359 }
360 wq_element = (wait_queue_element_t)
361 queue_next((queue_t) wq_element);
362 }
363 return FALSE;
364 }
365
366
367 /*
368 * Routine: wait_queue_member
369 * Purpose:
370 * Indicate whether the wait queue is a member of the given set queue
371 * Conditions:
372 * The set queue is just that, a set queue
373 */
374 boolean_t
375 wait_queue_member(
376 wait_queue_t wq,
377 wait_queue_set_t wq_set)
378 {
379 boolean_t ret;
380 spl_t s;
381
382 if (!wait_queue_is_set(wq_set))
383 return FALSE;
384
385 s = splsched();
386 wait_queue_lock(wq);
387 ret = wait_queue_member_locked(wq, wq_set);
388 wait_queue_unlock(wq);
389 splx(s);
390
391 return ret;
392 }
393
394
395 /*
396 * Routine: wait_queue_link_noalloc
397 * Purpose:
398 * Insert a set wait queue into a wait queue, linking the two
399 * together with the wait_queue_link structure supplied by the
400 * caller (no allocation is performed).
401 * Conditions:
402 * The wait queue being inserted must be inited as a set queue
403 */
404 kern_return_t
405 wait_queue_link_noalloc(
406 wait_queue_t wq,
407 wait_queue_set_t wq_set,
408 wait_queue_link_t wql)
409 {
410 wait_queue_element_t wq_element;
411 queue_t q;
412 spl_t s;
413
414 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
415 return KERN_INVALID_ARGUMENT;
416
417 /*
418 * There are probably fewer threads and sets associated with
419 * the wait queue than there are wait queues associated with
420 * the set. So let's validate it that way.
421 */
422 s = splsched();
423 wait_queue_lock(wq);
424 q = &wq->wq_queue;
425 wq_element = (wait_queue_element_t) queue_first(q);
426 while (!queue_end(q, (queue_entry_t)wq_element)) {
427 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
428 if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
429 ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
430 wait_queue_unlock(wq);
431 splx(s);
432 return KERN_ALREADY_IN_SET;
433 }
434 wq_element = (wait_queue_element_t)
435 queue_next((queue_t) wq_element);
436 }
437
438 /*
439 * Not already a member, so we can add it.
440 */
441 wqs_lock(wq_set);
442
443 WAIT_QUEUE_SET_CHECK(wq_set);
444
445 wql->wql_queue = wq;
446 queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
447 wql->wql_setqueue = wq_set;
448 queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
449 wql->wql_type = WAIT_QUEUE_LINK;
450
451 wqs_unlock(wq_set);
452 wait_queue_unlock(wq);
453 splx(s);
454
455 return KERN_SUCCESS;
456 }
457
458 /*
459 * Routine: wait_queue_link
460 * Purpose:
461 * Insert a set wait queue into a wait queue. This
462 * requires us to link the two together using a wait_queue_link
463 * structure that we allocate.
464 * Conditions:
465 * The wait queue being inserted must be inited as a set queue
466 */
467 kern_return_t
468 wait_queue_link(
469 wait_queue_t wq,
470 wait_queue_set_t wq_set)
471 {
472 wait_queue_link_t wql;
473 kern_return_t ret;
474
475 wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
476 if (wql == WAIT_QUEUE_LINK_NULL)
477 return KERN_RESOURCE_SHORTAGE;
478
479 ret = wait_queue_link_noalloc(wq, wq_set, wql);
480 if (ret != KERN_SUCCESS)
481 kfree(wql, sizeof(struct wait_queue_link));
482
483 return ret;
484 }
485
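/*
 * Illustrative sketch: distinguishing the possible results of
 * wait_queue_link. This is an example only.
 *
 *	switch (wait_queue_link(wq, wqs)) {
 *	case KERN_SUCCESS:		-- linked; unlink before freeing --
 *		break;
 *	case KERN_ALREADY_IN_SET:	-- benign: already a member --
 *		break;
 *	case KERN_RESOURCE_SHORTAGE:	-- no memory for the link --
 *		break;
 *	default:			-- invalid queue or set --
 *		break;
 *	}
 */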
486
487 /*
488 * Routine: wait_queue_unlink_locked
489 * Purpose:
490 * Undo the linkage between a wait queue and a set.
491 */
492 static void
493 wait_queue_unlink_locked(
494 wait_queue_t wq,
495 wait_queue_set_t wq_set,
496 wait_queue_link_t wql)
497 {
498 assert(wait_queue_held(wq));
499 assert(wait_queue_held(&wq_set->wqs_wait_queue));
500
501 wql->wql_queue = WAIT_QUEUE_NULL;
502 queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
503 wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
504 queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
505 wql->wql_type = WAIT_QUEUE_UNLINKED;
506
507 WAIT_QUEUE_CHECK(wq);
508 WAIT_QUEUE_SET_CHECK(wq_set);
509 }
510
511 /*
512 * Routine: wait_queue_unlink
513 * Purpose:
514 * Remove the linkage between a wait queue and a set,
515 * freeing the linkage structure.
516 * Conditions:
517 * The wait queue must currently be linked to the set queue
518 */
519 kern_return_t
520 wait_queue_unlink(
521 wait_queue_t wq,
522 wait_queue_set_t wq_set)
523 {
524 wait_queue_element_t wq_element;
525 wait_queue_link_t wql;
526 queue_t q;
527 spl_t s;
528
529 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
530 return KERN_INVALID_ARGUMENT;
531 }
532 s = splsched();
533 wait_queue_lock(wq);
534
535 q = &wq->wq_queue;
536 wq_element = (wait_queue_element_t) queue_first(q);
537 while (!queue_end(q, (queue_entry_t)wq_element)) {
538 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
539 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
540 wql = (wait_queue_link_t)wq_element;
541
542 if (wql->wql_setqueue == wq_set) {
543 wqs_lock(wq_set);
544 wait_queue_unlink_locked(wq, wq_set, wql);
545 wqs_unlock(wq_set);
546 wait_queue_unlock(wq);
547 splx(s);
548 kfree(wql, sizeof(struct wait_queue_link));
549 return KERN_SUCCESS;
550 }
551 }
552 wq_element = (wait_queue_element_t)
553 queue_next((queue_t) wq_element);
554 }
555 wait_queue_unlock(wq);
556 splx(s);
557 return KERN_NOT_IN_SET;
558 }
559
560
561 /*
562 * Routine: wait_queue_unlinkall_nofree
563 * Purpose:
564 * Remove the linkage between a wait queue and all its
565 * sets. The caller is responsible for freeing
566 * the wait queue link structures.
567 */
568
569 kern_return_t
570 wait_queue_unlinkall_nofree(
571 wait_queue_t wq)
572 {
573 wait_queue_element_t wq_element;
574 wait_queue_element_t wq_next_element;
575 wait_queue_set_t wq_set;
576 wait_queue_link_t wql;
579 queue_t q;
580 spl_t s;
581
582 if (!wait_queue_is_queue(wq)) {
583 return KERN_INVALID_ARGUMENT;
584 }
587
588 s = splsched();
589 wait_queue_lock(wq);
590
591 q = &wq->wq_queue;
592
593 wq_element = (wait_queue_element_t) queue_first(q);
594 while (!queue_end(q, (queue_entry_t)wq_element)) {
595 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
596 wq_next_element = (wait_queue_element_t)
597 queue_next((queue_t) wq_element);
598
599 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
600 wql = (wait_queue_link_t)wq_element;
601 wq_set = wql->wql_setqueue;
602 wqs_lock(wq_set);
603 wait_queue_unlink_locked(wq, wq_set, wql);
604 wqs_unlock(wq_set);
605 }
606 wq_element = wq_next_element;
607 }
608 wait_queue_unlock(wq);
609 splx(s);
610 return(KERN_SUCCESS);
611 }
612
613
614 /*
615 * Routine: wait_queue_unlink_all
616 * Purpose:
617 * Remove the linkage between a wait queue and all its sets.
618 * All the linkage structures are freed.
619 * Conditions:
620 * Nothing of interest locked.
621 */
622
623 kern_return_t
624 wait_queue_unlink_all(
625 wait_queue_t wq)
626 {
627 wait_queue_element_t wq_element;
628 wait_queue_element_t wq_next_element;
629 wait_queue_set_t wq_set;
630 wait_queue_link_t wql;
631 queue_head_t links_queue_head;
632 queue_t links = &links_queue_head;
633 queue_t q;
634 spl_t s;
635
636 if (!wait_queue_is_queue(wq)) {
637 return KERN_INVALID_ARGUMENT;
638 }
639
640 queue_init(links);
641
642 s = splsched();
643 wait_queue_lock(wq);
644
645 q = &wq->wq_queue;
646
647 wq_element = (wait_queue_element_t) queue_first(q);
648 while (!queue_end(q, (queue_entry_t)wq_element)) {
649 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
650 wq_next_element = (wait_queue_element_t)
651 queue_next((queue_t) wq_element);
652
653 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
654 wql = (wait_queue_link_t)wq_element;
655 wq_set = wql->wql_setqueue;
656 wqs_lock(wq_set);
657 wait_queue_unlink_locked(wq, wq_set, wql);
658 wqs_unlock(wq_set);
659 enqueue(links, &wql->wql_links);
660 }
661 wq_element = wq_next_element;
662 }
663 wait_queue_unlock(wq);
664 splx(s);
665
666 while(!queue_empty(links)) {
667 wql = (wait_queue_link_t) dequeue(links);
668 kfree(wql, sizeof(struct wait_queue_link));
669 }
670
671 return(KERN_SUCCESS);
672 }
673
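/*
 * Illustrative sketch: tearing down a wait queue that may still be
 * linked into sets. Unlinking (which frees the link structures) must
 * precede the free, since wait_queue_free refuses non-empty queues.
 *
 *	(void) wait_queue_unlink_all(wq);	-- frees the link structures --
 *	kr = wait_queue_free(wq);		-- KERN_FAILURE if still non-empty --
 */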
674 /*
675 * Routine: wait_queue_set_unlink_all_nofree
676 * Purpose:
677 * Remove the linkage between a set wait queue and all its
678 * member wait queues. The link structures are not freed, nor
679 * returned. It is the caller's responsibility to track and free
680 * them.
681 * Conditions:
682 * The wait queue must be inited as a set queue
683 */
684 kern_return_t
685 wait_queue_set_unlink_all_nofree(
686 wait_queue_set_t wq_set)
687 {
688 wait_queue_link_t wql;
689 wait_queue_t wq;
690 queue_t q;
691 spl_t s;
692
693 if (!wait_queue_is_set(wq_set)) {
694 return KERN_INVALID_ARGUMENT;
695 }
696
697 retry:
698 s = splsched();
699 wqs_lock(wq_set);
700
701 q = &wq_set->wqs_setlinks;
702
703 wql = (wait_queue_link_t)queue_first(q);
704 while (!queue_end(q, (queue_entry_t)wql)) {
705 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
706 wq = wql->wql_queue;
707 if (wait_queue_lock_try(wq)) {
708 wait_queue_unlink_locked(wq, wq_set, wql);
709 wait_queue_unlock(wq);
710 wql = (wait_queue_link_t)queue_first(q);
711 } else {
712 wqs_unlock(wq_set);
713 splx(s);
714 delay(1);
715 goto retry;
716 }
717 }
718 wqs_unlock(wq_set);
719 splx(s);
720
721 return(KERN_SUCCESS);
722 }
723
724 /* legacy interface naming */
725 kern_return_t
726 wait_subqueue_unlink_all(
727 wait_queue_set_t wq_set)
728 {
729 return wait_queue_set_unlink_all_nofree(wq_set);
730 }
731
732
733 /*
734 * Routine: wait_queue_set_unlink_all
735 * Purpose:
736 * Remove the linkage between a set wait queue and all its
737 * member wait queues. The link structures are freed.
738 * Conditions:
739 * The wait queue must be a set
740 */
741 kern_return_t
742 wait_queue_set_unlink_all(
743 wait_queue_set_t wq_set)
744 {
745 wait_queue_link_t wql;
746 wait_queue_t wq;
747 queue_t q;
748 queue_head_t links_queue_head;
749 queue_t links = &links_queue_head;
750 spl_t s;
751
752 if (!wait_queue_is_set(wq_set)) {
753 return KERN_INVALID_ARGUMENT;
754 }
755
756 queue_init(links);
757
758 retry:
759 s = splsched();
760 wqs_lock(wq_set);
761
762 q = &wq_set->wqs_setlinks;
763
764 wql = (wait_queue_link_t)queue_first(q);
765 while (!queue_end(q, (queue_entry_t)wql)) {
766 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
767 wq = wql->wql_queue;
768 if (wait_queue_lock_try(wq)) {
769 wait_queue_unlink_locked(wq, wq_set, wql);
770 wait_queue_unlock(wq);
771 enqueue(links, &wql->wql_links);
772 wql = (wait_queue_link_t)queue_first(q);
773 } else {
774 wqs_unlock(wq_set);
775 splx(s);
776 delay(1);
777 goto retry;
778 }
779 }
780 wqs_unlock(wq_set);
781 splx(s);
782
783 while (!queue_empty (links)) {
784 wql = (wait_queue_link_t) dequeue(links);
785 kfree(wql, sizeof(struct wait_queue_link));
786 }
787 return(KERN_SUCCESS);
788 }
789
790
791 /*
792 * Routine: wait_queue_unlink_one
793 * Purpose:
794 * Find and unlink one set wait queue
795 * Conditions:
796 * Nothing of interest locked.
797 */
798 void
799 wait_queue_unlink_one(
800 wait_queue_t wq,
801 wait_queue_set_t *wq_setp)
802 {
803 wait_queue_element_t wq_element;
804 queue_t q;
805 spl_t s;
806
807 s = splsched();
808 wait_queue_lock(wq);
809
810 q = &wq->wq_queue;
811
812 wq_element = (wait_queue_element_t) queue_first(q);
813 while (!queue_end(q, (queue_entry_t)wq_element)) {
814
815 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
816 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
817 wait_queue_set_t wq_set = wql->wql_setqueue;
818
819 wqs_lock(wq_set);
820 wait_queue_unlink_locked(wq, wq_set, wql);
821 wqs_unlock(wq_set);
822 wait_queue_unlock(wq);
823 splx(s);
824 kfree(wql,sizeof(struct wait_queue_link));
825 *wq_setp = wq_set;
826 return;
827 }
828
829 wq_element = (wait_queue_element_t)
830 queue_next((queue_t) wq_element);
831 }
832 wait_queue_unlock(wq);
833 splx(s);
834 *wq_setp = WAIT_QUEUE_SET_NULL;
835 }
836
837
838 /*
839 * Routine: wait_queue_assert_wait64_locked
840 * Purpose:
841 * Insert the current thread into the supplied wait queue
842 * waiting for a particular event to be posted to that queue.
843 *
844 * Conditions:
845 * The wait queue is assumed locked.
846 * The waiting thread is assumed locked.
847 *
848 */
849 __private_extern__ wait_result_t
850 wait_queue_assert_wait64_locked(
851 wait_queue_t wq,
852 event64_t event,
853 wait_interrupt_t interruptible,
854 uint64_t deadline,
855 thread_t thread)
856 {
857 wait_result_t wait_result;
858
859 if (!wait_queue_assert_possible(thread))
860 panic("wait_queue_assert_wait64_locked");
861
862 if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
863 wait_queue_set_t wqs = (wait_queue_set_t)wq;
864
865 if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
866 return(THREAD_AWAKENED);
867 }
868
869 /*
870 * This is the extent to which we currently take scheduling attributes
871 * into account. If the thread is vm privileged, we stick it at
872 * the front of the queue. Later, these queues will honor the policy
873 * value set at wait_queue_init time.
874 */
875 wait_result = thread_mark_wait_locked(thread, interruptible);
876 if (wait_result == THREAD_WAITING) {
877 if (thread->options & TH_OPT_VMPRIV)
878 enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
879 else
880 enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
881
882 thread->wait_event = event;
883 thread->wait_queue = wq;
884
885 if (deadline != 0) {
886 if (!timer_call_enter(&thread->wait_timer, deadline))
887 thread->wait_timer_active++;
888 thread->wait_timer_is_set = TRUE;
889 }
890 }
891 return(wait_result);
892 }
893
894 /*
895 * Routine: wait_queue_assert_wait
896 * Purpose:
897 * Insert the current thread into the supplied wait queue
898 * waiting for a particular event to be posted to that queue.
899 *
900 * Conditions:
901 * nothing of interest locked.
902 */
903 wait_result_t
904 wait_queue_assert_wait(
905 wait_queue_t wq,
906 event_t event,
907 wait_interrupt_t interruptible,
908 uint64_t deadline)
909 {
910 spl_t s;
911 wait_result_t ret;
912 thread_t thread = current_thread();
913
914 /* If it is an invalid wait queue, you can't wait on it */
915 if (!wait_queue_is_valid(wq))
916 return (thread->wait_result = THREAD_RESTART);
917
918 s = splsched();
919 wait_queue_lock(wq);
920 thread_lock(thread);
921 ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
922 interruptible, deadline, thread);
923 thread_unlock(thread);
924 wait_queue_unlock(wq);
925 splx(s);
926 return(ret);
927 }
928
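/*
 * Illustrative sketch: an interruptible wait with a deadline, and the
 * four results a caller should expect after blocking. The use of
 * clock_interval_to_deadline to build an absolute deadline is an
 * assumption for illustration.
 *
 *	uint64_t deadline;
 *	wait_result_t wr;
 *
 *	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
 *	wr = wait_queue_assert_wait(wq, event, THREAD_ABORTSAFE, deadline);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *	switch (wr) {
 *	case THREAD_AWAKENED:	-- event was posted --
 *	case THREAD_TIMED_OUT:	-- deadline expired --
 *	case THREAD_INTERRUPTED: -- wait was aborted --
 *	case THREAD_RESTART:	-- queue became invalid --
 *		break;
 *	}
 */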
929 /*
930 * Routine: wait_queue_assert_wait64
931 * Purpose:
932 * Insert the current thread into the supplied wait queue
933 * waiting for a particular event to be posted to that queue.
934 * Conditions:
935 * nothing of interest locked.
936 */
937 wait_result_t
938 wait_queue_assert_wait64(
939 wait_queue_t wq,
940 event64_t event,
941 wait_interrupt_t interruptible,
942 uint64_t deadline)
943 {
944 spl_t s;
945 wait_result_t ret;
946 thread_t thread = current_thread();
947
948 /* If it is an invalid wait queue, you can't wait on it */
949 if (!wait_queue_is_valid(wq))
950 return (thread->wait_result = THREAD_RESTART);
951
952 s = splsched();
953 wait_queue_lock(wq);
954 thread_lock(thread);
955 ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
956 thread_unlock(thread);
957 wait_queue_unlock(wq);
958 splx(s);
959 return(ret);
960 }
961
962 /*
963 * Routine: _wait_queue_select64_all
964 * Purpose:
965 * Select all threads off a wait queue that meet the
966 * supplied criteria.
967 * Conditions:
968 * at splsched
969 * wait queue locked
970 * wake_queue initialized and ready for insertion
971 * possibly recursive
972 * Returns:
973 * a queue of locked threads
974 */
975 static void
976 _wait_queue_select64_all(
977 wait_queue_t wq,
978 event64_t event,
979 queue_t wake_queue)
980 {
981 wait_queue_element_t wq_element;
982 wait_queue_element_t wqe_next;
983 queue_t q;
984
985 q = &wq->wq_queue;
986
987 wq_element = (wait_queue_element_t) queue_first(q);
988 while (!queue_end(q, (queue_entry_t)wq_element)) {
989 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
990 wqe_next = (wait_queue_element_t)
991 queue_next((queue_t) wq_element);
992
993 /*
994 * We may have to recurse if this is a compound wait queue.
995 */
996 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
997 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
998 wait_queue_t set_queue;
999
1000 /*
1001 * We have to check the set wait queue.
1002 */
1003 set_queue = (wait_queue_t)wql->wql_setqueue;
1004 wait_queue_lock(set_queue);
1005 if (set_queue->wq_isprepost) {
1006 wait_queue_set_t wqs = (wait_queue_set_t)set_queue;
1007
1008 /*
1009 * Preposting is only for sets and wait queue
1010 * is the first element of set
1011 */
1012 wqs->wqs_refcount++;
1013 }
1014 if (! wait_queue_empty(set_queue))
1015 _wait_queue_select64_all(set_queue, event, wake_queue);
1016 wait_queue_unlock(set_queue);
1017 } else {
1018
1019 /*
1020 * Otherwise, it's a thread. If it is waiting on
1021 * the event we are posting to this queue, pull
1022 * it off the queue and stick it in our wake_queue.
1023 */
1024 thread_t t = (thread_t)wq_element;
1025
1026 if (t->wait_event == event) {
1027 thread_lock(t);
1028 remqueue(q, (queue_entry_t) t);
1029 enqueue (wake_queue, (queue_entry_t) t);
1030 t->wait_queue = WAIT_QUEUE_NULL;
1031 t->wait_event = NO_EVENT64;
1032 t->at_safe_point = FALSE;
1033 /* returned locked */
1034 }
1035 }
1036 wq_element = wqe_next;
1037 }
1038 }
1039
1040 /*
1041 * Routine: wait_queue_wakeup64_all_locked
1042 * Purpose:
1043 * Wakeup all threads that are in the specified
1044 * wait queue and waiting on the specified event.
1045 * Conditions:
1046 * wait queue already locked (may be released).
1047 * Returns:
1048 * KERN_SUCCESS - Threads were woken up
1049 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1050 */
1051 __private_extern__ kern_return_t
1052 wait_queue_wakeup64_all_locked(
1053 wait_queue_t wq,
1054 event64_t event,
1055 wait_result_t result,
1056 boolean_t unlock)
1057 {
1058 queue_head_t wake_queue_head;
1059 queue_t q = &wake_queue_head;
1060 kern_return_t res;
1061
1062 assert(wait_queue_held(wq));
1063 queue_init(q);
1064
1065 /*
1066 * Select the threads that we will wake up. The threads
1067 * are returned to us locked and cleanly removed from the
1068 * wait queue.
1069 */
1070 _wait_queue_select64_all(wq, event, q);
1071 if (unlock)
1072 wait_queue_unlock(wq);
1073
1074 /*
1075 * For each thread, set it running.
1076 */
1077 res = KERN_NOT_WAITING;
1078 while (!queue_empty (q)) {
1079 thread_t thread = (thread_t) dequeue(q);
1080 res = thread_go(thread, result);
1081 assert(res == KERN_SUCCESS);
1082 thread_unlock(thread);
1083 }
1084 return res;
1085 }
1086
1087
1088 /*
1089 * Routine: wait_queue_wakeup_all
1090 * Purpose:
1091 * Wakeup all threads that are in the specified
1092 * wait queue and waiting on the specified event.
1093 * Conditions:
1094 * Nothing locked
1095 * Returns:
1096 * KERN_SUCCESS - Threads were woken up
1097 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1098 */
1099 kern_return_t
1100 wait_queue_wakeup_all(
1101 wait_queue_t wq,
1102 event_t event,
1103 wait_result_t result)
1104 {
1105 kern_return_t ret;
1106 spl_t s;
1107
1108 if (!wait_queue_is_valid(wq)) {
1109 return KERN_INVALID_ARGUMENT;
1110 }
1111
1112 s = splsched();
1113 wait_queue_lock(wq);
1114 ret = wait_queue_wakeup64_all_locked(
1115 wq, (event64_t)((uint32_t)event),
1116 result, TRUE);
1117 /* lock released */
1118 splx(s);
1119 return ret;
1120 }
1121
1122 /*
1123 * Routine: wait_queue_wakeup64_all
1124 * Purpose:
1125 * Wakeup all threads that are in the specified
1126 * wait queue and waiting on the specified event.
1127 * Conditions:
1128 * Nothing locked
1129 * Returns:
1130 * KERN_SUCCESS - Threads were woken up
1131 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1132 */
1133 kern_return_t
1134 wait_queue_wakeup64_all(
1135 wait_queue_t wq,
1136 event64_t event,
1137 wait_result_t result)
1138 {
1139 kern_return_t ret;
1140 spl_t s;
1141
1142 if (!wait_queue_is_valid(wq)) {
1143 return KERN_INVALID_ARGUMENT;
1144 }
1145
1146 s = splsched();
1147 wait_queue_lock(wq);
1148 ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1149 /* lock released */
1150 splx(s);
1151 return ret;
1152 }
1153
1154 /*
1155 * Routine: _wait_queue_select64_one
1156 * Purpose:
1157 * Select the best thread off a wait queue that meets the
1158 * supplied criteria.
1159 * Conditions:
1160 * at splsched
1161 * wait queue locked
1162 * possibly recursive
1163 * Returns:
1164 * a locked thread - if one found
1165 * Note:
1166 * This is where the sync policy of the wait queue comes
1167 * into effect. For now, we just assume FIFO.
1168 */
1169 static thread_t
1170 _wait_queue_select64_one(
1171 wait_queue_t wq,
1172 event64_t event)
1173 {
1174 wait_queue_element_t wq_element;
1175 wait_queue_element_t wqe_next;
1176 thread_t t = THREAD_NULL;
1177 queue_t q;
1178
1179 assert(wq->wq_fifo);
1180
1181 q = &wq->wq_queue;
1182
1183 wq_element = (wait_queue_element_t) queue_first(q);
1184 while (!queue_end(q, (queue_entry_t)wq_element)) {
1185 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1186 wqe_next = (wait_queue_element_t)
1187 queue_next((queue_t) wq_element);
1188
1189 /*
1190 * We may have to recurse if this is a compound wait queue.
1191 */
1192 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1193 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1194 wait_queue_t set_queue;
1195
1196 /*
1197 * We have to check the set wait queue.
1198 */
1199 set_queue = (wait_queue_t)wql->wql_setqueue;
1200 wait_queue_lock(set_queue);
1201 if (! wait_queue_empty(set_queue)) {
1202 t = _wait_queue_select64_one(set_queue, event);
1203 }
1204 wait_queue_unlock(set_queue);
1205 if (t != THREAD_NULL)
1206 return t;
1207 } else {
1208
1209 /*
1210 * Otherwise, it's a thread. If it is waiting on
1211 * the event we are posting to this queue, pull
1212 * it off the queue and return it locked.
1213 */
1214 t = (thread_t)wq_element;
1215 if (t->wait_event == event) {
1216 thread_lock(t);
1217 remqueue(q, (queue_entry_t) t);
1218 t->wait_queue = WAIT_QUEUE_NULL;
1219 t->wait_event = NO_EVENT64;
1220 t->at_safe_point = FALSE;
1221 return t; /* still locked */
1222 }
1223
1224 t = THREAD_NULL;
1225 }
1226 wq_element = wqe_next;
1227 }
1228 return THREAD_NULL;
1229 }
1230
1231 /*
1232 * Routine: wait_queue_peek64_locked
1233 * Purpose:
1234 * Select the best thread from a wait queue that meets the
1235 * supplied criteria, but leave it on the queue it was
1236 * found on. The thread, and the actual wait_queue the
1237 * thread was found on are identified.
1238 * Conditions:
1239 * at splsched
1240 * wait queue locked
1241 * possibly recursive
1242 * Returns:
1243 * a locked thread - if one found
1244 * a locked waitq - the one the thread was found on
1245 * Note:
1246 * Both the waitq the thread was actually found on, and
1247 * the supplied wait queue, are locked after this.
1248 */
1249 __private_extern__ void
1250 wait_queue_peek64_locked(
1251 wait_queue_t wq,
1252 event64_t event,
1253 thread_t *tp,
1254 wait_queue_t *wqp)
1255 {
1256 wait_queue_element_t wq_element;
1257 wait_queue_element_t wqe_next;
1258 queue_t q;
1259
1260 assert(wq->wq_fifo);
1261
1262 *tp = THREAD_NULL;
1263
1264 q = &wq->wq_queue;
1265
1266 wq_element = (wait_queue_element_t) queue_first(q);
1267 while (!queue_end(q, (queue_entry_t)wq_element)) {
1268 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1269 wqe_next = (wait_queue_element_t)
1270 queue_next((queue_t) wq_element);
1271
1272 /*
1273 * We may have to recurse if this is a compound wait queue.
1274 */
1275 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1276 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1277 wait_queue_t set_queue;
1278
1279 /*
1280 * We have to check the set wait queue.
1281 */
1282 set_queue = (wait_queue_t)wql->wql_setqueue;
1283 wait_queue_lock(set_queue);
1284 if (! wait_queue_empty(set_queue)) {
1285 wait_queue_peek64_locked(set_queue, event, tp, wqp);
1286 }
1287 if (*tp != THREAD_NULL) {
1288 if (*wqp != set_queue)
1289 wait_queue_unlock(set_queue);
1290 return; /* thread and its waitq locked */
1291 }
1292
1293 wait_queue_unlock(set_queue);
1294 } else {
1295
1296 /*
1297 * Otherwise, it's a thread. If it is waiting on
1298 * the event we are posting to this queue, return
1299 * it locked, but leave it on the queue.
1300 */
1301 thread_t t = (thread_t)wq_element;
1302
1303 if (t->wait_event == event) {
1304 thread_lock(t);
1305 *tp = t;
1306 *wqp = wq;
1307 return;
1308 }
1309 }
1310 wq_element = wqe_next;
1311 }
1312 }
1313
1314 /*
1315 * Routine: wait_queue_pull_thread_locked
1316 * Purpose:
1317 * Pull a thread that was previously "peeked" off the wait
1318 * queue and (possibly) unlock the waitq.
1319 * Conditions:
1320 * at splsched
1321 * wait queue locked
1322 * thread locked
1323 * Returns:
1324 * with the thread still locked.
1325 */
1326 void
1327 wait_queue_pull_thread_locked(
1328 wait_queue_t waitq,
1329 thread_t thread,
1330 boolean_t unlock)
1331 {
1332
1333 assert(thread->wait_queue == waitq);
1334
1335 remqueue(&waitq->wq_queue, (queue_entry_t)thread );
1336 thread->wait_queue = WAIT_QUEUE_NULL;
1337 thread->wait_event = NO_EVENT64;
1338 thread->at_safe_point = FALSE;
1339 if (unlock)
1340 wait_queue_unlock(waitq);
1341 }
1342
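/*
 * Illustrative sketch of the peek/pull protocol, assuming splsched and
 * no other locks held. The queue the thread is found on may be a member
 * set's queue rather than the one supplied, so the unlock bookkeeping
 * below passes (found != wq) to wait_queue_pull_thread_locked.
 *
 *	thread_t thread = THREAD_NULL;
 *	wait_queue_t found = WAIT_QUEUE_NULL;
 *
 *	s = splsched();
 *	wait_queue_lock(wq);
 *	wait_queue_peek64_locked(wq, event, &thread, &found);
 *	if (thread != THREAD_NULL) {
 *		-- thread and found are both locked here --
 *		wait_queue_pull_thread_locked(found, thread, (found != wq));
 *		thread_unlock(thread);
 *	}
 *	wait_queue_unlock(wq);
 *	splx(s);
 */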
1343
1344 /*
1345 * Routine: wait_queue_select64_thread
1346 * Purpose:
1347 * Look for a thread and remove it from the queues, if
1348 * (and only if) the thread is waiting on the supplied
1349 * <wait_queue, event> pair.
1350 * Conditions:
1351 * at splsched
1352 * wait queue locked
1353 * possibly recursive
1354 * Returns:
1355 * KERN_NOT_WAITING: Thread is not waiting here.
1356 * KERN_SUCCESS: It was, and is now removed (returned locked)
1357 */
1358 static kern_return_t
1359 _wait_queue_select64_thread(
1360 wait_queue_t wq,
1361 event64_t event,
1362 thread_t thread)
1363 {
1364 wait_queue_element_t wq_element;
1365 wait_queue_element_t wqe_next;
1366 kern_return_t res = KERN_NOT_WAITING;
1367 queue_t q = &wq->wq_queue;
1368
1369 thread_lock(thread);
1370 if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1371 remqueue(q, (queue_entry_t) thread);
1372 thread->at_safe_point = FALSE;
1373 thread->wait_event = NO_EVENT64;
1374 thread->wait_queue = WAIT_QUEUE_NULL;
1375 /* thread still locked */
1376 return KERN_SUCCESS;
1377 }
1378 thread_unlock(thread);
1379
1380 /*
1381 * The wait_queue associated with the thread may be one of this
1382 * wait queue's sets. Go see. If so, removing it from
1383 * there is like removing it from here.
1384 */
1385 wq_element = (wait_queue_element_t) queue_first(q);
1386 while (!queue_end(q, (queue_entry_t)wq_element)) {
1387 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1388 wqe_next = (wait_queue_element_t)
1389 queue_next((queue_t) wq_element);
1390
1391 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1392 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1393 wait_queue_t set_queue;
1394
1395 set_queue = (wait_queue_t)wql->wql_setqueue;
1396 wait_queue_lock(set_queue);
1397 if (! wait_queue_empty(set_queue)) {
1398 res = _wait_queue_select64_thread(set_queue,
1399 event,
1400 thread);
1401 }
1402 wait_queue_unlock(set_queue);
1403 if (res == KERN_SUCCESS)
1404 return KERN_SUCCESS;
1405 }
1406 wq_element = wqe_next;
1407 }
1408 return res;
1409 }
1410
1411
1412 /*
1413 * Routine: wait_queue_wakeup64_identity_locked
1414 * Purpose:
1415 * Select a single thread that is most-eligible to run and
1416 * set it running. But return the thread locked.
1417 *
1418 * Conditions:
1419 * at splsched
1420 * wait queue locked
1421 * possibly recursive
1422 * Returns:
1423 * a pointer to the locked thread that was awakened
1424 */
1425 __private_extern__ thread_t
1426 wait_queue_wakeup64_identity_locked(
1427 wait_queue_t wq,
1428 event64_t event,
1429 wait_result_t result,
1430 boolean_t unlock)
1431 {
1432 kern_return_t res;
1433 thread_t thread;
1434
1435 assert(wait_queue_held(wq));
1436
1438 thread = _wait_queue_select64_one(wq, event);
1439 if (unlock)
1440 wait_queue_unlock(wq);
1441
1442 if (thread) {
1443 res = thread_go(thread, result);
1444 assert(res == KERN_SUCCESS);
1445 }
1446 return thread; /* still locked if not NULL */
1447 }
1448
1449
1450 /*
1451 * Routine: wait_queue_wakeup64_one_locked
1452 * Purpose:
1453 * Select a single thread that is most-eligible to run and
1454 * set it running.
1455 *
1456 * Conditions:
1457 * at splsched
1458 * wait queue locked
1459 * possibly recursive
1460 * Returns:
1461 * KERN_SUCCESS - A thread was waiting and has been awakened
1462 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1463 */
1464 __private_extern__ kern_return_t
1465 wait_queue_wakeup64_one_locked(
1466 wait_queue_t wq,
1467 event64_t event,
1468 wait_result_t result,
1469 boolean_t unlock)
1470 {
1471 thread_t thread;
1472
1473 assert(wait_queue_held(wq));
1474
1475 thread = _wait_queue_select64_one(wq, event);
1476 if (unlock)
1477 wait_queue_unlock(wq);
1478
1479 if (thread) {
1480 kern_return_t res;
1481
1482 res = thread_go(thread, result);
1483 assert(res == KERN_SUCCESS);
1484 thread_unlock(thread);
1485 return res;
1486 }
1487
1488 return KERN_NOT_WAITING;
1489 }
1490
1491 /*
1492 * Routine: wait_queue_wakeup_one
1493 * Purpose:
1494 * Wakeup the most appropriate thread that is in the specified
1495 * wait queue for the specified event.
1496 * Conditions:
1497 * Nothing locked
1498 * Returns:
1499 * KERN_SUCCESS - Thread was woken up
1500 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1501 */
1502 kern_return_t
1503 wait_queue_wakeup_one(
1504 wait_queue_t wq,
1505 event_t event,
1506 wait_result_t result)
1507 {
1508 thread_t thread;
1509 spl_t s;
1510
1511 if (!wait_queue_is_valid(wq)) {
1512 return KERN_INVALID_ARGUMENT;
1513 }
1514
1515 s = splsched();
1516 wait_queue_lock(wq);
1517 thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
1518 wait_queue_unlock(wq);
1519
1520 if (thread) {
1521 kern_return_t res;
1522
1523 res = thread_go(thread, result);
1524 assert(res == KERN_SUCCESS);
1525 thread_unlock(thread);
1526 splx(s);
1527 return res;
1528 }
1529
1530 splx(s);
1531 return KERN_NOT_WAITING;
1532 }
1533
1534 /*
1535 * Routine: wait_queue_wakeup64_one
1536 * Purpose:
1537 * Wakeup the most appropriate thread that is in the specified
1538 * wait queue for the specified event.
1539 * Conditions:
1540 * Nothing locked
1541 * Returns:
1542 * KERN_SUCCESS - Thread was woken up
1543 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1544 */
1545 kern_return_t
1546 wait_queue_wakeup64_one(
1547 wait_queue_t wq,
1548 event64_t event,
1549 wait_result_t result)
1550 {
1551 thread_t thread;
1552 spl_t s;
1553
1554 if (!wait_queue_is_valid(wq)) {
1555 return KERN_INVALID_ARGUMENT;
1556 }
1557 s = splsched();
1558 wait_queue_lock(wq);
1559 thread = _wait_queue_select64_one(wq, event);
1560 wait_queue_unlock(wq);
1561
1562 if (thread) {
1563 kern_return_t res;
1564
1565 res = thread_go(thread, result);
1566 assert(res == KERN_SUCCESS);
1567 thread_unlock(thread);
1568 splx(s);
1569 return res;
1570 }
1571
1572 splx(s);
1573 return KERN_NOT_WAITING;
1574 }
1575
1576
1577 /*
1578 * Routine: wait_queue_wakeup64_thread_locked
1579 * Purpose:
1580 * Wakeup the particular thread that was specified if and only
1581 * if it was in this wait queue (or one of its set queues)
1582 * and waiting on the specified event.
1583 *
1584 * This is much safer than just removing the thread from
1585 * whatever wait queue it happens to be on. For instance, it
1586 * may have already been awoken from the wait you intended to
1587 * interrupt and waited on something else (like another
1588 * semaphore).
1589 * Conditions:
1590 * at splsched
1591 * wait queue already locked (may be released).
1592 * Returns:
1593 * KERN_SUCCESS - the thread was found waiting and awakened
1594 * KERN_NOT_WAITING - the thread was not waiting here
1595 */
1596 __private_extern__ kern_return_t
1597 wait_queue_wakeup64_thread_locked(
1598 wait_queue_t wq,
1599 event64_t event,
1600 thread_t thread,
1601 wait_result_t result,
1602 boolean_t unlock)
1603 {
1604 kern_return_t res;
1605
1606 assert(wait_queue_held(wq));
1607
1608 /*
1609 * See if the thread was still waiting there. If so, it got
1610 * dequeued and returned locked.
1611 */
1612 res = _wait_queue_select64_thread(wq, event, thread);
1613 if (unlock)
1614 wait_queue_unlock(wq);
1615
1616 if (res != KERN_SUCCESS)
1617 return KERN_NOT_WAITING;
1618
1619 res = thread_go(thread, result);
1620 assert(res == KERN_SUCCESS);
1621 thread_unlock(thread);
1622 return res;
1623 }
1624
1625 /*
1626 * Routine: wait_queue_wakeup_thread
1627 * Purpose:
1628 * Wakeup the particular thread that was specified if and only
1629 * if it was in this wait queue (or one of its set queues)
1630 * and waiting on the specified event.
1631 *
1632 * This is much safer than just removing the thread from
1633 * whatever wait queue it happens to be on. For instance, it
1634 * may have already been awoken from the wait you intended to
1635 * interrupt and waited on something else (like another
1636 * semaphore).
1637 * Conditions:
1638 * nothing of interest locked
1639 * spl is assumed to be low, so we raise it here
1640 * Returns:
1641 * KERN_SUCCESS - the thread was found waiting and awakened
1642 * KERN_NOT_WAITING - the thread was not waiting here
1643 */
1644 kern_return_t
1645 wait_queue_wakeup_thread(
1646 wait_queue_t wq,
1647 event_t event,
1648 thread_t thread,
1649 wait_result_t result)
1650 {
1651 kern_return_t res;
1652 spl_t s;
1653
1654 if (!wait_queue_is_valid(wq)) {
1655 return KERN_INVALID_ARGUMENT;
1656 }
1657
1658 s = splsched();
1659 wait_queue_lock(wq);
1660 res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
1661 wait_queue_unlock(wq);
1662
1663 if (res == KERN_SUCCESS) {
1664 res = thread_go(thread, result);
1665 assert(res == KERN_SUCCESS);
1666 thread_unlock(thread);
1667 splx(s);
1668 return res;
1669 }
1670 splx(s);
1671 return KERN_NOT_WAITING;
1672 }
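/*
 * Illustrative sketch: aborting one specific thread's wait. If the
 * thread has already been awakened (or is now waiting somewhere else),
 * the routine safely reports KERN_NOT_WAITING instead of yanking it
 * off an unrelated queue.
 *
 *	if (wait_queue_wakeup_thread(wq, event, thread,
 *			THREAD_INTERRUPTED) == KERN_NOT_WAITING) {
 *		-- thread was not waiting on <wq,event>; nothing done --
 *	}
 */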
1673
1674 /*
1675 * Routine: wait_queue_wakeup64_thread
1676 * Purpose:
1677 * Wakeup the particular thread that was specified if and only
1678 * if it was in this wait queue (or one of its sets' queues)
1679 * and waiting on the specified event.
1680 *
1681 * This is much safer than just removing the thread from
1682 * whatever wait queue it happens to be on. For instance, it
1683 * may have already been awoken from the wait you intended to
1684 * interrupt and waited on something else (like another
1685 * semaphore).
1686 * Conditions:
1687 * nothing of interest locked
1688 * spl is assumed to be low, so we raise it here
1689 * Returns:
1690 * KERN_SUCCESS - the thread was found waiting and awakened
1691 * KERN_NOT_WAITING - the thread was not waiting here
1692 */
1693 kern_return_t
1694 wait_queue_wakeup64_thread(
1695 wait_queue_t wq,
1696 event64_t event,
1697 thread_t thread,
1698 wait_result_t result)
1699 {
1700 kern_return_t res;
1701 spl_t s;
1702
1703 if (!wait_queue_is_valid(wq)) {
1704 return KERN_INVALID_ARGUMENT;
1705 }
1706
1707 s = splsched();
1708 wait_queue_lock(wq);
1709 res = _wait_queue_select64_thread(wq, event, thread);
1710 wait_queue_unlock(wq);
1711
1712 if (res == KERN_SUCCESS) {
1713 res = thread_go(thread, result);
1714 assert(res == KERN_SUCCESS);
1715 thread_unlock(thread);
1716 splx(s);
1717 return res;
1718 }
1719 splx(s);
1720 return KERN_NOT_WAITING;
1721 }