osfmk/kern/wait_queue.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: wait_queue.c (adapted from sched_prim.c)
54 * Author: Avadis Tevanian, Jr.
55 * Date: 1986
56 *
57 * Primitives for manipulating wait queues: either global
58 * ones from sched_prim.c, or private ones associated with
59  *	particular structures (ports, semaphores, etc.).
60 */
61
62 #include <kern/kern_types.h>
63 #include <kern/simple_lock.h>
64 #include <kern/kalloc.h>
65 #include <kern/queue.h>
66 #include <kern/spl.h>
67 #include <mach/sync_policy.h>
68 #include <kern/sched_prim.h>
69
70 #include <kern/wait_queue.h>
71
72 /* forward declarations */
73 static boolean_t wait_queue_member_locked(
74 wait_queue_t wq,
75 wait_queue_set_t wq_set);
76
77 void wait_queue_unlink_one(
78 wait_queue_t wq,
79 wait_queue_set_t *wq_setp);
80
81 kern_return_t wait_queue_set_unlink_all_nofree(
82 wait_queue_set_t wq_set);
83
84 /*
85 * Routine: wait_queue_init
86 * Purpose:
87 * Initialize a previously allocated wait queue.
88 * Returns:
89 * KERN_SUCCESS - The wait_queue_t was initialized
90 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
91 */
92 kern_return_t
93 wait_queue_init(
94 wait_queue_t wq,
95 int policy)
96 {
97 if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
98 return KERN_INVALID_ARGUMENT;
99
100 wq->wq_fifo = TRUE;
101 wq->wq_type = _WAIT_QUEUE_inited;
102 queue_init(&wq->wq_queue);
103 hw_lock_init(&wq->wq_interlock);
104 return KERN_SUCCESS;
105 }
106
107 /*
108 * Routine: wait_queue_alloc
109 * Purpose:
110  *	Allocate and initialize a wait queue for use outside of
111  *	the mach part of the kernel.
112 * Conditions:
113 * Nothing locked - can block.
114 * Returns:
115 * The allocated and initialized wait queue
116 * WAIT_QUEUE_NULL if there is a resource shortage
117 */
118 wait_queue_t
119 wait_queue_alloc(
120 int policy)
121 {
122 wait_queue_t wq;
123 kern_return_t ret;
124
125 wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
126 if (wq != WAIT_QUEUE_NULL) {
127 ret = wait_queue_init(wq, policy);
128 if (ret != KERN_SUCCESS) {
129 kfree(wq, sizeof(struct wait_queue));
130 wq = WAIT_QUEUE_NULL;
131 }
132 }
133 return wq;
134 }
135
136 /*
137 * Routine: wait_queue_free
138 * Purpose:
139 * Free an allocated wait queue.
140 * Conditions:
141 * May block.
142 */
143 kern_return_t
144 wait_queue_free(
145 wait_queue_t wq)
146 {
147 if (!wait_queue_is_queue(wq))
148 return KERN_INVALID_ARGUMENT;
149 if (!queue_empty(&wq->wq_queue))
150 return KERN_FAILURE;
151 kfree(wq, sizeof(struct wait_queue));
152 return KERN_SUCCESS;
153 }
154
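/*
 *	Illustrative usage sketch (editorial addition, not part of the original
 *	source; compiled out): a minimal allocate/use/free lifecycle for a
 *	private wait queue, assuming a kernel context where blocking is allowed.
 *	The function name is hypothetical.
 */
#if 0	/* example only */
static void
wait_queue_lifecycle_example(void)
{
	wait_queue_t wq;

	wq = wait_queue_alloc(SYNC_POLICY_FIFO);
	if (wq == WAIT_QUEUE_NULL)
		return;				/* resource shortage */

	/* ... threads may assert waits on wq and be woken here ... */

	/* freeing only succeeds once no elements remain queued */
	if (wait_queue_free(wq) != KERN_SUCCESS) {
		/* still in use; drain the queue before retrying */
	}
}
#endif
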
155 /*
156 * Routine: wait_queue_set_init
157 * Purpose:
158 * Initialize a previously allocated wait queue set.
159 * Returns:
160 * KERN_SUCCESS - The wait_queue_set_t was initialized
161 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
162 */
163 kern_return_t
164 wait_queue_set_init(
165 wait_queue_set_t wqset,
166 int policy)
167 {
168 kern_return_t ret;
169
170 ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
171 if (ret != KERN_SUCCESS)
172 return ret;
173
174 wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
175 if (policy & SYNC_POLICY_PREPOST)
176 wqset->wqs_wait_queue.wq_isprepost = TRUE;
177 else
178 wqset->wqs_wait_queue.wq_isprepost = FALSE;
179 queue_init(&wqset->wqs_setlinks);
180 wqset->wqs_refcount = 0;
181 return KERN_SUCCESS;
182 }
183
184
185 kern_return_t
186 wait_queue_sub_init(
187 wait_queue_set_t wqset,
188 int policy)
189 {
190 return wait_queue_set_init(wqset, policy);
191 }
192
193 kern_return_t
194 wait_queue_sub_clearrefs(
195 wait_queue_set_t wq_set)
196 {
197 if (!wait_queue_is_set(wq_set))
198 return KERN_INVALID_ARGUMENT;
199
200 wqs_lock(wq_set);
201 wq_set->wqs_refcount = 0;
202 wqs_unlock(wq_set);
203 return KERN_SUCCESS;
204 }
205
206 /*
207 * Routine: wait_queue_set_alloc
208 * Purpose:
209 * Allocate and initialize a wait queue set for
210 * use outside of the mach part of the kernel.
211 * Conditions:
212 * May block.
213 * Returns:
214 * The allocated and initialized wait queue set
215 * WAIT_QUEUE_SET_NULL if there is a resource shortage
216 */
217 wait_queue_set_t
218 wait_queue_set_alloc(
219 int policy)
220 {
221 wait_queue_set_t wq_set;
222
223 wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
224 if (wq_set != WAIT_QUEUE_SET_NULL) {
225 kern_return_t ret;
226
227 ret = wait_queue_set_init(wq_set, policy);
228 if (ret != KERN_SUCCESS) {
229 kfree(wq_set, sizeof(struct wait_queue_set));
230 wq_set = WAIT_QUEUE_SET_NULL;
231 }
232 }
233 return wq_set;
234 }
235
236 /*
237 * Routine: wait_queue_set_free
238 * Purpose:
239 * Free an allocated wait queue set
240 * Conditions:
241 * May block.
242 */
243 kern_return_t
244 wait_queue_set_free(
245 wait_queue_set_t wq_set)
246 {
247 if (!wait_queue_is_set(wq_set))
248 return KERN_INVALID_ARGUMENT;
249
250 if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
251 return KERN_FAILURE;
252
253 kfree(wq_set, sizeof(struct wait_queue_set));
254 return KERN_SUCCESS;
255 }
256
257
258 /*
259 *
260 * Routine: wait_queue_set_size
261 * Routine: wait_queue_link_size
262 * Purpose:
263 * Return the size of opaque wait queue structures
264 */
265 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
266 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
267
268 /* declare a unique type for wait queue link structures */
269 static unsigned int _wait_queue_link;
270 static unsigned int _wait_queue_unlinked;
271
272 #define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
273 #define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
274
275 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
276 WQASSERT(((wqe)->wqe_queue == (wq) && \
277 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
278 "wait queue element list corruption: wq=%#x, wqe=%#x", \
279 (wq), (wqe))
280
281 #define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
282 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
283 (queue_t)(wql) : &(wql)->wql_setlinks)))
284
285 #define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
286 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
287 (queue_t)(wql) : &(wql)->wql_setlinks)))
288
289 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
290 WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
291 ((wql)->wql_setqueue == (wqs)) && \
292 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
293 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
294 "wait queue set links corruption: wqs=%#x, wql=%#x", \
295 (wqs), (wql))
296
297 #if defined(_WAIT_QUEUE_DEBUG_)
298
299 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
300
301 #define WAIT_QUEUE_CHECK(wq) \
302 MACRO_BEGIN \
303 queue_t q2 = &(wq)->wq_queue; \
304 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
305 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
306 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
307 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
308 } \
309 MACRO_END
310
311 #define WAIT_QUEUE_SET_CHECK(wqs) \
312 MACRO_BEGIN \
313 queue_t q2 = &(wqs)->wqs_setlinks; \
314 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
315 while (!queue_end(q2, (queue_entry_t)wql2)) { \
316 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
317 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
318 } \
319 MACRO_END
320
321 #else /* !_WAIT_QUEUE_DEBUG_ */
322
323 #define WQASSERT(e, s, p0, p1) assert(e)
324
325 #define WAIT_QUEUE_CHECK(wq)
326 #define WAIT_QUEUE_SET_CHECK(wqs)
327
328 #endif /* !_WAIT_QUEUE_DEBUG_ */
329
330 /*
331 * Routine: wait_queue_member_locked
332 * Purpose:
333  *	Indicate if the given set queue is linked into this wait queue
334 * Conditions:
335 * The wait queue is locked
336 * The set queue is just that, a set queue
337 */
338 static boolean_t
339 wait_queue_member_locked(
340 wait_queue_t wq,
341 wait_queue_set_t wq_set)
342 {
343 wait_queue_element_t wq_element;
344 queue_t q;
345
346 assert(wait_queue_held(wq));
347 assert(wait_queue_is_set(wq_set));
348
349 q = &wq->wq_queue;
350
351 wq_element = (wait_queue_element_t) queue_first(q);
352 while (!queue_end(q, (queue_entry_t)wq_element)) {
353 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
354 if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
355 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
356
357 if (wql->wql_setqueue == wq_set)
358 return TRUE;
359 }
360 wq_element = (wait_queue_element_t)
361 queue_next((queue_t) wq_element);
362 }
363 return FALSE;
364 }
365
366
367 /*
368 * Routine: wait_queue_member
369 * Purpose:
370  *	Indicate if the given set queue is linked into this wait queue
371 * Conditions:
372 * The set queue is just that, a set queue
373 */
374 boolean_t
375 wait_queue_member(
376 wait_queue_t wq,
377 wait_queue_set_t wq_set)
378 {
379 boolean_t ret;
380 spl_t s;
381
382 if (!wait_queue_is_set(wq_set))
383 return FALSE;
384
385 s = splsched();
386 wait_queue_lock(wq);
387 ret = wait_queue_member_locked(wq, wq_set);
388 wait_queue_unlock(wq);
389 splx(s);
390
391 return ret;
392 }
393
394
395 /*
396 * Routine: wait_queue_link_noalloc
397 * Purpose:
398 * Insert a set wait queue into a wait queue. This
399  *	requires us to link the two together using a caller-supplied
400  *	wait_queue_link structure (no allocation is done here).
401 * Conditions:
402 * The wait queue being inserted must be inited as a set queue
403 */
404 kern_return_t
405 wait_queue_link_noalloc(
406 wait_queue_t wq,
407 wait_queue_set_t wq_set,
408 wait_queue_link_t wql)
409 {
410 wait_queue_element_t wq_element;
411 queue_t q;
412 spl_t s;
413
414 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
415 return KERN_INVALID_ARGUMENT;
416
417 /*
418  *	There are probably fewer threads and sets associated with
419  *	the wait queue than there are wait queues associated with
420  *	the set, so let's validate it that way.
421 */
422 s = splsched();
423 wait_queue_lock(wq);
424 q = &wq->wq_queue;
425 wq_element = (wait_queue_element_t) queue_first(q);
426 while (!queue_end(q, (queue_entry_t)wq_element)) {
427 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
428 if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
429 ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
430 wait_queue_unlock(wq);
431 splx(s);
432 return KERN_ALREADY_IN_SET;
433 }
434 wq_element = (wait_queue_element_t)
435 queue_next((queue_t) wq_element);
436 }
437
438 /*
439 * Not already a member, so we can add it.
440 */
441 wqs_lock(wq_set);
442
443 WAIT_QUEUE_SET_CHECK(wq_set);
444
445 wql->wql_queue = wq;
446 queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
447 wql->wql_setqueue = wq_set;
448 queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
449 wql->wql_type = WAIT_QUEUE_LINK;
450
451 wqs_unlock(wq_set);
452 wait_queue_unlock(wq);
453 splx(s);
454
455 return KERN_SUCCESS;
456 }
457
458 /*
459 * Routine: wait_queue_link
460 * Purpose:
461 * Insert a set wait queue into a wait queue. This
462 * requires us to link the two together using a wait_queue_link
463 * structure that we allocate.
464 * Conditions:
465 * The wait queue being inserted must be inited as a set queue
466 */
467 kern_return_t
468 wait_queue_link(
469 wait_queue_t wq,
470 wait_queue_set_t wq_set)
471 {
472 wait_queue_link_t wql;
473 kern_return_t ret;
474
475 wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
476 if (wql == WAIT_QUEUE_LINK_NULL)
477 return KERN_RESOURCE_SHORTAGE;
478
479 ret = wait_queue_link_noalloc(wq, wq_set, wql);
480 if (ret != KERN_SUCCESS)
481 kfree(wql, sizeof(struct wait_queue_link));
482
483 return ret;
484 }
485
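/*
 *	Illustrative sketch (editorial addition, not part of the original source;
 *	compiled out): creating a prepost wait queue set, linking an ordinary
 *	wait queue into it with wait_queue_link(), then undoing the link.  The
 *	function name is hypothetical and error handling is abbreviated.
 */
#if 0	/* example only */
static void
wait_queue_link_example(wait_queue_t wq)
{
	wait_queue_set_t wq_set;

	wq_set = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
	if (wq_set == WAIT_QUEUE_SET_NULL)
		return;

	/* allocates and installs the wait_queue_link internally */
	if (wait_queue_link(wq, wq_set) == KERN_SUCCESS) {
		assert(wait_queue_member(wq, wq_set));

		/* remove the linkage and free the link structure */
		(void) wait_queue_unlink(wq, wq_set);
	}

	(void) wait_queue_set_free(wq_set);
}
#endif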
486
487 /*
488  *	Routine:	wait_queue_unlink_locked
489 * Purpose:
490 * Undo the linkage between a wait queue and a set.
491 */
492 static void
493 wait_queue_unlink_locked(
494 wait_queue_t wq,
495 wait_queue_set_t wq_set,
496 wait_queue_link_t wql)
497 {
498 assert(wait_queue_held(wq));
499 assert(wait_queue_held(&wq_set->wqs_wait_queue));
500
501 wql->wql_queue = WAIT_QUEUE_NULL;
502 queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
503 wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
504 queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
505 wql->wql_type = WAIT_QUEUE_UNLINKED;
506
507 WAIT_QUEUE_CHECK(wq);
508 WAIT_QUEUE_SET_CHECK(wq_set);
509 }
510
511 /*
512 * Routine: wait_queue_unlink
513 * Purpose:
514 * Remove the linkage between a wait queue and a set,
515 * freeing the linkage structure.
516 * Conditions:
517  *	The wait queue must currently be a member of the set queue
518 */
519 kern_return_t
520 wait_queue_unlink(
521 wait_queue_t wq,
522 wait_queue_set_t wq_set)
523 {
524 wait_queue_element_t wq_element;
525 wait_queue_link_t wql;
526 queue_t q;
527 spl_t s;
528
529 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
530 return KERN_INVALID_ARGUMENT;
531 }
532 s = splsched();
533 wait_queue_lock(wq);
534
535 q = &wq->wq_queue;
536 wq_element = (wait_queue_element_t) queue_first(q);
537 while (!queue_end(q, (queue_entry_t)wq_element)) {
538 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
539 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
540 wql = (wait_queue_link_t)wq_element;
541
542 if (wql->wql_setqueue == wq_set) {
543 wqs_lock(wq_set);
544 wait_queue_unlink_locked(wq, wq_set, wql);
545 wqs_unlock(wq_set);
546 wait_queue_unlock(wq);
547 splx(s);
548 kfree(wql, sizeof(struct wait_queue_link));
549 return KERN_SUCCESS;
550 }
551 }
552 wq_element = (wait_queue_element_t)
553 queue_next((queue_t) wq_element);
554 }
555 wait_queue_unlock(wq);
556 splx(s);
557 return KERN_NOT_IN_SET;
558 }
559
560
561 /*
562 * Routine: wait_queue_unlinkall_nofree
563 * Purpose:
564 * Remove the linkage between a wait queue and all its
565 * sets. The caller is responsible for freeing
566 * the wait queue link structures.
567 */
568
569 kern_return_t
570 wait_queue_unlinkall_nofree(
571 wait_queue_t wq)
572 {
573 wait_queue_element_t wq_element;
574 wait_queue_element_t wq_next_element;
575 wait_queue_set_t wq_set;
576 wait_queue_link_t wql;
577 queue_head_t links_queue_head;
578 queue_t links = &links_queue_head;
579 queue_t q;
580 spl_t s;
581
582 if (!wait_queue_is_queue(wq)) {
583 return KERN_INVALID_ARGUMENT;
584 }
585
586 queue_init(links);
587
588 s = splsched();
589 wait_queue_lock(wq);
590
591 q = &wq->wq_queue;
592
593 wq_element = (wait_queue_element_t) queue_first(q);
594 while (!queue_end(q, (queue_entry_t)wq_element)) {
595 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
596 wq_next_element = (wait_queue_element_t)
597 queue_next((queue_t) wq_element);
598
599 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
600 wql = (wait_queue_link_t)wq_element;
601 wq_set = wql->wql_setqueue;
602 wqs_lock(wq_set);
603 wait_queue_unlink_locked(wq, wq_set, wql);
604 wqs_unlock(wq_set);
605 }
606 wq_element = wq_next_element;
607 }
608 wait_queue_unlock(wq);
609 splx(s);
610 return(KERN_SUCCESS);
611 }
612
613
614 /*
615 * Routine: wait_queue_unlink_all
616 * Purpose:
617 * Remove the linkage between a wait queue and all its sets.
618 * All the linkage structures are freed.
619 * Conditions:
620 * Nothing of interest locked.
621 */
622
623 kern_return_t
624 wait_queue_unlink_all(
625 wait_queue_t wq)
626 {
627 wait_queue_element_t wq_element;
628 wait_queue_element_t wq_next_element;
629 wait_queue_set_t wq_set;
630 wait_queue_link_t wql;
631 queue_head_t links_queue_head;
632 queue_t links = &links_queue_head;
633 queue_t q;
634 spl_t s;
635
636 if (!wait_queue_is_queue(wq)) {
637 return KERN_INVALID_ARGUMENT;
638 }
639
640 queue_init(links);
641
642 s = splsched();
643 wait_queue_lock(wq);
644
645 q = &wq->wq_queue;
646
647 wq_element = (wait_queue_element_t) queue_first(q);
648 while (!queue_end(q, (queue_entry_t)wq_element)) {
649 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
650 wq_next_element = (wait_queue_element_t)
651 queue_next((queue_t) wq_element);
652
653 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
654 wql = (wait_queue_link_t)wq_element;
655 wq_set = wql->wql_setqueue;
656 wqs_lock(wq_set);
657 wait_queue_unlink_locked(wq, wq_set, wql);
658 wqs_unlock(wq_set);
659 enqueue(links, &wql->wql_links);
660 }
661 wq_element = wq_next_element;
662 }
663 wait_queue_unlock(wq);
664 splx(s);
665
666 while(!queue_empty(links)) {
667 wql = (wait_queue_link_t) dequeue(links);
668 kfree(wql, sizeof(struct wait_queue_link));
669 }
670
671 return(KERN_SUCCESS);
672 }
673
674 /*
675 * Routine: wait_queue_set_unlink_all_nofree
676 * Purpose:
677 * Remove the linkage between a set wait queue and all its
678 * member wait queues. The link structures are not freed, nor
679 * returned. It is the caller's responsibility to track and free
680 * them.
681 * Conditions:
682  *	The wait queue must be inited as a set queue
683 */
684 kern_return_t
685 wait_queue_set_unlink_all_nofree(
686 wait_queue_set_t wq_set)
687 {
688 wait_queue_link_t wql;
689 wait_queue_t wq;
690 queue_t q;
691 spl_t s;
692
693 if (!wait_queue_is_set(wq_set)) {
694 return KERN_INVALID_ARGUMENT;
695 }
696
697 retry:
698 s = splsched();
699 wqs_lock(wq_set);
700
701 q = &wq_set->wqs_setlinks;
702
703 wql = (wait_queue_link_t)queue_first(q);
704 while (!queue_end(q, (queue_entry_t)wql)) {
705 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
706 wq = wql->wql_queue;
707 if (wait_queue_lock_try(wq)) {
708 wait_queue_unlink_locked(wq, wq_set, wql);
709 wait_queue_unlock(wq);
710 wql = (wait_queue_link_t)queue_first(q);
711 } else {
712 wqs_unlock(wq_set);
713 splx(s);
714 delay(1);
715 goto retry;
716 }
717 }
718 wqs_unlock(wq_set);
719 splx(s);
720
721 return(KERN_SUCCESS);
722 }
723
724 /* legacy interface naming */
725 kern_return_t
726 wait_subqueue_unlink_all(
727 wait_queue_set_t wq_set)
728 {
729 return wait_queue_set_unlink_all_nofree(wq_set);
730 }
731
732
733 /*
734 * Routine: wait_queue_set_unlink_all
735 * Purpose:
736 * Remove the linkage between a set wait queue and all its
737 * member wait queues. The link structures are freed.
738 * Conditions:
739 * The wait queue must be a set
740 */
741 kern_return_t
742 wait_queue_set_unlink_all(
743 wait_queue_set_t wq_set)
744 {
745 wait_queue_link_t wql;
746 wait_queue_t wq;
747 queue_t q;
748 queue_head_t links_queue_head;
749 queue_t links = &links_queue_head;
750 spl_t s;
751
752 if (!wait_queue_is_set(wq_set)) {
753 return KERN_INVALID_ARGUMENT;
754 }
755
756 queue_init(links);
757
758 retry:
759 s = splsched();
760 wqs_lock(wq_set);
761
762 q = &wq_set->wqs_setlinks;
763
764 wql = (wait_queue_link_t)queue_first(q);
765 while (!queue_end(q, (queue_entry_t)wql)) {
766 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
767 wq = wql->wql_queue;
768 if (wait_queue_lock_try(wq)) {
769 wait_queue_unlink_locked(wq, wq_set, wql);
770 wait_queue_unlock(wq);
771 enqueue(links, &wql->wql_links);
772 wql = (wait_queue_link_t)queue_first(q);
773 } else {
774 wqs_unlock(wq_set);
775 splx(s);
776 delay(1);
777 goto retry;
778 }
779 }
780 wqs_unlock(wq_set);
781 splx(s);
782
783 while (!queue_empty (links)) {
784 wql = (wait_queue_link_t) dequeue(links);
785 kfree(wql, sizeof(struct wait_queue_link));
786 }
787 return(KERN_SUCCESS);
788 }
789
790
791 /*
792 * Routine: wait_queue_unlink_one
793 * Purpose:
794 * Find and unlink one set wait queue
795 * Conditions:
796 * Nothing of interest locked.
797 */
798 void
799 wait_queue_unlink_one(
800 wait_queue_t wq,
801 wait_queue_set_t *wq_setp)
802 {
803 wait_queue_element_t wq_element;
804 queue_t q;
805 spl_t s;
806
807 s = splsched();
808 wait_queue_lock(wq);
809
810 q = &wq->wq_queue;
811
812 wq_element = (wait_queue_element_t) queue_first(q);
813 while (!queue_end(q, (queue_entry_t)wq_element)) {
814
815 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
816 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
817 wait_queue_set_t wq_set = wql->wql_setqueue;
818
819 wqs_lock(wq_set);
820 wait_queue_unlink_locked(wq, wq_set, wql);
821 wqs_unlock(wq_set);
822 wait_queue_unlock(wq);
823 splx(s);
824 kfree(wql,sizeof(struct wait_queue_link));
825 *wq_setp = wq_set;
826 return;
827 }
828
829 wq_element = (wait_queue_element_t)
830 queue_next((queue_t) wq_element);
831 }
832 wait_queue_unlock(wq);
833 splx(s);
834 *wq_setp = WAIT_QUEUE_SET_NULL;
835 }
836
837
838 /*
839 * Routine: wait_queue_assert_wait64_locked
840 * Purpose:
841 * Insert the current thread into the supplied wait queue
842 * waiting for a particular event to be posted to that queue.
843 *
844 * Conditions:
845 * The wait queue is assumed locked.
846 * The waiting thread is assumed locked.
847 *
848 */
849 __private_extern__ wait_result_t
850 wait_queue_assert_wait64_locked(
851 wait_queue_t wq,
852 event64_t event,
853 wait_interrupt_t interruptible,
854 uint64_t deadline,
855 thread_t thread)
856 {
857 wait_result_t wait_result;
858
859 if (!wait_queue_assert_possible(thread))
860 panic("wait_queue_assert_wait64_locked");
861
862 if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
863 wait_queue_set_t wqs = (wait_queue_set_t)wq;
864
865 if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
866 return(THREAD_AWAKENED);
867 }
868
869 /*
870 * This is the extent to which we currently take scheduling attributes
871  *	into account. If the thread is vm privileged, we stick it at
872 * the front of the queue. Later, these queues will honor the policy
873 * value set at wait_queue_init time.
874 */
875 wait_result = thread_mark_wait_locked(thread, interruptible);
876 if (wait_result == THREAD_WAITING) {
877 if (thread->options & TH_OPT_VMPRIV)
878 enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
879 else
880 enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
881
882 thread->wait_event = event;
883 thread->wait_queue = wq;
884
885 if (deadline != 0) {
886 if (!timer_call_enter(&thread->wait_timer, deadline))
887 thread->wait_timer_active++;
888 thread->wait_timer_is_set = TRUE;
889 }
890 }
891 return(wait_result);
892 }
893
894 /*
895 * Routine: wait_queue_assert_wait
896 * Purpose:
897 * Insert the current thread into the supplied wait queue
898 * waiting for a particular event to be posted to that queue.
899 *
900 * Conditions:
901 * nothing of interest locked.
902 */
903 wait_result_t
904 wait_queue_assert_wait(
905 wait_queue_t wq,
906 event_t event,
907 wait_interrupt_t interruptible,
908 uint64_t deadline)
909 {
910 spl_t s;
911 wait_result_t ret;
912 thread_t thread = current_thread();
913
914 /* If it is an invalid wait queue, you can't wait on it */
915 if (!wait_queue_is_valid(wq))
916 return (thread->wait_result = THREAD_RESTART);
917
918 s = splsched();
919 wait_queue_lock(wq);
920 thread_lock(thread);
921 ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
922 interruptible, deadline, thread);
923 thread_unlock(thread);
924 wait_queue_unlock(wq);
925 splx(s);
926 return(ret);
927 }
928
929 /*
930 * Routine: wait_queue_assert_wait64
931 * Purpose:
932 * Insert the current thread into the supplied wait queue
933 * waiting for a particular event to be posted to that queue.
934 * Conditions:
935 * nothing of interest locked.
936 */
937 wait_result_t
938 wait_queue_assert_wait64(
939 wait_queue_t wq,
940 event64_t event,
941 wait_interrupt_t interruptible,
942 uint64_t deadline)
943 {
944 spl_t s;
945 wait_result_t ret;
946 thread_t thread = current_thread();
947
948 	/* If it is an invalid wait queue, you can't wait on it */
949 if (!wait_queue_is_valid(wq))
950 return (thread->wait_result = THREAD_RESTART);
951
952 s = splsched();
953 wait_queue_lock(wq);
954 thread_lock(thread);
955 ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
956 thread_unlock(thread);
957 wait_queue_unlock(wq);
958 splx(s);
959 return(ret);
960 }
961
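/*
 *	Illustrative sketch (editorial addition, not part of the original source;
 *	compiled out): the canonical wait-side pattern.  A thread asserts a wait
 *	on a <wait_queue, event> pair and then blocks; it resumes when another
 *	thread posts a wakeup for the same pair (see the wakeup sketches further
 *	below).  The function name and the "example_event" token are hypothetical.
 */
#if 0	/* example only */
static wait_result_t
wait_for_example_event(wait_queue_t wq, event_t example_event)
{
	wait_result_t wres;

	wres = wait_queue_assert_wait(wq, example_event,
				      THREAD_UNINT, 0 /* no deadline */);
	if (wres == THREAD_WAITING)
		wres = thread_block(THREAD_CONTINUE_NULL);

	/* THREAD_AWAKENED, THREAD_TIMED_OUT, THREAD_INTERRUPTED, ... */
	return wres;
}
#endif
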
962 /*
963 * Routine: _wait_queue_select64_all
964 * Purpose:
965 * Select all threads off a wait queue that meet the
966 * supplied criteria.
967 * Conditions:
968 * at splsched
969 * wait queue locked
970 * wake_queue initialized and ready for insertion
971 * possibly recursive
972 * Returns:
973 * a queue of locked threads
974 */
975 static void
976 _wait_queue_select64_all(
977 wait_queue_t wq,
978 event64_t event,
979 queue_t wake_queue)
980 {
981 wait_queue_element_t wq_element;
982 wait_queue_element_t wqe_next;
983 queue_t q;
984
985 q = &wq->wq_queue;
986
987 wq_element = (wait_queue_element_t) queue_first(q);
988 while (!queue_end(q, (queue_entry_t)wq_element)) {
989 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
990 wqe_next = (wait_queue_element_t)
991 queue_next((queue_t) wq_element);
992
993 /*
994 * We may have to recurse if this is a compound wait queue.
995 */
996 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
997 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
998 wait_queue_t set_queue;
999
1000 /*
1001 * We have to check the set wait queue.
1002 */
1003 set_queue = (wait_queue_t)wql->wql_setqueue;
1004 wait_queue_lock(set_queue);
1005 if (set_queue->wq_isprepost) {
1006 wait_queue_set_t wqs = (wait_queue_set_t)set_queue;
1007
1008 /*
1009 			 * Preposting is only for sets, and the wait queue
1010 			 * is the first element of the set structure.
1011 */
1012 wqs->wqs_refcount++;
1013 }
1014 if (! wait_queue_empty(set_queue))
1015 _wait_queue_select64_all(set_queue, event, wake_queue);
1016 wait_queue_unlock(set_queue);
1017 } else {
1018
1019 /*
1020 			 * Otherwise, it's a thread.  If it is waiting on
1021 			 * the event we are posting to this queue, pull
1022 			 * it off the queue and stick it in our wake_queue.
1023 */
1024 thread_t t = (thread_t)wq_element;
1025
1026 if (t->wait_event == event) {
1027 thread_lock(t);
1028 remqueue(q, (queue_entry_t) t);
1029 enqueue (wake_queue, (queue_entry_t) t);
1030 t->wait_queue = WAIT_QUEUE_NULL;
1031 t->wait_event = NO_EVENT64;
1032 t->at_safe_point = FALSE;
1033 /* returned locked */
1034 }
1035 }
1036 wq_element = wqe_next;
1037 }
1038 }
1039
1040 /*
1041 * Routine: wait_queue_wakeup64_all_locked
1042 * Purpose:
1043 * Wakeup some number of threads that are in the specified
1044 * wait queue and waiting on the specified event.
1045 * Conditions:
1046 * wait queue already locked (may be released).
1047 * Returns:
1048 * KERN_SUCCESS - Threads were woken up
1049  *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1050 */
1051 __private_extern__ kern_return_t
1052 wait_queue_wakeup64_all_locked(
1053 wait_queue_t wq,
1054 event64_t event,
1055 wait_result_t result,
1056 boolean_t unlock)
1057 {
1058 queue_head_t wake_queue_head;
1059 queue_t q = &wake_queue_head;
1060 kern_return_t res;
1061
1062 // assert(wait_queue_held(wq));
1063 	if (!wq->wq_interlock.lock_data) {		/* (BRINGUP) */
1064 panic("wait_queue_wakeup64_all_locked: lock not held on %08X\n", wq); /* (BRINGUP) */
1065 }
1066
1067 queue_init(q);
1068
1069 /*
1070 * Select the threads that we will wake up. The threads
1071 * are returned to us locked and cleanly removed from the
1072 * wait queue.
1073 */
1074 _wait_queue_select64_all(wq, event, q);
1075 if (unlock)
1076 wait_queue_unlock(wq);
1077
1078 /*
1079 * For each thread, set it running.
1080 */
1081 res = KERN_NOT_WAITING;
1082 while (!queue_empty (q)) {
1083 thread_t thread = (thread_t) dequeue(q);
1084 res = thread_go(thread, result);
1085 assert(res == KERN_SUCCESS);
1086 thread_unlock(thread);
1087 }
1088 return res;
1089 }
1090
1091
1092 /*
1093 * Routine: wait_queue_wakeup_all
1094 * Purpose:
1095 * Wakeup some number of threads that are in the specified
1096 * wait queue and waiting on the specified event.
1097 * Conditions:
1098 * Nothing locked
1099 * Returns:
1100 * KERN_SUCCESS - Threads were woken up
1101  *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1102 */
1103 kern_return_t
1104 wait_queue_wakeup_all(
1105 wait_queue_t wq,
1106 event_t event,
1107 wait_result_t result)
1108 {
1109 kern_return_t ret;
1110 spl_t s;
1111
1112 if (!wait_queue_is_valid(wq)) {
1113 return KERN_INVALID_ARGUMENT;
1114 }
1115
1116 s = splsched();
1117 wait_queue_lock(wq);
1118 	if (!wq->wq_interlock.lock_data) {		/* (BRINGUP) */
1119 panic("wait_queue_wakeup_all: we did not get the lock on %08X\n", wq); /* (BRINGUP) */
1120 }
1121 ret = wait_queue_wakeup64_all_locked(
1122 wq, (event64_t)((uint32_t)event),
1123 result, TRUE);
1124 /* lock released */
1125 splx(s);
1126 return ret;
1127 }
1128
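/*
 *	Illustrative sketch (editorial addition, not part of the original source;
 *	compiled out): the matching post side for the wait pattern sketched above.
 *	Waking "all" releases every thread currently waiting on the
 *	<wq, example_event> pair; KERN_NOT_WAITING just means nobody was waiting.
 *	The function name and event token are hypothetical.
 */
#if 0	/* example only */
static void
post_example_event_broadcast(wait_queue_t wq, event_t example_event)
{
	kern_return_t kr;

	kr = wait_queue_wakeup_all(wq, example_event, THREAD_AWAKENED);
	if (kr == KERN_NOT_WAITING) {
		/* no waiters were present for this event */
	}
}
#endif
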
1129 /*
1130 * Routine: wait_queue_wakeup64_all
1131 * Purpose:
1132 * Wakeup some number of threads that are in the specified
1133 * wait queue and waiting on the specified event.
1134 * Conditions:
1135 * Nothing locked
1136 * Returns:
1137 * KERN_SUCCESS - Threads were woken up
1138  *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1139 */
1140 kern_return_t
1141 wait_queue_wakeup64_all(
1142 wait_queue_t wq,
1143 event64_t event,
1144 wait_result_t result)
1145 {
1146 kern_return_t ret;
1147 spl_t s;
1148
1149 if (!wait_queue_is_valid(wq)) {
1150 return KERN_INVALID_ARGUMENT;
1151 }
1152
1153 s = splsched();
1154 wait_queue_lock(wq);
1155 ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1156 /* lock released */
1157 splx(s);
1158 return ret;
1159 }
1160
1161 /*
1162 * Routine: _wait_queue_select64_one
1163 * Purpose:
1164  *	Select the best thread off a wait queue that meets the
1165 * supplied criteria.
1166 * Conditions:
1167 * at splsched
1168 * wait queue locked
1169 * possibly recursive
1170 * Returns:
1171 * a locked thread - if one found
1172 * Note:
1173 * This is where the sync policy of the wait queue comes
1174 * into effect. For now, we just assume FIFO.
1175 */
1176 static thread_t
1177 _wait_queue_select64_one(
1178 wait_queue_t wq,
1179 event64_t event)
1180 {
1181 wait_queue_element_t wq_element;
1182 wait_queue_element_t wqe_next;
1183 thread_t t = THREAD_NULL;
1184 queue_t q;
1185
1186 assert(wq->wq_fifo);
1187
1188 q = &wq->wq_queue;
1189
1190 wq_element = (wait_queue_element_t) queue_first(q);
1191 while (!queue_end(q, (queue_entry_t)wq_element)) {
1192 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1193 wqe_next = (wait_queue_element_t)
1194 queue_next((queue_t) wq_element);
1195
1196 /*
1197 * We may have to recurse if this is a compound wait queue.
1198 */
1199 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1200 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1201 wait_queue_t set_queue;
1202
1203 /*
1204 * We have to check the set wait queue.
1205 */
1206 set_queue = (wait_queue_t)wql->wql_setqueue;
1207 wait_queue_lock(set_queue);
1208 if (! wait_queue_empty(set_queue)) {
1209 t = _wait_queue_select64_one(set_queue, event);
1210 }
1211 wait_queue_unlock(set_queue);
1212 if (t != THREAD_NULL)
1213 return t;
1214 } else {
1215
1216 /*
1217 			 * Otherwise, it's a thread.  If it is waiting on
1218 			 * the event we are posting to this queue, pull
1219 			 * it off the queue and return it (still locked).
1220 */
1221 t = (thread_t)wq_element;
1222 if (t->wait_event == event) {
1223 thread_lock(t);
1224 remqueue(q, (queue_entry_t) t);
1225 t->wait_queue = WAIT_QUEUE_NULL;
1226 t->wait_event = NO_EVENT64;
1227 t->at_safe_point = FALSE;
1228 return t; /* still locked */
1229 }
1230
1231 t = THREAD_NULL;
1232 }
1233 wq_element = wqe_next;
1234 }
1235 return THREAD_NULL;
1236 }
1237
1238 /*
1239 * Routine: wait_queue_peek64_locked
1240 * Purpose:
1241  *	Select the best thread from a wait queue that meets the
1242 * supplied criteria, but leave it on the queue it was
1243 * found on. The thread, and the actual wait_queue the
1244 * thread was found on are identified.
1245 * Conditions:
1246 * at splsched
1247 * wait queue locked
1248 * possibly recursive
1249 * Returns:
1250 * a locked thread - if one found
1251 * a locked waitq - the one the thread was found on
1252 * Note:
1253 * Both the waitq the thread was actually found on, and
1254 * the supplied wait queue, are locked after this.
1255 */
1256 __private_extern__ void
1257 wait_queue_peek64_locked(
1258 wait_queue_t wq,
1259 event64_t event,
1260 thread_t *tp,
1261 wait_queue_t *wqp)
1262 {
1263 wait_queue_element_t wq_element;
1264 wait_queue_element_t wqe_next;
1265 queue_t q;
1266
1267 assert(wq->wq_fifo);
1268
1269 *tp = THREAD_NULL;
1270
1271 q = &wq->wq_queue;
1272
1273 wq_element = (wait_queue_element_t) queue_first(q);
1274 while (!queue_end(q, (queue_entry_t)wq_element)) {
1275 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1276 wqe_next = (wait_queue_element_t)
1277 queue_next((queue_t) wq_element);
1278
1279 /*
1280 * We may have to recurse if this is a compound wait queue.
1281 */
1282 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1283 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1284 wait_queue_t set_queue;
1285
1286 /*
1287 * We have to check the set wait queue.
1288 */
1289 set_queue = (wait_queue_t)wql->wql_setqueue;
1290 wait_queue_lock(set_queue);
1291 if (! wait_queue_empty(set_queue)) {
1292 wait_queue_peek64_locked(set_queue, event, tp, wqp);
1293 }
1294 if (*tp != THREAD_NULL) {
1295 if (*wqp != set_queue)
1296 wait_queue_unlock(set_queue);
1297 return; /* thread and its waitq locked */
1298 }
1299
1300 wait_queue_unlock(set_queue);
1301 } else {
1302
1303 /*
1304 			 * Otherwise, it's a thread.  If it is waiting on
1305 * the event we are posting to this queue, return
1306 * it locked, but leave it on the queue.
1307 */
1308 thread_t t = (thread_t)wq_element;
1309
1310 if (t->wait_event == event) {
1311 thread_lock(t);
1312 *tp = t;
1313 *wqp = wq;
1314 return;
1315 }
1316 }
1317 wq_element = wqe_next;
1318 }
1319 }
1320
1321 /*
1322 * Routine: wait_queue_pull_thread_locked
1323 * Purpose:
1324 * Pull a thread that was previously "peeked" off the wait
1325 * queue and (possibly) unlock the waitq.
1326 * Conditions:
1327 * at splsched
1328 * wait queue locked
1329 * thread locked
1330 * Returns:
1331 * with the thread still locked.
1332 */
1333 void
1334 wait_queue_pull_thread_locked(
1335 wait_queue_t waitq,
1336 thread_t thread,
1337 boolean_t unlock)
1338 {
1339
1340 assert(thread->wait_queue == waitq);
1341
1342 remqueue(&waitq->wq_queue, (queue_entry_t)thread );
1343 thread->wait_queue = WAIT_QUEUE_NULL;
1344 thread->wait_event = NO_EVENT64;
1345 thread->at_safe_point = FALSE;
1346 if (unlock)
1347 wait_queue_unlock(waitq);
1348 }
1349
1350
1351 /*
1352 * Routine: wait_queue_select64_thread
1353 * Purpose:
1354 * Look for a thread and remove it from the queues, if
1355 * (and only if) the thread is waiting on the supplied
1356 * <wait_queue, event> pair.
1357 * Conditions:
1358 * at splsched
1359 * wait queue locked
1360 * possibly recursive
1361 * Returns:
1362 * KERN_NOT_WAITING: Thread is not waiting here.
1363 * KERN_SUCCESS: It was, and is now removed (returned locked)
1364 */
1365 static kern_return_t
1366 _wait_queue_select64_thread(
1367 wait_queue_t wq,
1368 event64_t event,
1369 thread_t thread)
1370 {
1371 wait_queue_element_t wq_element;
1372 wait_queue_element_t wqe_next;
1373 kern_return_t res = KERN_NOT_WAITING;
1374 queue_t q = &wq->wq_queue;
1375
1376 thread_lock(thread);
1377 if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1378 remqueue(q, (queue_entry_t) thread);
1379 thread->at_safe_point = FALSE;
1380 thread->wait_event = NO_EVENT64;
1381 thread->wait_queue = WAIT_QUEUE_NULL;
1382 /* thread still locked */
1383 return KERN_SUCCESS;
1384 }
1385 thread_unlock(thread);
1386
1387 /*
1388 * The wait_queue associated with the thread may be one of this
1389 * wait queue's sets. Go see. If so, removing it from
1390 * there is like removing it from here.
1391 */
1392 wq_element = (wait_queue_element_t) queue_first(q);
1393 while (!queue_end(q, (queue_entry_t)wq_element)) {
1394 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1395 wqe_next = (wait_queue_element_t)
1396 queue_next((queue_t) wq_element);
1397
1398 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1399 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1400 wait_queue_t set_queue;
1401
1402 set_queue = (wait_queue_t)wql->wql_setqueue;
1403 wait_queue_lock(set_queue);
1404 if (! wait_queue_empty(set_queue)) {
1405 res = _wait_queue_select64_thread(set_queue,
1406 event,
1407 thread);
1408 }
1409 wait_queue_unlock(set_queue);
1410 if (res == KERN_SUCCESS)
1411 return KERN_SUCCESS;
1412 }
1413 wq_element = wqe_next;
1414 }
1415 return res;
1416 }
1417
1418
1419 /*
1420 * Routine: wait_queue_wakeup64_identity_locked
1421 * Purpose:
1422  *	Select a single thread that is most eligible to run and
1423  *	set it running, but return the thread locked.
1424 *
1425 * Conditions:
1426 * at splsched
1427 * wait queue locked
1428 * possibly recursive
1429 * Returns:
1430 * a pointer to the locked thread that was awakened
1431 */
1432 __private_extern__ thread_t
1433 wait_queue_wakeup64_identity_locked(
1434 wait_queue_t wq,
1435 event64_t event,
1436 wait_result_t result,
1437 boolean_t unlock)
1438 {
1439 kern_return_t res;
1440 thread_t thread;
1441
1442 assert(wait_queue_held(wq));
1443
1444
1445 thread = _wait_queue_select64_one(wq, event);
1446 if (unlock)
1447 wait_queue_unlock(wq);
1448
1449 if (thread) {
1450 res = thread_go(thread, result);
1451 assert(res == KERN_SUCCESS);
1452 }
1453 return thread; /* still locked if not NULL */
1454 }
1455
1456
1457 /*
1458 * Routine: wait_queue_wakeup64_one_locked
1459 * Purpose:
1460  *	Select a single thread that is most eligible to run and
1461  *	set it running.
1462 *
1463 * Conditions:
1464 * at splsched
1465 * wait queue locked
1466 * possibly recursive
1467 * Returns:
1468  *	KERN_SUCCESS - A thread was found waiting and awakened
1469  *	KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1470 */
1471 __private_extern__ kern_return_t
1472 wait_queue_wakeup64_one_locked(
1473 wait_queue_t wq,
1474 event64_t event,
1475 wait_result_t result,
1476 boolean_t unlock)
1477 {
1478 thread_t thread;
1479
1480 assert(wait_queue_held(wq));
1481
1482 thread = _wait_queue_select64_one(wq, event);
1483 if (unlock)
1484 wait_queue_unlock(wq);
1485
1486 if (thread) {
1487 kern_return_t res;
1488
1489 res = thread_go(thread, result);
1490 assert(res == KERN_SUCCESS);
1491 thread_unlock(thread);
1492 return res;
1493 }
1494
1495 return KERN_NOT_WAITING;
1496 }
1497
1498 /*
1499 * Routine: wait_queue_wakeup_one
1500 * Purpose:
1501 * Wakeup the most appropriate thread that is in the specified
1502 * wait queue for the specified event.
1503 * Conditions:
1504 * Nothing locked
1505 * Returns:
1506 * KERN_SUCCESS - Thread was woken up
1507  *	KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1508 */
1509 kern_return_t
1510 wait_queue_wakeup_one(
1511 wait_queue_t wq,
1512 event_t event,
1513 wait_result_t result)
1514 {
1515 thread_t thread;
1516 spl_t s;
1517
1518 if (!wait_queue_is_valid(wq)) {
1519 return KERN_INVALID_ARGUMENT;
1520 }
1521
1522 s = splsched();
1523 wait_queue_lock(wq);
1524 thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
1525 wait_queue_unlock(wq);
1526
1527 if (thread) {
1528 kern_return_t res;
1529
1530 res = thread_go(thread, result);
1531 assert(res == KERN_SUCCESS);
1532 thread_unlock(thread);
1533 splx(s);
1534 return res;
1535 }
1536
1537 splx(s);
1538 return KERN_NOT_WAITING;
1539 }
1540
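/*
 *	Illustrative sketch (editorial addition, not part of the original source;
 *	compiled out): waking exactly one waiter, e.g. when a single unit of a
 *	resource becomes available (a counting-semaphore style handoff).  The
 *	function name and event token are hypothetical.
 */
#if 0	/* example only */
static boolean_t
post_example_event_single(wait_queue_t wq, event_t example_event)
{
	/* KERN_NOT_WAITING means no thread was waiting on the pair */
	return (wait_queue_wakeup_one(wq, example_event,
				      THREAD_AWAKENED) == KERN_SUCCESS);
}
#endif
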
1541 /*
1542 * Routine: wait_queue_wakeup64_one
1543 * Purpose:
1544 * Wakeup the most appropriate thread that is in the specified
1545 * wait queue for the specified event.
1546 * Conditions:
1547 * Nothing locked
1548 * Returns:
1549 * KERN_SUCCESS - Thread was woken up
1550  *	KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1551 */
1552 kern_return_t
1553 wait_queue_wakeup64_one(
1554 wait_queue_t wq,
1555 event64_t event,
1556 wait_result_t result)
1557 {
1558 thread_t thread;
1559 spl_t s;
1560
1561 if (!wait_queue_is_valid(wq)) {
1562 return KERN_INVALID_ARGUMENT;
1563 }
1564 s = splsched();
1565 wait_queue_lock(wq);
1566 thread = _wait_queue_select64_one(wq, event);
1567 wait_queue_unlock(wq);
1568
1569 if (thread) {
1570 kern_return_t res;
1571
1572 res = thread_go(thread, result);
1573 assert(res == KERN_SUCCESS);
1574 thread_unlock(thread);
1575 splx(s);
1576 return res;
1577 }
1578
1579 splx(s);
1580 return KERN_NOT_WAITING;
1581 }
1582
1583
1584 /*
1585 * Routine: wait_queue_wakeup64_thread_locked
1586 * Purpose:
1587 * Wakeup the particular thread that was specified if and only
1588  *	if it was in this wait queue (or one of its set queues)
1589 * and waiting on the specified event.
1590 *
1591 * This is much safer than just removing the thread from
1592 * whatever wait queue it happens to be on. For instance, it
1593 * may have already been awoken from the wait you intended to
1594 * interrupt and waited on something else (like another
1595 * semaphore).
1596 * Conditions:
1597 * at splsched
1598 * wait queue already locked (may be released).
1599 * Returns:
1600 * KERN_SUCCESS - the thread was found waiting and awakened
1601 * KERN_NOT_WAITING - the thread was not waiting here
1602 */
1603 __private_extern__ kern_return_t
1604 wait_queue_wakeup64_thread_locked(
1605 wait_queue_t wq,
1606 event64_t event,
1607 thread_t thread,
1608 wait_result_t result,
1609 boolean_t unlock)
1610 {
1611 kern_return_t res;
1612
1613 assert(wait_queue_held(wq));
1614
1615 /*
1616 * See if the thread was still waiting there. If so, it got
1617 * dequeued and returned locked.
1618 */
1619 res = _wait_queue_select64_thread(wq, event, thread);
1620 if (unlock)
1621 wait_queue_unlock(wq);
1622
1623 if (res != KERN_SUCCESS)
1624 return KERN_NOT_WAITING;
1625
1626 res = thread_go(thread, result);
1627 assert(res == KERN_SUCCESS);
1628 thread_unlock(thread);
1629 return res;
1630 }
1631
1632 /*
1633 * Routine: wait_queue_wakeup_thread
1634 * Purpose:
1635 * Wakeup the particular thread that was specified if and only
1636  *	if it was in this wait queue (or one of its set queues)
1637 * and waiting on the specified event.
1638 *
1639 * This is much safer than just removing the thread from
1640 * whatever wait queue it happens to be on. For instance, it
1641 * may have already been awoken from the wait you intended to
1642 * interrupt and waited on something else (like another
1643 * semaphore).
1644 * Conditions:
1645 * nothing of interest locked
1646 * we need to assume spl needs to be raised
1647 * Returns:
1648 * KERN_SUCCESS - the thread was found waiting and awakened
1649 * KERN_NOT_WAITING - the thread was not waiting here
1650 */
1651 kern_return_t
1652 wait_queue_wakeup_thread(
1653 wait_queue_t wq,
1654 event_t event,
1655 thread_t thread,
1656 wait_result_t result)
1657 {
1658 kern_return_t res;
1659 spl_t s;
1660
1661 if (!wait_queue_is_valid(wq)) {
1662 return KERN_INVALID_ARGUMENT;
1663 }
1664
1665 s = splsched();
1666 wait_queue_lock(wq);
1667 res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
1668 wait_queue_unlock(wq);
1669
1670 if (res == KERN_SUCCESS) {
1671 res = thread_go(thread, result);
1672 assert(res == KERN_SUCCESS);
1673 thread_unlock(thread);
1674 splx(s);
1675 return res;
1676 }
1677 splx(s);
1678 return KERN_NOT_WAITING;
1679 }
1680
1681 /*
1682 * Routine: wait_queue_wakeup64_thread
1683 * Purpose:
1684 * Wakeup the particular thread that was specified if and only
1685  *	if it was in this wait queue (or one of its set queues)
1686 * and waiting on the specified event.
1687 *
1688 * This is much safer than just removing the thread from
1689 * whatever wait queue it happens to be on. For instance, it
1690 * may have already been awoken from the wait you intended to
1691 * interrupt and waited on something else (like another
1692 * semaphore).
1693 * Conditions:
1694 * nothing of interest locked
1695 * we need to assume spl needs to be raised
1696 * Returns:
1697 * KERN_SUCCESS - the thread was found waiting and awakened
1698 * KERN_NOT_WAITING - the thread was not waiting here
1699 */
1700 kern_return_t
1701 wait_queue_wakeup64_thread(
1702 wait_queue_t wq,
1703 event64_t event,
1704 thread_t thread,
1705 wait_result_t result)
1706 {
1707 kern_return_t res;
1708 spl_t s;
1709
1710 if (!wait_queue_is_valid(wq)) {
1711 return KERN_INVALID_ARGUMENT;
1712 }
1713
1714 s = splsched();
1715 wait_queue_lock(wq);
1716 res = _wait_queue_select64_thread(wq, event, thread);
1717 wait_queue_unlock(wq);
1718
1719 if (res == KERN_SUCCESS) {
1720 res = thread_go(thread, result);
1721 assert(res == KERN_SUCCESS);
1722 thread_unlock(thread);
1723 splx(s);
1724 return res;
1725 }
1726 splx(s);
1727 return KERN_NOT_WAITING;
1728 }
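
/*
 *	Illustrative sketch (editorial addition, not part of the original source;
 *	compiled out): a targeted wakeup.  Only the named thread is awakened, and
 *	only if it is still waiting on this <wq, example_event> pair, which is the
 *	safe way to interrupt one specific waiter.  The function name and event
 *	token are hypothetical.
 */
#if 0	/* example only */
static kern_return_t
wake_specific_waiter(wait_queue_t wq, event_t example_event, thread_t thread)
{
	return wait_queue_wakeup_thread(wq, example_event, thread, THREAD_AWAKENED);
}
#endif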