[apple/xnu.git] / osfmk / kern / wait_queue.c (blob b5e9c2d1f58436103169bca740c5964a12d2408c, git.saurik.com mirror)
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: wait_queue.c (adapted from sched_prim.c)
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Primitives for manipulating wait queues: either global
64 * ones from sched_prim.c, or private ones associated with
65 * particular structures (ports, semaphores, etc.).
66 */
67
68 #include <kern/kern_types.h>
69 #include <kern/simple_lock.h>
70 #include <kern/kalloc.h>
71 #include <kern/queue.h>
72 #include <kern/spl.h>
73 #include <mach/sync_policy.h>
74 #include <kern/sched_prim.h>
75
76 #include <kern/wait_queue.h>
77
78 /* forward declarations */
79 static boolean_t wait_queue_member_locked(
80 wait_queue_t wq,
81 wait_queue_set_t wq_set);
82
83 void wait_queue_unlink_one(
84 wait_queue_t wq,
85 wait_queue_set_t *wq_setp);
86
87 kern_return_t wait_queue_set_unlink_all_nofree(
88 wait_queue_set_t wq_set);
89
90 /*
91 * Routine: wait_queue_init
92 * Purpose:
93 * Initialize a previously allocated wait queue.
94 * Returns:
95 * KERN_SUCCESS - The wait_queue_t was initialized
96 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
97 */
98 kern_return_t
99 wait_queue_init(
100 wait_queue_t wq,
101 int policy)
102 {
103 if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
104 return KERN_INVALID_ARGUMENT;
105
106 wq->wq_fifo = TRUE;
107 wq->wq_type = _WAIT_QUEUE_inited;
108 queue_init(&wq->wq_queue);
109 hw_lock_init(&wq->wq_interlock);
110 return KERN_SUCCESS;
111 }
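
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A subsystem that embeds a struct wait_queue in its own object would
 * initialize it roughly like this; the "my_object" type and field names
 * are assumed for illustration.
 *
 *	struct my_object {
 *		struct wait_queue	mo_waitq;
 *		int			mo_state;
 *	};
 *
 *	kern_return_t kr = wait_queue_init(&obj->mo_waitq, SYNC_POLICY_FIFO);
 *	if (kr != KERN_SUCCESS)
 *		return kr;	(only SYNC_POLICY_FIFO ordering is accepted)
 */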
112
113 /*
114 * Routine: wait_queue_alloc
115 * Purpose:
116 * Allocate and initialize a wait queue for use outside of
117 * the mach part of the kernel.
118 * Conditions:
119 * Nothing locked - can block.
120 * Returns:
121 * The allocated and initialized wait queue
122 * WAIT_QUEUE_NULL if there is a resource shortage
123 */
124 wait_queue_t
125 wait_queue_alloc(
126 int policy)
127 {
128 wait_queue_t wq;
129 kern_return_t ret;
130
131 wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
132 if (wq != WAIT_QUEUE_NULL) {
133 ret = wait_queue_init(wq, policy);
134 if (ret != KERN_SUCCESS) {
135 kfree(wq, sizeof(struct wait_queue));
136 wq = WAIT_QUEUE_NULL;
137 }
138 }
139 return wq;
140 }
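
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Typical allocate/free usage from outside the mach core; the queue must
 * be empty again before wait_queue_free() will succeed.
 *
 *	wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *	if (wq == WAIT_QUEUE_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	... assert_wait / wakeup traffic on wq ...
 *	(void) wait_queue_free(wq);
 */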
141
142 /*
143 * Routine: wait_queue_free
144 * Purpose:
145 * Free an allocated wait queue.
146 * Conditions:
147 * May block.
148 */
149 kern_return_t
150 wait_queue_free(
151 wait_queue_t wq)
152 {
153 if (!wait_queue_is_queue(wq))
154 return KERN_INVALID_ARGUMENT;
155 if (!queue_empty(&wq->wq_queue))
156 return KERN_FAILURE;
157 kfree(wq, sizeof(struct wait_queue));
158 return KERN_SUCCESS;
159 }
160
161 /*
162 * Routine: wait_queue_set_init
163 * Purpose:
164 * Initialize a previously allocated wait queue set.
165 * Returns:
166 * KERN_SUCCESS - The wait_queue_set_t was initialized
167 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
168 */
169 kern_return_t
170 wait_queue_set_init(
171 wait_queue_set_t wqset,
172 int policy)
173 {
174 kern_return_t ret;
175
176 ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
177 if (ret != KERN_SUCCESS)
178 return ret;
179
180 wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
181 if (policy & SYNC_POLICY_PREPOST)
182 wqset->wqs_wait_queue.wq_isprepost = TRUE;
183 else
184 wqset->wqs_wait_queue.wq_isprepost = FALSE;
185 queue_init(&wqset->wqs_setlinks);
186 wqset->wqs_refcount = 0;
187 return KERN_SUCCESS;
188 }
189
190
191 kern_return_t
192 wait_queue_sub_init(
193 wait_queue_set_t wqset,
194 int policy)
195 {
196 return wait_queue_set_init(wqset, policy);
197 }
198
199 kern_return_t
200 wait_queue_sub_clearrefs(
201 wait_queue_set_t wq_set)
202 {
203 if (!wait_queue_is_set(wq_set))
204 return KERN_INVALID_ARGUMENT;
205
206 wqs_lock(wq_set);
207 wq_set->wqs_refcount = 0;
208 wqs_unlock(wq_set);
209 return KERN_SUCCESS;
210 }
211
212 /*
213 * Routine: wait_queue_set_alloc
214 * Purpose:
215 * Allocate and initialize a wait queue set for
216 * use outside of the mach part of the kernel.
217 * Conditions:
218 * May block.
219 * Returns:
220 * The allocated and initialized wait queue set
221 * WAIT_QUEUE_SET_NULL if there is a resource shortage
222 */
223 wait_queue_set_t
224 wait_queue_set_alloc(
225 int policy)
226 {
227 wait_queue_set_t wq_set;
228
229 wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
230 if (wq_set != WAIT_QUEUE_SET_NULL) {
231 kern_return_t ret;
232
233 ret = wait_queue_set_init(wq_set, policy);
234 if (ret != KERN_SUCCESS) {
235 kfree(wq_set, sizeof(struct wait_queue_set));
236 wq_set = WAIT_QUEUE_SET_NULL;
237 }
238 }
239 return wq_set;
240 }
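
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A client that wants prepost semantics (wakeups remembered while no
 * thread is waiting) would allocate its set like this.
 *
 *	wait_queue_set_t wq_set;
 *
 *	wq_set = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
 *	if (wq_set == WAIT_QUEUE_SET_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 */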
241
242 /*
243 * Routine: wait_queue_set_free
244 * Purpose:
245 * Free an allocated wait queue set
246 * Conditions:
247 * May block.
248 */
249 kern_return_t
250 wait_queue_set_free(
251 wait_queue_set_t wq_set)
252 {
253 if (!wait_queue_is_set(wq_set))
254 return KERN_INVALID_ARGUMENT;
255
256 if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
257 return KERN_FAILURE;
258
259 kfree(wq_set, sizeof(struct wait_queue_set));
260 return KERN_SUCCESS;
261 }
262
263
264 /*
265 *
266 * Routine: wait_queue_set_size
267 * Routine: wait_queue_link_size
268 * Purpose:
269 * Return the size of opaque wait queue structures
270 */
271 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
272 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
273
274 /* declare a unique type for wait queue link structures */
275 static unsigned int _wait_queue_link;
276 static unsigned int _wait_queue_unlinked;
277
278 #define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
279 #define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
280
281 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
282 WQASSERT(((wqe)->wqe_queue == (wq) && \
283 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
284 "wait queue element list corruption: wq=%#x, wqe=%#x", \
285 (wq), (wqe))
286
287 #define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
288 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
289 (queue_t)(wql) : &(wql)->wql_setlinks)))
290
291 #define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
292 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
293 (queue_t)(wql) : &(wql)->wql_setlinks)))
294
295 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
296 WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
297 ((wql)->wql_setqueue == (wqs)) && \
298 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
299 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
300 "wait queue set links corruption: wqs=%#x, wql=%#x", \
301 (wqs), (wql))
302
303 #if defined(_WAIT_QUEUE_DEBUG_)
304
305 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
306
307 #define WAIT_QUEUE_CHECK(wq) \
308 MACRO_BEGIN \
309 queue_t q2 = &(wq)->wq_queue; \
310 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
311 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
312 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
313 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
314 } \
315 MACRO_END
316
317 #define WAIT_QUEUE_SET_CHECK(wqs) \
318 MACRO_BEGIN \
319 queue_t q2 = &(wqs)->wqs_setlinks; \
320 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
321 while (!queue_end(q2, (queue_entry_t)wql2)) { \
322 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
323 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
324 } \
325 MACRO_END
326
327 #else /* !_WAIT_QUEUE_DEBUG_ */
328
329 #define WQASSERT(e, s, p0, p1) assert(e)
330
331 #define WAIT_QUEUE_CHECK(wq)
332 #define WAIT_QUEUE_SET_CHECK(wqs)
333
334 #endif /* !_WAIT_QUEUE_DEBUG_ */
335
336 /*
337 * Routine: wait_queue_member_locked
338 * Purpose:
339 * Indicate whether this wait queue is a member of the given set queue
340 * Conditions:
341 * The wait queue is locked
342 * The set queue is just that, a set queue
343 */
344 static boolean_t
345 wait_queue_member_locked(
346 wait_queue_t wq,
347 wait_queue_set_t wq_set)
348 {
349 wait_queue_element_t wq_element;
350 queue_t q;
351
352 assert(wait_queue_held(wq));
353 assert(wait_queue_is_set(wq_set));
354
355 q = &wq->wq_queue;
356
357 wq_element = (wait_queue_element_t) queue_first(q);
358 while (!queue_end(q, (queue_entry_t)wq_element)) {
359 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
360 if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
361 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
362
363 if (wql->wql_setqueue == wq_set)
364 return TRUE;
365 }
366 wq_element = (wait_queue_element_t)
367 queue_next((queue_t) wq_element);
368 }
369 return FALSE;
370 }
371
372
373 /*
374 * Routine: wait_queue_member
375 * Purpose:
376 * Indicate whether this wait queue is a member of the given set queue
377 * Conditions:
378 * The set queue is just that, a set queue
379 */
380 boolean_t
381 wait_queue_member(
382 wait_queue_t wq,
383 wait_queue_set_t wq_set)
384 {
385 boolean_t ret;
386 spl_t s;
387
388 if (!wait_queue_is_set(wq_set))
389 return FALSE;
390
391 s = splsched();
392 wait_queue_lock(wq);
393 ret = wait_queue_member_locked(wq, wq_set);
394 wait_queue_unlock(wq);
395 splx(s);
396
397 return ret;
398 }
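
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A caller can test membership before deciding whether a link or unlink
 * is needed.
 *
 *	if (!wait_queue_member(wq, wq_set))
 *		kr = wait_queue_link(wq, wq_set);
 */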
399
400
401 /*
402 * Routine: wait_queue_link_noalloc
403 * Purpose:
404 * Insert a set wait queue into a wait queue. This
405 * requires us to link the two together using a wait_queue_link
406 * structure supplied by the caller.
407 * Conditions:
408 * The wait queue being inserted must be inited as a set queue
409 */
410 kern_return_t
411 wait_queue_link_noalloc(
412 wait_queue_t wq,
413 wait_queue_set_t wq_set,
414 wait_queue_link_t wql)
415 {
416 wait_queue_element_t wq_element;
417 queue_t q;
418 spl_t s;
419
420 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
421 return KERN_INVALID_ARGUMENT;
422
423 /*
424 * There are probably fewer threads and sets associated with
425 * the wait queue than there are wait queues associated with
426 * the set. So let's validate it that way.
427 */
428 s = splsched();
429 wait_queue_lock(wq);
430 q = &wq->wq_queue;
431 wq_element = (wait_queue_element_t) queue_first(q);
432 while (!queue_end(q, (queue_entry_t)wq_element)) {
433 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
434 if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
435 ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
436 wait_queue_unlock(wq);
437 splx(s);
438 return KERN_ALREADY_IN_SET;
439 }
440 wq_element = (wait_queue_element_t)
441 queue_next((queue_t) wq_element);
442 }
443
444 /*
445 * Not already a member, so we can add it.
446 */
447 wqs_lock(wq_set);
448
449 WAIT_QUEUE_SET_CHECK(wq_set);
450
451 wql->wql_queue = wq;
452 queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
453 wql->wql_setqueue = wq_set;
454 queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
455 wql->wql_type = WAIT_QUEUE_LINK;
456
457 wqs_unlock(wq_set);
458 wait_queue_unlock(wq);
459 splx(s);
460
461 return KERN_SUCCESS;
462 }
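
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A caller that has already allocated the link (for example, embedded in
 * its own structure) can link without allocating here; the field names
 * below are assumed.
 *
 *	kr = wait_queue_link_noalloc(&obj->wait_queue, wq_set, &obj->wq_link);
 *	if (kr == KERN_ALREADY_IN_SET)
 *		... the caller-supplied link was not consumed ...
 */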
463
464 /*
465 * Routine: wait_queue_link
466 * Purpose:
467 * Insert a set wait queue into a wait queue. This
468 * requires us to link the two together using a wait_queue_link
469 * structure that we allocate.
470 * Conditions:
471 * The wait queue being inserted must be inited as a set queue
472 */
473 kern_return_t
474 wait_queue_link(
475 wait_queue_t wq,
476 wait_queue_set_t wq_set)
477 {
478 wait_queue_link_t wql;
479 kern_return_t ret;
480
481 wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
482 if (wql == WAIT_QUEUE_LINK_NULL)
483 return KERN_RESOURCE_SHORTAGE;
484
485 ret = wait_queue_link_noalloc(wq, wq_set, wql);
486 if (ret != KERN_SUCCESS)
487 kfree(wql, sizeof(struct wait_queue_link));
488
489 return ret;
490 }
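
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Linking a wait queue into a set and undoing it later; this variant
 * allocates the link structure, and wait_queue_unlink() frees it.
 *
 *	kr = wait_queue_link(wq, wq_set);
 *	if (kr != KERN_SUCCESS && kr != KERN_ALREADY_IN_SET)
 *		return kr;
 *	...
 *	(void) wait_queue_unlink(wq, wq_set);
 */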
491
492
493 /*
494 * Routine: wait_queue_unlink_locked
495 * Purpose:
496 * Undo the linkage between a wait queue and a set.
497 */
498 static void
499 wait_queue_unlink_locked(
500 wait_queue_t wq,
501 wait_queue_set_t wq_set,
502 wait_queue_link_t wql)
503 {
504 assert(wait_queue_held(wq));
505 assert(wait_queue_held(&wq_set->wqs_wait_queue));
506
507 wql->wql_queue = WAIT_QUEUE_NULL;
508 queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
509 wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
510 queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
511 wql->wql_type = WAIT_QUEUE_UNLINKED;
512
513 WAIT_QUEUE_CHECK(wq);
514 WAIT_QUEUE_SET_CHECK(wq_set);
515 }
516
517 /*
518 * Routine: wait_queue_unlink
519 * Purpose:
520 * Remove the linkage between a wait queue and a set,
521 * freeing the linkage structure.
522 * Conditions:
523 * The wait queue must be a member of the set queue
524 */
525 kern_return_t
526 wait_queue_unlink(
527 wait_queue_t wq,
528 wait_queue_set_t wq_set)
529 {
530 wait_queue_element_t wq_element;
531 wait_queue_link_t wql;
532 queue_t q;
533 spl_t s;
534
535 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
536 return KERN_INVALID_ARGUMENT;
537 }
538 s = splsched();
539 wait_queue_lock(wq);
540
541 q = &wq->wq_queue;
542 wq_element = (wait_queue_element_t) queue_first(q);
543 while (!queue_end(q, (queue_entry_t)wq_element)) {
544 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
545 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
546 wql = (wait_queue_link_t)wq_element;
547
548 if (wql->wql_setqueue == wq_set) {
549 wqs_lock(wq_set);
550 wait_queue_unlink_locked(wq, wq_set, wql);
551 wqs_unlock(wq_set);
552 wait_queue_unlock(wq);
553 splx(s);
554 kfree(wql, sizeof(struct wait_queue_link));
555 return KERN_SUCCESS;
556 }
557 }
558 wq_element = (wait_queue_element_t)
559 queue_next((queue_t) wq_element);
560 }
561 wait_queue_unlock(wq);
562 splx(s);
563 return KERN_NOT_IN_SET;
564 }
565
566
567 /*
568 * Routine: wait_queue_unlinkall_nofree
569 * Purpose:
570 * Remove the linkage between a wait queue and all its
571 * sets. The caller is responsible for freeing
572 * the wait queue link structures.
573 */
574
575 kern_return_t
576 wait_queue_unlinkall_nofree(
577 wait_queue_t wq)
578 {
579 wait_queue_element_t wq_element;
580 wait_queue_element_t wq_next_element;
581 wait_queue_set_t wq_set;
582 wait_queue_link_t wql;
583 queue_head_t links_queue_head;
584 queue_t links = &links_queue_head;
585 queue_t q;
586 spl_t s;
587
588 if (!wait_queue_is_queue(wq)) {
589 return KERN_INVALID_ARGUMENT;
590 }
591
592 queue_init(links);
593
594 s = splsched();
595 wait_queue_lock(wq);
596
597 q = &wq->wq_queue;
598
599 wq_element = (wait_queue_element_t) queue_first(q);
600 while (!queue_end(q, (queue_entry_t)wq_element)) {
601 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
602 wq_next_element = (wait_queue_element_t)
603 queue_next((queue_t) wq_element);
604
605 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
606 wql = (wait_queue_link_t)wq_element;
607 wq_set = wql->wql_setqueue;
608 wqs_lock(wq_set);
609 wait_queue_unlink_locked(wq, wq_set, wql);
610 wqs_unlock(wq_set);
611 }
612 wq_element = wq_next_element;
613 }
614 wait_queue_unlock(wq);
615 splx(s);
616 return(KERN_SUCCESS);
617 }
618
619
620 /*
621 * Routine: wait_queue_unlink_all
622 * Purpose:
623 * Remove the linkage between a wait queue and all its sets.
624 * All the linkage structures are freed.
625 * Conditions:
626 * Nothing of interest locked.
627 */
628
629 kern_return_t
630 wait_queue_unlink_all(
631 wait_queue_t wq)
632 {
633 wait_queue_element_t wq_element;
634 wait_queue_element_t wq_next_element;
635 wait_queue_set_t wq_set;
636 wait_queue_link_t wql;
637 queue_head_t links_queue_head;
638 queue_t links = &links_queue_head;
639 queue_t q;
640 spl_t s;
641
642 if (!wait_queue_is_queue(wq)) {
643 return KERN_INVALID_ARGUMENT;
644 }
645
646 queue_init(links);
647
648 s = splsched();
649 wait_queue_lock(wq);
650
651 q = &wq->wq_queue;
652
653 wq_element = (wait_queue_element_t) queue_first(q);
654 while (!queue_end(q, (queue_entry_t)wq_element)) {
655 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
656 wq_next_element = (wait_queue_element_t)
657 queue_next((queue_t) wq_element);
658
659 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
660 wql = (wait_queue_link_t)wq_element;
661 wq_set = wql->wql_setqueue;
662 wqs_lock(wq_set);
663 wait_queue_unlink_locked(wq, wq_set, wql);
664 wqs_unlock(wq_set);
665 enqueue(links, &wql->wql_links);
666 }
667 wq_element = wq_next_element;
668 }
669 wait_queue_unlock(wq);
670 splx(s);
671
672 while(!queue_empty(links)) {
673 wql = (wait_queue_link_t) dequeue(links);
674 kfree(wql, sizeof(struct wait_queue_link));
675 }
676
677 return(KERN_SUCCESS);
678 }
679
680 /*
681 * Routine: wait_queue_set_unlink_all_nofree
682 * Purpose:
683 * Remove the linkage between a set wait queue and all its
684 * member wait queues. The link structures are not freed, nor
685 * returned. It is the caller's responsibility to track and free
686 * them.
687 * Conditions:
688 * The wait queue must be inited as a set queue
689 */
690 kern_return_t
691 wait_queue_set_unlink_all_nofree(
692 wait_queue_set_t wq_set)
693 {
694 wait_queue_link_t wql;
695 wait_queue_t wq;
696 queue_t q;
697 spl_t s;
698
699 if (!wait_queue_is_set(wq_set)) {
700 return KERN_INVALID_ARGUMENT;
701 }
702
703 retry:
704 s = splsched();
705 wqs_lock(wq_set);
706
707 q = &wq_set->wqs_setlinks;
708
709 wql = (wait_queue_link_t)queue_first(q);
710 while (!queue_end(q, (queue_entry_t)wql)) {
711 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
712 wq = wql->wql_queue;
713 if (wait_queue_lock_try(wq)) {
714 wait_queue_unlink_locked(wq, wq_set, wql);
715 wait_queue_unlock(wq);
716 wql = (wait_queue_link_t)queue_first(q);
717 } else {
718 wqs_unlock(wq_set);
719 splx(s);
720 delay(1);
721 goto retry;
722 }
723 }
724 wqs_unlock(wq_set);
725 splx(s);
726
727 return(KERN_SUCCESS);
728 }
729
730 /* legacy interface naming */
731 kern_return_t
732 wait_subqueue_unlink_all(
733 wait_queue_set_t wq_set)
734 {
735 return wait_queue_set_unlink_all_nofree(wq_set);
736 }
737
738
739 /*
740 * Routine: wait_queue_set_unlink_all
741 * Purpose:
742 * Remove the linkage between a set wait queue and all its
743 * member wait queues. The link structures are freed.
744 * Conditions:
745 * The wait queue must be a set
746 */
747 kern_return_t
748 wait_queue_set_unlink_all(
749 wait_queue_set_t wq_set)
750 {
751 wait_queue_link_t wql;
752 wait_queue_t wq;
753 queue_t q;
754 queue_head_t links_queue_head;
755 queue_t links = &links_queue_head;
756 spl_t s;
757
758 if (!wait_queue_is_set(wq_set)) {
759 return KERN_INVALID_ARGUMENT;
760 }
761
762 queue_init(links);
763
764 retry:
765 s = splsched();
766 wqs_lock(wq_set);
767
768 q = &wq_set->wqs_setlinks;
769
770 wql = (wait_queue_link_t)queue_first(q);
771 while (!queue_end(q, (queue_entry_t)wql)) {
772 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
773 wq = wql->wql_queue;
774 if (wait_queue_lock_try(wq)) {
775 wait_queue_unlink_locked(wq, wq_set, wql);
776 wait_queue_unlock(wq);
777 enqueue(links, &wql->wql_links);
778 wql = (wait_queue_link_t)queue_first(q);
779 } else {
780 wqs_unlock(wq_set);
781 splx(s);
782 delay(1);
783 goto retry;
784 }
785 }
786 wqs_unlock(wq_set);
787 splx(s);
788
789 while (!queue_empty (links)) {
790 wql = (wait_queue_link_t) dequeue(links);
791 kfree(wql, sizeof(struct wait_queue_link));
792 }
793 return(KERN_SUCCESS);
794 }
795
796
797 /*
798 * Routine: wait_queue_unlink_one
799 * Purpose:
800 * Find and unlink one set wait queue
801 * Conditions:
802 * Nothing of interest locked.
803 */
804 void
805 wait_queue_unlink_one(
806 wait_queue_t wq,
807 wait_queue_set_t *wq_setp)
808 {
809 wait_queue_element_t wq_element;
810 queue_t q;
811 spl_t s;
812
813 s = splsched();
814 wait_queue_lock(wq);
815
816 q = &wq->wq_queue;
817
818 wq_element = (wait_queue_element_t) queue_first(q);
819 while (!queue_end(q, (queue_entry_t)wq_element)) {
820
821 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
822 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
823 wait_queue_set_t wq_set = wql->wql_setqueue;
824
825 wqs_lock(wq_set);
826 wait_queue_unlink_locked(wq, wq_set, wql);
827 wqs_unlock(wq_set);
828 wait_queue_unlock(wq);
829 splx(s);
830 kfree(wql,sizeof(struct wait_queue_link));
831 *wq_setp = wq_set;
832 return;
833 }
834
835 wq_element = (wait_queue_element_t)
836 queue_next((queue_t) wq_element);
837 }
838 wait_queue_unlock(wq);
839 splx(s);
840 *wq_setp = WAIT_QUEUE_SET_NULL;
841 }
842
843
844 /*
845 * Routine: wait_queue_assert_wait64_locked
846 * Purpose:
847 * Insert the current thread into the supplied wait queue
848 * waiting for a particular event to be posted to that queue.
849 *
850 * Conditions:
851 * The wait queue is assumed locked.
852 * The waiting thread is assumed locked.
853 *
854 */
855 __private_extern__ wait_result_t
856 wait_queue_assert_wait64_locked(
857 wait_queue_t wq,
858 event64_t event,
859 wait_interrupt_t interruptible,
860 uint64_t deadline,
861 thread_t thread)
862 {
863 wait_result_t wait_result;
864
865 if (!wait_queue_assert_possible(thread))
866 panic("wait_queue_assert_wait64_locked");
867
868 if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
869 wait_queue_set_t wqs = (wait_queue_set_t)wq;
870
871 if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
872 return(THREAD_AWAKENED);
873 }
874
875 /*
876 * This is the extent to which we currently take scheduling attributes
877 * into account. If the thread is vm privileged, we stick it at
878 * the front of the queue. Later, these queues will honor the policy
879 * value set at wait_queue_init time.
880 */
881 wait_result = thread_mark_wait_locked(thread, interruptible);
882 if (wait_result == THREAD_WAITING) {
883 if (thread->options & TH_OPT_VMPRIV)
884 enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
885 else
886 enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
887
888 thread->wait_event = event;
889 thread->wait_queue = wq;
890
891 if (deadline != 0) {
892 if (!timer_call_enter(&thread->wait_timer, deadline))
893 thread->wait_timer_active++;
894 thread->wait_timer_is_set = TRUE;
895 }
896 }
897 return(wait_result);
898 }
899
900 /*
901 * Routine: wait_queue_assert_wait
902 * Purpose:
903 * Insert the current thread into the supplied wait queue
904 * waiting for a particular event to be posted to that queue.
905 *
906 * Conditions:
907 * nothing of interest locked.
908 */
909 wait_result_t
910 wait_queue_assert_wait(
911 wait_queue_t wq,
912 event_t event,
913 wait_interrupt_t interruptible,
914 uint64_t deadline)
915 {
916 spl_t s;
917 wait_result_t ret;
918 thread_t thread = current_thread();
919
920 /* If it is an invalid wait queue, you can't wait on it */
921 if (!wait_queue_is_valid(wq))
922 return (thread->wait_result = THREAD_RESTART);
923
924 s = splsched();
925 wait_queue_lock(wq);
926 thread_lock(thread);
927 ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
928 interruptible, deadline, thread);
929 thread_unlock(thread);
930 wait_queue_unlock(wq);
931 splx(s);
932 return(ret);
933 }
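
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The common wait pattern built on this routine; "my_event" is assumed,
 * and thread_block() comes from sched_prim.
 *
 *	wait_result_t wr;
 *
 *	wr = wait_queue_assert_wait(wq, my_event, THREAD_UNINT, 0);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	... wr is then THREAD_AWAKENED, THREAD_TIMED_OUT, etc. ...
 */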
934
935 /*
936 * Routine: wait_queue_assert_wait64
937 * Purpose:
938 * Insert the current thread into the supplied wait queue
939 * waiting for a particular event to be posted to that queue.
940 * Conditions:
941 * nothing of interest locked.
942 */
943 wait_result_t
944 wait_queue_assert_wait64(
945 wait_queue_t wq,
946 event64_t event,
947 wait_interrupt_t interruptible,
948 uint64_t deadline)
949 {
950 spl_t s;
951 wait_result_t ret;
952 thread_t thread = current_thread();
953
954 /* If it is an invalid wait queue, you can't wait on it */
955 if (!wait_queue_is_valid(wq))
956 return (thread->wait_result = THREAD_RESTART);
957
958 s = splsched();
959 wait_queue_lock(wq);
960 thread_lock(thread);
961 ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
962 thread_unlock(thread);
963 wait_queue_unlock(wq);
964 splx(s);
965 return(ret);
966 }
967
968 /*
969 * Routine: _wait_queue_select64_all
970 * Purpose:
971 * Select all threads off a wait queue that meet the
972 * supplied criteria.
973 * Conditions:
974 * at splsched
975 * wait queue locked
976 * wake_queue initialized and ready for insertion
977 * possibly recursive
978 * Returns:
979 * a queue of locked threads
980 */
981 static void
982 _wait_queue_select64_all(
983 wait_queue_t wq,
984 event64_t event,
985 queue_t wake_queue)
986 {
987 wait_queue_element_t wq_element;
988 wait_queue_element_t wqe_next;
989 queue_t q;
990
991 q = &wq->wq_queue;
992
993 wq_element = (wait_queue_element_t) queue_first(q);
994 while (!queue_end(q, (queue_entry_t)wq_element)) {
995 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
996 wqe_next = (wait_queue_element_t)
997 queue_next((queue_t) wq_element);
998
999 /*
1000 * We may have to recurse if this is a compound wait queue.
1001 */
1002 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1003 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1004 wait_queue_t set_queue;
1005
1006 /*
1007 * We have to check the set wait queue.
1008 */
1009 set_queue = (wait_queue_t)wql->wql_setqueue;
1010 wait_queue_lock(set_queue);
1011 if (set_queue->wq_isprepost) {
1012 wait_queue_set_t wqs = (wait_queue_set_t)set_queue;
1013
1014 /*
1015 * Preposting is only for sets, and the set's wait queue
1016 * is the first element of the set
1017 */
1018 wqs->wqs_refcount++;
1019 }
1020 if (! wait_queue_empty(set_queue))
1021 _wait_queue_select64_all(set_queue, event, wake_queue);
1022 wait_queue_unlock(set_queue);
1023 } else {
1024
1025 /*
1026 * Otherwise, it's a thread. If it is waiting on
1027 * the event we are posting to this queue, pull
1028 * it off the queue and stick it in our wake_queue.
1029 */
1030 thread_t t = (thread_t)wq_element;
1031
1032 if (t->wait_event == event) {
1033 thread_lock(t);
1034 remqueue(q, (queue_entry_t) t);
1035 enqueue (wake_queue, (queue_entry_t) t);
1036 t->wait_queue = WAIT_QUEUE_NULL;
1037 t->wait_event = NO_EVENT64;
1038 t->at_safe_point = FALSE;
1039 /* returned locked */
1040 }
1041 }
1042 wq_element = wqe_next;
1043 }
1044 }
1045
1046 /*
1047 * Routine: wait_queue_wakeup64_all_locked
1048 * Purpose:
1049 * Wakeup some number of threads that are in the specified
1050 * wait queue and waiting on the specified event.
1051 * Conditions:
1052 * wait queue already locked (may be released).
1053 * Returns:
1054 * KERN_SUCCESS - Threads were woken up
1055 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1056 */
1057 __private_extern__ kern_return_t
1058 wait_queue_wakeup64_all_locked(
1059 wait_queue_t wq,
1060 event64_t event,
1061 wait_result_t result,
1062 boolean_t unlock)
1063 {
1064 queue_head_t wake_queue_head;
1065 queue_t q = &wake_queue_head;
1066 kern_return_t res;
1067
1068 // assert(wait_queue_held(wq));
1069 if(!wq->wq_interlock.lock_data) { /* (BRINGUP */
1070 panic("wait_queue_wakeup64_all_locked: lock not held on %08X\n", wq); /* (BRINGUP) */
1071 }
1072
1073 queue_init(q);
1074
1075 /*
1076 * Select the threads that we will wake up. The threads
1077 * are returned to us locked and cleanly removed from the
1078 * wait queue.
1079 */
1080 _wait_queue_select64_all(wq, event, q);
1081 if (unlock)
1082 wait_queue_unlock(wq);
1083
1084 /*
1085 * For each thread, set it running.
1086 */
1087 res = KERN_NOT_WAITING;
1088 while (!queue_empty (q)) {
1089 thread_t thread = (thread_t) dequeue(q);
1090 res = thread_go(thread, result);
1091 assert(res == KERN_SUCCESS);
1092 thread_unlock(thread);
1093 }
1094 return res;
1095 }
1096
1097
1098 /*
1099 * Routine: wait_queue_wakeup_all
1100 * Purpose:
1101 * Wakeup some number of threads that are in the specified
1102 * wait queue and waiting on the specified event.
1103 * Conditions:
1104 * Nothing locked
1105 * Returns:
1106 * KERN_SUCCESS - Threads were woken up
1107 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1108 */
1109 kern_return_t
1110 wait_queue_wakeup_all(
1111 wait_queue_t wq,
1112 event_t event,
1113 wait_result_t result)
1114 {
1115 kern_return_t ret;
1116 spl_t s;
1117
1118 if (!wait_queue_is_valid(wq)) {
1119 return KERN_INVALID_ARGUMENT;
1120 }
1121
1122 s = splsched();
1123 wait_queue_lock(wq);
1124 if(!wq->wq_interlock.lock_data) { /* (BRINGUP */
1125 panic("wait_queue_wakeup_all: we did not get the lock on %08X\n", wq); /* (BRINGUP) */
1126 }
1127 ret = wait_queue_wakeup64_all_locked(
1128 wq, (event64_t)((uint32_t)event),
1129 result, TRUE);
1130 /* lock released */
1131 splx(s);
1132 return ret;
1133 }
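
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Posting an event to every waiter, typically right after the condition
 * they are waiting for has been made true.
 *
 *	obj->mo_state = MY_STATE_READY;		(assumed condition update)
 *	(void) wait_queue_wakeup_all(wq, my_event, THREAD_AWAKENED);
 */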
1134
1135 /*
1136 * Routine: wait_queue_wakeup64_all
1137 * Purpose:
1138 * Wakeup some number of threads that are in the specified
1139 * wait queue and waiting on the specified event.
1140 * Conditions:
1141 * Nothing locked
1142 * Returns:
1143 * KERN_SUCCESS - Threads were woken up
1144 * KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1145 */
1146 kern_return_t
1147 wait_queue_wakeup64_all(
1148 wait_queue_t wq,
1149 event64_t event,
1150 wait_result_t result)
1151 {
1152 kern_return_t ret;
1153 spl_t s;
1154
1155 if (!wait_queue_is_valid(wq)) {
1156 return KERN_INVALID_ARGUMENT;
1157 }
1158
1159 s = splsched();
1160 wait_queue_lock(wq);
1161 ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1162 /* lock released */
1163 splx(s);
1164 return ret;
1165 }
1166
1167 /*
1168 * Routine: _wait_queue_select64_one
1169 * Purpose:
1170 * Select the best thread off a wait queue that meets the
1171 * supplied criteria.
1172 * Conditions:
1173 * at splsched
1174 * wait queue locked
1175 * possibly recursive
1176 * Returns:
1177 * a locked thread - if one found
1178 * Note:
1179 * This is where the sync policy of the wait queue comes
1180 * into effect. For now, we just assume FIFO.
1181 */
1182 static thread_t
1183 _wait_queue_select64_one(
1184 wait_queue_t wq,
1185 event64_t event)
1186 {
1187 wait_queue_element_t wq_element;
1188 wait_queue_element_t wqe_next;
1189 thread_t t = THREAD_NULL;
1190 queue_t q;
1191
1192 assert(wq->wq_fifo);
1193
1194 q = &wq->wq_queue;
1195
1196 wq_element = (wait_queue_element_t) queue_first(q);
1197 while (!queue_end(q, (queue_entry_t)wq_element)) {
1198 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1199 wqe_next = (wait_queue_element_t)
1200 queue_next((queue_t) wq_element);
1201
1202 /*
1203 * We may have to recurse if this is a compound wait queue.
1204 */
1205 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1206 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1207 wait_queue_t set_queue;
1208
1209 /*
1210 * We have to check the set wait queue.
1211 */
1212 set_queue = (wait_queue_t)wql->wql_setqueue;
1213 wait_queue_lock(set_queue);
1214 if (! wait_queue_empty(set_queue)) {
1215 t = _wait_queue_select64_one(set_queue, event);
1216 }
1217 wait_queue_unlock(set_queue);
1218 if (t != THREAD_NULL)
1219 return t;
1220 } else {
1221
1222 /*
1223 * Otherwise, it's a thread. If it is waiting on
1224 * the event we are posting to this queue, pull
1225 * it off the queue and return it (still locked).
1226 */
1227 t = (thread_t)wq_element;
1228 if (t->wait_event == event) {
1229 thread_lock(t);
1230 remqueue(q, (queue_entry_t) t);
1231 t->wait_queue = WAIT_QUEUE_NULL;
1232 t->wait_event = NO_EVENT64;
1233 t->at_safe_point = FALSE;
1234 return t; /* still locked */
1235 }
1236
1237 t = THREAD_NULL;
1238 }
1239 wq_element = wqe_next;
1240 }
1241 return THREAD_NULL;
1242 }
1243
1244 /*
1245 * Routine: wait_queue_peek64_locked
1246 * Purpose:
1247 * Select the best thread from a wait queue that meets the
1248 * supplied criteria, but leave it on the queue it was
1249 * found on. The thread, and the actual wait_queue the
1250 * thread was found on are identified.
1251 * Conditions:
1252 * at splsched
1253 * wait queue locked
1254 * possibly recursive
1255 * Returns:
1256 * a locked thread - if one found
1257 * a locked waitq - the one the thread was found on
1258 * Note:
1259 * Both the waitq the thread was actually found on, and
1260 * the supplied wait queue, are locked after this.
1261 */
1262 __private_extern__ void
1263 wait_queue_peek64_locked(
1264 wait_queue_t wq,
1265 event64_t event,
1266 thread_t *tp,
1267 wait_queue_t *wqp)
1268 {
1269 wait_queue_element_t wq_element;
1270 wait_queue_element_t wqe_next;
1271 queue_t q;
1272
1273 assert(wq->wq_fifo);
1274
1275 *tp = THREAD_NULL;
1276
1277 q = &wq->wq_queue;
1278
1279 wq_element = (wait_queue_element_t) queue_first(q);
1280 while (!queue_end(q, (queue_entry_t)wq_element)) {
1281 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1282 wqe_next = (wait_queue_element_t)
1283 queue_next((queue_t) wq_element);
1284
1285 /*
1286 * We may have to recurse if this is a compound wait queue.
1287 */
1288 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1289 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1290 wait_queue_t set_queue;
1291
1292 /*
1293 * We have to check the set wait queue.
1294 */
1295 set_queue = (wait_queue_t)wql->wql_setqueue;
1296 wait_queue_lock(set_queue);
1297 if (! wait_queue_empty(set_queue)) {
1298 wait_queue_peek64_locked(set_queue, event, tp, wqp);
1299 }
1300 if (*tp != THREAD_NULL) {
1301 if (*wqp != set_queue)
1302 wait_queue_unlock(set_queue);
1303 return; /* thread and its waitq locked */
1304 }
1305
1306 wait_queue_unlock(set_queue);
1307 } else {
1308
1309 /*
1310 * Otherwise, it's a thread. If it is waiting on
1311 * the event we are posting to this queue, return
1312 * it locked, but leave it on the queue.
1313 */
1314 thread_t t = (thread_t)wq_element;
1315
1316 if (t->wait_event == event) {
1317 thread_lock(t);
1318 *tp = t;
1319 *wqp = wq;
1320 return;
1321 }
1322 }
1323 wq_element = wqe_next;
1324 }
1325 }
1326
1327 /*
1328 * Routine: wait_queue_pull_thread_locked
1329 * Purpose:
1330 * Pull a thread that was previously "peeked" off the wait
1331 * queue and (possibly) unlock the waitq.
1332 * Conditions:
1333 * at splsched
1334 * wait queue locked
1335 * thread locked
1336 * Returns:
1337 * with the thread still locked.
1338 */
1339 void
1340 wait_queue_pull_thread_locked(
1341 wait_queue_t waitq,
1342 thread_t thread,
1343 boolean_t unlock)
1344 {
1345
1346 assert(thread->wait_queue == waitq);
1347
1348 remqueue(&waitq->wq_queue, (queue_entry_t)thread );
1349 thread->wait_queue = WAIT_QUEUE_NULL;
1350 thread->wait_event = NO_EVENT64;
1351 thread->at_safe_point = FALSE;
1352 if (unlock)
1353 wait_queue_unlock(waitq);
1354 }
1355
1356
1357 /*
1358 * Routine: wait_queue_select64_thread
1359 * Purpose:
1360 * Look for a thread and remove it from the queues, if
1361 * (and only if) the thread is waiting on the supplied
1362 * <wait_queue, event> pair.
1363 * Conditions:
1364 * at splsched
1365 * wait queue locked
1366 * possibly recursive
1367 * Returns:
1368 * KERN_NOT_WAITING: Thread is not waiting here.
1369 * KERN_SUCCESS: It was, and is now removed (returned locked)
1370 */
1371 static kern_return_t
1372 _wait_queue_select64_thread(
1373 wait_queue_t wq,
1374 event64_t event,
1375 thread_t thread)
1376 {
1377 wait_queue_element_t wq_element;
1378 wait_queue_element_t wqe_next;
1379 kern_return_t res = KERN_NOT_WAITING;
1380 queue_t q = &wq->wq_queue;
1381
1382 thread_lock(thread);
1383 if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1384 remqueue(q, (queue_entry_t) thread);
1385 thread->at_safe_point = FALSE;
1386 thread->wait_event = NO_EVENT64;
1387 thread->wait_queue = WAIT_QUEUE_NULL;
1388 /* thread still locked */
1389 return KERN_SUCCESS;
1390 }
1391 thread_unlock(thread);
1392
1393 /*
1394 * The wait_queue associated with the thread may be one of this
1395 * wait queue's sets. Go see. If so, removing it from
1396 * there is like removing it from here.
1397 */
1398 wq_element = (wait_queue_element_t) queue_first(q);
1399 while (!queue_end(q, (queue_entry_t)wq_element)) {
1400 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1401 wqe_next = (wait_queue_element_t)
1402 queue_next((queue_t) wq_element);
1403
1404 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1405 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1406 wait_queue_t set_queue;
1407
1408 set_queue = (wait_queue_t)wql->wql_setqueue;
1409 wait_queue_lock(set_queue);
1410 if (! wait_queue_empty(set_queue)) {
1411 res = _wait_queue_select64_thread(set_queue,
1412 event,
1413 thread);
1414 }
1415 wait_queue_unlock(set_queue);
1416 if (res == KERN_SUCCESS)
1417 return KERN_SUCCESS;
1418 }
1419 wq_element = wqe_next;
1420 }
1421 return res;
1422 }
1423
1424
1425 /*
1426 * Routine: wait_queue_wakeup64_identity_locked
1427 * Purpose:
1428 * Select a single thread that is most-eligible to run and
1429 * set it running. But return the thread locked.
1430 *
1431 * Conditions:
1432 * at splsched
1433 * wait queue locked
1434 * possibly recursive
1435 * Returns:
1436 * a pointer to the locked thread that was awakened
1437 */
1438 __private_extern__ thread_t
1439 wait_queue_wakeup64_identity_locked(
1440 wait_queue_t wq,
1441 event64_t event,
1442 wait_result_t result,
1443 boolean_t unlock)
1444 {
1445 kern_return_t res;
1446 thread_t thread;
1447
1448 assert(wait_queue_held(wq));
1449
1450
1451 thread = _wait_queue_select64_one(wq, event);
1452 if (unlock)
1453 wait_queue_unlock(wq);
1454
1455 if (thread) {
1456 res = thread_go(thread, result);
1457 assert(res == KERN_SUCCESS);
1458 }
1459 return thread; /* still locked if not NULL */
1460 }
1461
1462
1463 /*
1464 * Routine: wait_queue_wakeup64_one_locked
1465 * Purpose:
1466 * Select a single thread that is most-eligible to run and
1467 * set it running.
1468 *
1469 * Conditions:
1470 * at splsched
1471 * wait queue locked
1472 * possibly recursive
1473 * Returns:
1474 * KERN_SUCCESS - A thread was found waiting and awakened
1475 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1476 */
1477 __private_extern__ kern_return_t
1478 wait_queue_wakeup64_one_locked(
1479 wait_queue_t wq,
1480 event64_t event,
1481 wait_result_t result,
1482 boolean_t unlock)
1483 {
1484 thread_t thread;
1485
1486 assert(wait_queue_held(wq));
1487
1488 thread = _wait_queue_select64_one(wq, event);
1489 if (unlock)
1490 wait_queue_unlock(wq);
1491
1492 if (thread) {
1493 kern_return_t res;
1494
1495 res = thread_go(thread, result);
1496 assert(res == KERN_SUCCESS);
1497 thread_unlock(thread);
1498 return res;
1499 }
1500
1501 return KERN_NOT_WAITING;
1502 }
1503
1504 /*
1505 * Routine: wait_queue_wakeup_one
1506 * Purpose:
1507 * Wakeup the most appropriate thread that is in the specified
1508 * wait queue for the specified event.
1509 * Conditions:
1510 * Nothing locked
1511 * Returns:
1512 * KERN_SUCCESS - Thread was woken up
1513 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1514 */
1515 kern_return_t
1516 wait_queue_wakeup_one(
1517 wait_queue_t wq,
1518 event_t event,
1519 wait_result_t result)
1520 {
1521 thread_t thread;
1522 spl_t s;
1523
1524 if (!wait_queue_is_valid(wq)) {
1525 return KERN_INVALID_ARGUMENT;
1526 }
1527
1528 s = splsched();
1529 wait_queue_lock(wq);
1530 thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
1531 wait_queue_unlock(wq);
1532
1533 if (thread) {
1534 kern_return_t res;
1535
1536 res = thread_go(thread, result);
1537 assert(res == KERN_SUCCESS);
1538 thread_unlock(thread);
1539 splx(s);
1540 return res;
1541 }
1542
1543 splx(s);
1544 return KERN_NOT_WAITING;
1545 }
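
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Waking a single waiter and handling the case where nobody is blocked.
 *
 *	if (wait_queue_wakeup_one(wq, my_event, THREAD_AWAKENED) ==
 *	    KERN_NOT_WAITING)
 *		... no thread was blocked; record the wakeup another way ...
 */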
1546
1547 /*
1548 * Routine: wait_queue_wakeup64_one
1549 * Purpose:
1550 * Wakeup the most appropriate thread that is in the specified
1551 * wait queue for the specified event.
1552 * Conditions:
1553 * Nothing locked
1554 * Returns:
1555 * KERN_SUCCESS - Thread was woken up
1556 * KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1557 */
1558 kern_return_t
1559 wait_queue_wakeup64_one(
1560 wait_queue_t wq,
1561 event64_t event,
1562 wait_result_t result)
1563 {
1564 thread_t thread;
1565 spl_t s;
1566
1567 if (!wait_queue_is_valid(wq)) {
1568 return KERN_INVALID_ARGUMENT;
1569 }
1570 s = splsched();
1571 wait_queue_lock(wq);
1572 thread = _wait_queue_select64_one(wq, event);
1573 wait_queue_unlock(wq);
1574
1575 if (thread) {
1576 kern_return_t res;
1577
1578 res = thread_go(thread, result);
1579 assert(res == KERN_SUCCESS);
1580 thread_unlock(thread);
1581 splx(s);
1582 return res;
1583 }
1584
1585 splx(s);
1586 return KERN_NOT_WAITING;
1587 }
1588
1589
1590 /*
1591 * Routine: wait_queue_wakeup64_thread_locked
1592 * Purpose:
1593 * Wakeup the particular thread that was specified if and only
1594 * if it was in this wait queue (or one of its set queues)
1595 * and waiting on the specified event.
1596 *
1597 * This is much safer than just removing the thread from
1598 * whatever wait queue it happens to be on. For instance, it
1599 * may have already been awoken from the wait you intended to
1600 * interrupt and waited on something else (like another
1601 * semaphore).
1602 * Conditions:
1603 * at splsched
1604 * wait queue already locked (may be released).
1605 * Returns:
1606 * KERN_SUCCESS - the thread was found waiting and awakened
1607 * KERN_NOT_WAITING - the thread was not waiting here
1608 */
1609 __private_extern__ kern_return_t
1610 wait_queue_wakeup64_thread_locked(
1611 wait_queue_t wq,
1612 event64_t event,
1613 thread_t thread,
1614 wait_result_t result,
1615 boolean_t unlock)
1616 {
1617 kern_return_t res;
1618
1619 assert(wait_queue_held(wq));
1620
1621 /*
1622 * See if the thread was still waiting there. If so, it got
1623 * dequeued and returned locked.
1624 */
1625 res = _wait_queue_select64_thread(wq, event, thread);
1626 if (unlock)
1627 wait_queue_unlock(wq);
1628
1629 if (res != KERN_SUCCESS)
1630 return KERN_NOT_WAITING;
1631
1632 res = thread_go(thread, result);
1633 assert(res == KERN_SUCCESS);
1634 thread_unlock(thread);
1635 return res;
1636 }
1637
1638 /*
1639 * Routine: wait_queue_wakeup_thread
1640 * Purpose:
1641 * Wakeup the particular thread that was specified if and only
1642 * if it was in this wait queue (or one of its set queues)
1643 * and waiting on the specified event.
1644 *
1645 * This is much safer than just removing the thread from
1646 * whatever wait queue it happens to be on. For instance, it
1647 * may have already been awoken from the wait you intended to
1648 * interrupt and waited on something else (like another
1649 * semaphore).
1650 * Conditions:
1651 * nothing of interest locked
1652 * we need to assume spl needs to be raised
1653 * Returns:
1654 * KERN_SUCCESS - the thread was found waiting and awakened
1655 * KERN_NOT_WAITING - the thread was not waiting here
1656 */
1657 kern_return_t
1658 wait_queue_wakeup_thread(
1659 wait_queue_t wq,
1660 event_t event,
1661 thread_t thread,
1662 wait_result_t result)
1663 {
1664 kern_return_t res;
1665 spl_t s;
1666
1667 if (!wait_queue_is_valid(wq)) {
1668 return KERN_INVALID_ARGUMENT;
1669 }
1670
1671 s = splsched();
1672 wait_queue_lock(wq);
1673 res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
1674 wait_queue_unlock(wq);
1675
1676 if (res == KERN_SUCCESS) {
1677 res = thread_go(thread, result);
1678 assert(res == KERN_SUCCESS);
1679 thread_unlock(thread);
1680 splx(s);
1681 return res;
1682 }
1683 splx(s);
1684 return KERN_NOT_WAITING;
1685 }
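
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Aborting a specific thread's wait, but only if it is still waiting on
 * this <wait queue, event> pair.
 *
 *	kr = wait_queue_wakeup_thread(wq, my_event, thread, THREAD_INTERRUPTED);
 *	if (kr == KERN_NOT_WAITING)
 *		... the thread had already been awakened or never waited here ...
 */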
1686
1687 /*
1688 * Routine: wait_queue_wakeup64_thread
1689 * Purpose:
1690 * Wakeup the particular thread that was specified if and only
1691 * if it was in this wait queue (or one of its set queues)
1692 * and waiting on the specified event.
1693 *
1694 * This is much safer than just removing the thread from
1695 * whatever wait queue it happens to be on. For instance, it
1696 * may have already been awoken from the wait you intended to
1697 * interrupt and waited on something else (like another
1698 * semaphore).
1699 * Conditions:
1700 * nothing of interest locked
1701 * we need to assume spl needs to be raised
1702 * Returns:
1703 * KERN_SUCCESS - the thread was found waiting and awakened
1704 * KERN_NOT_WAITING - the thread was not waiting here
1705 */
1706 kern_return_t
1707 wait_queue_wakeup64_thread(
1708 wait_queue_t wq,
1709 event64_t event,
1710 thread_t thread,
1711 wait_result_t result)
1712 {
1713 kern_return_t res;
1714 spl_t s;
1715
1716 if (!wait_queue_is_valid(wq)) {
1717 return KERN_INVALID_ARGUMENT;
1718 }
1719
1720 s = splsched();
1721 wait_queue_lock(wq);
1722 res = _wait_queue_select64_thread(wq, event, thread);
1723 wait_queue_unlock(wq);
1724
1725 if (res == KERN_SUCCESS) {
1726 res = thread_go(thread, result);
1727 assert(res == KERN_SUCCESS);
1728 thread_unlock(thread);
1729 splx(s);
1730 return res;
1731 }
1732 splx(s);
1733 return KERN_NOT_WAITING;
1734 }