1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_FREE_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 */
60 /*
61 * File: wait_queue.c (adapted from sched_prim.c)
62 * Author: Avadis Tevanian, Jr.
63 * Date: 1986
64 *
65 * Primitives for manipulating wait queues: either global
66 * ones from sched_prim.c, or private ones associated with
67 *	particular structures (ports, semaphores, etc.).
68 */
69
70 #include <kern/kern_types.h>
71 #include <kern/simple_lock.h>
72 #include <kern/kalloc.h>
73 #include <kern/queue.h>
74 #include <kern/spl.h>
75 #include <mach/sync_policy.h>
76 #include <kern/sched_prim.h>
77
78 #include <kern/wait_queue.h>
79
80 /* forward declarations */
81 static boolean_t wait_queue_member_locked(
82 wait_queue_t wq,
83 wait_queue_set_t wq_set);
84
85 void wait_queue_unlink_one(
86 wait_queue_t wq,
87 wait_queue_set_t *wq_setp);
88
89 kern_return_t wait_queue_set_unlink_all_nofree(
90 wait_queue_set_t wq_set);
91
92 /*
93 * Routine: wait_queue_init
94 * Purpose:
95 * Initialize a previously allocated wait queue.
96 * Returns:
97 * KERN_SUCCESS - The wait_queue_t was initialized
98 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
99 */
100 kern_return_t
101 wait_queue_init(
102 wait_queue_t wq,
103 int policy)
104 {
105 if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
106 return KERN_INVALID_ARGUMENT;
107
108 wq->wq_fifo = TRUE;
109 wq->wq_type = _WAIT_QUEUE_inited;
110 queue_init(&wq->wq_queue);
111 hw_lock_init(&wq->wq_interlock);
112 return KERN_SUCCESS;
113 }
114
115 /*
116 * Routine: wait_queue_alloc
117 * Purpose:
118 *	Allocate and initialize a wait queue for use outside
119 *	of the Mach part of the kernel.
120 * Conditions:
121 * Nothing locked - can block.
122 * Returns:
123 * The allocated and initialized wait queue
124 * WAIT_QUEUE_NULL if there is a resource shortage
125 */
126 wait_queue_t
127 wait_queue_alloc(
128 int policy)
129 {
130 wait_queue_t wq;
131 kern_return_t ret;
132
133 wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
134 if (wq != WAIT_QUEUE_NULL) {
135 ret = wait_queue_init(wq, policy);
136 if (ret != KERN_SUCCESS) {
137 kfree(wq, sizeof(struct wait_queue));
138 wq = WAIT_QUEUE_NULL;
139 }
140 }
141 return wq;
142 }
143
144 /*
145 * Routine: wait_queue_free
146 * Purpose:
147 * Free an allocated wait queue.
148 * Conditions:
149 * May block.
150 */
151 kern_return_t
152 wait_queue_free(
153 wait_queue_t wq)
154 {
155 if (!wait_queue_is_queue(wq))
156 return KERN_INVALID_ARGUMENT;
157 if (!queue_empty(&wq->wq_queue))
158 return KERN_FAILURE;
159 kfree(wq, sizeof(struct wait_queue));
160 return KERN_SUCCESS;
161 }
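
/*
 * Illustrative sketch (not part of the original file): how a client
 * outside the Mach core might allocate and release a private wait
 * queue with the routines above.  The "example_" names below are
 * hypothetical; only wait_queue_alloc/wait_queue_free and the
 * SYNC_POLICY_FIFO policy come from this interface.
 */
#if 0	/* example only */
static wait_queue_t example_wq;

static kern_return_t
example_wq_setup(void)
{
	example_wq = wait_queue_alloc(SYNC_POLICY_FIFO);
	if (example_wq == WAIT_QUEUE_NULL)
		return KERN_RESOURCE_SHORTAGE;
	return KERN_SUCCESS;
}

static void
example_wq_teardown(void)
{
	/* wait_queue_free fails with KERN_FAILURE if threads are still queued */
	(void) wait_queue_free(example_wq);
	example_wq = WAIT_QUEUE_NULL;
}
#endif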
162
163 /*
164 * Routine: wait_queue_set_init
165 * Purpose:
166 * Initialize a previously allocated wait queue set.
167 * Returns:
168 * KERN_SUCCESS - The wait_queue_set_t was initialized
169 * KERN_INVALID_ARGUMENT - The policy parameter was invalid
170 */
171 kern_return_t
172 wait_queue_set_init(
173 wait_queue_set_t wqset,
174 int policy)
175 {
176 kern_return_t ret;
177
178 ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
179 if (ret != KERN_SUCCESS)
180 return ret;
181
182 wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
183 if (policy & SYNC_POLICY_PREPOST)
184 wqset->wqs_wait_queue.wq_isprepost = TRUE;
185 else
186 wqset->wqs_wait_queue.wq_isprepost = FALSE;
187 queue_init(&wqset->wqs_setlinks);
188 wqset->wqs_refcount = 0;
189 return KERN_SUCCESS;
190 }
191
192
193 kern_return_t
194 wait_queue_sub_init(
195 wait_queue_set_t wqset,
196 int policy)
197 {
198 return wait_queue_set_init(wqset, policy);
199 }
200
201 kern_return_t
202 wait_queue_sub_clearrefs(
203 wait_queue_set_t wq_set)
204 {
205 if (!wait_queue_is_set(wq_set))
206 return KERN_INVALID_ARGUMENT;
207
208 wqs_lock(wq_set);
209 wq_set->wqs_refcount = 0;
210 wqs_unlock(wq_set);
211 return KERN_SUCCESS;
212 }
213
214 /*
215 * Routine: wait_queue_set_alloc
216 * Purpose:
217 * Allocate and initialize a wait queue set for
218 * use outside of the mach part of the kernel.
219 * Conditions:
220 * May block.
221 * Returns:
222 * The allocated and initialized wait queue set
223 * WAIT_QUEUE_SET_NULL if there is a resource shortage
224 */
225 wait_queue_set_t
226 wait_queue_set_alloc(
227 int policy)
228 {
229 wait_queue_set_t wq_set;
230
231 wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
232 if (wq_set != WAIT_QUEUE_SET_NULL) {
233 kern_return_t ret;
234
235 ret = wait_queue_set_init(wq_set, policy);
236 if (ret != KERN_SUCCESS) {
237 kfree(wq_set, sizeof(struct wait_queue_set));
238 wq_set = WAIT_QUEUE_SET_NULL;
239 }
240 }
241 return wq_set;
242 }
243
244 /*
245 * Routine: wait_queue_set_free
246 * Purpose:
247 * Free an allocated wait queue set
248 * Conditions:
249 * May block.
250 */
251 kern_return_t
252 wait_queue_set_free(
253 wait_queue_set_t wq_set)
254 {
255 if (!wait_queue_is_set(wq_set))
256 return KERN_INVALID_ARGUMENT;
257
258 if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
259 return KERN_FAILURE;
260
261 kfree(wq_set, sizeof(struct wait_queue_set));
262 return KERN_SUCCESS;
263 }
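
/*
 * Illustrative sketch (not in the original source): allocating a
 * prepost-capable wait queue set, as a port-set-like object might,
 * and tearing it down again.  SYNC_POLICY_FIFO and
 * SYNC_POLICY_PREPOST come from <mach/sync_policy.h>; everything
 * named "example_" is hypothetical.
 */
#if 0	/* example only */
static wait_queue_set_t
example_wqs_create(void)
{
	/* returns WAIT_QUEUE_SET_NULL on resource shortage */
	return wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
}

static void
example_wqs_destroy(wait_queue_set_t wqs)
{
	/* unlink every member queue first, then release the set */
	(void) wait_queue_set_unlink_all(wqs);
	(void) wait_queue_set_free(wqs);
}
#endif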
264
265
266 /*
267 *
268 * Routine: wait_queue_set_size
269 * Routine: wait_queue_link_size
270 * Purpose:
271 * Return the size of opaque wait queue structures
272 */
273 unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
274 unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
275
276 /* declare a unique type for wait queue link structures */
277 static unsigned int _wait_queue_link;
278 static unsigned int _wait_queue_unlinked;
279
280 #define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
281 #define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
282
283 #define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
284 WQASSERT(((wqe)->wqe_queue == (wq) && \
285 queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
286 "wait queue element list corruption: wq=%#x, wqe=%#x", \
287 (wq), (wqe))
288
289 #define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
290 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
291 (queue_t)(wql) : &(wql)->wql_setlinks)))
292
293 #define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
294 ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
295 (queue_t)(wql) : &(wql)->wql_setlinks)))
296
297 #define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
298 WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
299 ((wql)->wql_setqueue == (wqs)) && \
300 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
301 (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
302 "wait queue set links corruption: wqs=%#x, wql=%#x", \
303 (wqs), (wql))
304
305 #if defined(_WAIT_QUEUE_DEBUG_)
306
307 #define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
308
309 #define WAIT_QUEUE_CHECK(wq) \
310 MACRO_BEGIN \
311 queue_t q2 = &(wq)->wq_queue; \
312 wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
313 while (!queue_end(q2, (queue_entry_t)wqe2)) { \
314 WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
315 wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
316 } \
317 MACRO_END
318
319 #define WAIT_QUEUE_SET_CHECK(wqs) \
320 MACRO_BEGIN \
321 queue_t q2 = &(wqs)->wqs_setlinks; \
322 wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
323 while (!queue_end(q2, (queue_entry_t)wql2)) { \
324 WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
325 wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
326 } \
327 MACRO_END
328
329 #else /* !_WAIT_QUEUE_DEBUG_ */
330
331 #define WQASSERT(e, s, p0, p1) assert(e)
332
333 #define WAIT_QUEUE_CHECK(wq)
334 #define WAIT_QUEUE_SET_CHECK(wqs)
335
336 #endif /* !_WAIT_QUEUE_DEBUG_ */
337
338 /*
339 * Routine: wait_queue_member_locked
340 * Purpose:
341  *	Indicate whether this wait queue is linked into the given set queue
342 * Conditions:
343 * The wait queue is locked
344 * The set queue is just that, a set queue
345 */
346 static boolean_t
347 wait_queue_member_locked(
348 wait_queue_t wq,
349 wait_queue_set_t wq_set)
350 {
351 wait_queue_element_t wq_element;
352 queue_t q;
353
354 assert(wait_queue_held(wq));
355 assert(wait_queue_is_set(wq_set));
356
357 q = &wq->wq_queue;
358
359 wq_element = (wait_queue_element_t) queue_first(q);
360 while (!queue_end(q, (queue_entry_t)wq_element)) {
361 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
362 if ((wq_element->wqe_type == WAIT_QUEUE_LINK)) {
363 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
364
365 if (wql->wql_setqueue == wq_set)
366 return TRUE;
367 }
368 wq_element = (wait_queue_element_t)
369 queue_next((queue_t) wq_element);
370 }
371 return FALSE;
372 }
373
374
375 /*
376 * Routine: wait_queue_member
377 * Purpose:
378  *	Indicate whether this wait queue is linked into the given set queue
379 * Conditions:
380 * The set queue is just that, a set queue
381 */
382 boolean_t
383 wait_queue_member(
384 wait_queue_t wq,
385 wait_queue_set_t wq_set)
386 {
387 boolean_t ret;
388 spl_t s;
389
390 if (!wait_queue_is_set(wq_set))
391 return FALSE;
392
393 s = splsched();
394 wait_queue_lock(wq);
395 ret = wait_queue_member_locked(wq, wq_set);
396 wait_queue_unlock(wq);
397 splx(s);
398
399 return ret;
400 }
401
402
403 /*
404 * Routine: wait_queue_link_noalloc
405 * Purpose:
406 *	Insert a set wait queue into a wait queue.  This
407 *	requires us to link the two together using the caller-
408 *	provided wait_queue_link structure (no allocation here).
409 * Conditions:
410 * The wait queue being inserted must be inited as a set queue
411 */
412 kern_return_t
413 wait_queue_link_noalloc(
414 wait_queue_t wq,
415 wait_queue_set_t wq_set,
416 wait_queue_link_t wql)
417 {
418 wait_queue_element_t wq_element;
419 queue_t q;
420 spl_t s;
421
422 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
423 return KERN_INVALID_ARGUMENT;
424
425 /*
426	 * There are probably fewer threads and sets associated with
427	 * the wait queue than there are wait queues associated with
428	 * the set.  So let's validate it that way.
429 */
430 s = splsched();
431 wait_queue_lock(wq);
432 q = &wq->wq_queue;
433 wq_element = (wait_queue_element_t) queue_first(q);
434 while (!queue_end(q, (queue_entry_t)wq_element)) {
435 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
436 if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
437 ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
438 wait_queue_unlock(wq);
439 splx(s);
440 return KERN_ALREADY_IN_SET;
441 }
442 wq_element = (wait_queue_element_t)
443 queue_next((queue_t) wq_element);
444 }
445
446 /*
447 * Not already a member, so we can add it.
448 */
449 wqs_lock(wq_set);
450
451 WAIT_QUEUE_SET_CHECK(wq_set);
452
453 wql->wql_queue = wq;
454 queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
455 wql->wql_setqueue = wq_set;
456 queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
457 wql->wql_type = WAIT_QUEUE_LINK;
458
459 wqs_unlock(wq_set);
460 wait_queue_unlock(wq);
461 splx(s);
462
463 return KERN_SUCCESS;
464 }
465
466 /*
467 * Routine: wait_queue_link
468 * Purpose:
469 * Insert a set wait queue into a wait queue. This
470 * requires us to link the two together using a wait_queue_link
471 * structure that we allocate.
472 * Conditions:
473 * The wait queue being inserted must be inited as a set queue
474 */
475 kern_return_t
476 wait_queue_link(
477 wait_queue_t wq,
478 wait_queue_set_t wq_set)
479 {
480 wait_queue_link_t wql;
481 kern_return_t ret;
482
483 wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
484 if (wql == WAIT_QUEUE_LINK_NULL)
485 return KERN_RESOURCE_SHORTAGE;
486
487 ret = wait_queue_link_noalloc(wq, wq_set, wql);
488 if (ret != KERN_SUCCESS)
489 kfree(wql, sizeof(struct wait_queue_link));
490
491 return ret;
492 }
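
/*
 * Illustrative sketch (not in the original source): adding a member
 * wait queue to a set and tolerating a queue that is already linked.
 * Uses only wait_queue_member/wait_queue_link from this file; the
 * surrounding function is hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_add_member(wait_queue_t wq, wait_queue_set_t wqs)
{
	kern_return_t kr;

	if (wait_queue_member(wq, wqs))
		return KERN_SUCCESS;		/* nothing to do */

	kr = wait_queue_link(wq, wqs);		/* may block in kalloc */
	if (kr == KERN_ALREADY_IN_SET)		/* raced with another linker */
		kr = KERN_SUCCESS;
	return kr;
}
#endif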
493
494
495 /*
496 *	Routine:	wait_queue_unlink_locked
497 * Purpose:
498 * Undo the linkage between a wait queue and a set.
499 */
500 static void
501 wait_queue_unlink_locked(
502 wait_queue_t wq,
503 wait_queue_set_t wq_set,
504 wait_queue_link_t wql)
505 {
506 assert(wait_queue_held(wq));
507 assert(wait_queue_held(&wq_set->wqs_wait_queue));
508
509 wql->wql_queue = WAIT_QUEUE_NULL;
510 queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
511 wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
512 queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
513 wql->wql_type = WAIT_QUEUE_UNLINKED;
514
515 WAIT_QUEUE_CHECK(wq);
516 WAIT_QUEUE_SET_CHECK(wq_set);
517 }
518
519 /*
520 * Routine: wait_queue_unlink
521 * Purpose:
522 * Remove the linkage between a wait queue and a set,
523 * freeing the linkage structure.
524 * Conditions:
525 *	The wait queue must currently be a member of the set queue
526 */
527 kern_return_t
528 wait_queue_unlink(
529 wait_queue_t wq,
530 wait_queue_set_t wq_set)
531 {
532 wait_queue_element_t wq_element;
533 wait_queue_link_t wql;
534 queue_t q;
535 spl_t s;
536
537 if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
538 return KERN_INVALID_ARGUMENT;
539 }
540 s = splsched();
541 wait_queue_lock(wq);
542
543 q = &wq->wq_queue;
544 wq_element = (wait_queue_element_t) queue_first(q);
545 while (!queue_end(q, (queue_entry_t)wq_element)) {
546 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
547 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
548 wql = (wait_queue_link_t)wq_element;
549
550 if (wql->wql_setqueue == wq_set) {
551 wqs_lock(wq_set);
552 wait_queue_unlink_locked(wq, wq_set, wql);
553 wqs_unlock(wq_set);
554 wait_queue_unlock(wq);
555 splx(s);
556 kfree(wql, sizeof(struct wait_queue_link));
557 return KERN_SUCCESS;
558 }
559 }
560 wq_element = (wait_queue_element_t)
561 queue_next((queue_t) wq_element);
562 }
563 wait_queue_unlock(wq);
564 splx(s);
565 return KERN_NOT_IN_SET;
566 }
567
568
569 /*
570 * Routine: wait_queue_unlinkall_nofree
571 * Purpose:
572 * Remove the linkage between a wait queue and all its
573 * sets. The caller is responsible for freeing
574 * the wait queue link structures.
575 */
576
577 kern_return_t
578 wait_queue_unlinkall_nofree(
579 wait_queue_t wq)
580 {
581 wait_queue_element_t wq_element;
582 wait_queue_element_t wq_next_element;
583 wait_queue_set_t wq_set;
584 wait_queue_link_t wql;
585 queue_head_t links_queue_head;
586 queue_t links = &links_queue_head;
587 queue_t q;
588 spl_t s;
589
590 if (!wait_queue_is_queue(wq)) {
591 return KERN_INVALID_ARGUMENT;
592 }
593
594 queue_init(links);
595
596 s = splsched();
597 wait_queue_lock(wq);
598
599 q = &wq->wq_queue;
600
601 wq_element = (wait_queue_element_t) queue_first(q);
602 while (!queue_end(q, (queue_entry_t)wq_element)) {
603 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
604 wq_next_element = (wait_queue_element_t)
605 queue_next((queue_t) wq_element);
606
607 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
608 wql = (wait_queue_link_t)wq_element;
609 wq_set = wql->wql_setqueue;
610 wqs_lock(wq_set);
611 wait_queue_unlink_locked(wq, wq_set, wql);
612 wqs_unlock(wq_set);
613 }
614 wq_element = wq_next_element;
615 }
616 wait_queue_unlock(wq);
617 splx(s);
618 return(KERN_SUCCESS);
619 }
620
621
622 /*
623 * Routine: wait_queue_unlink_all
624 * Purpose:
625 * Remove the linkage between a wait queue and all its sets.
626 * All the linkage structures are freed.
627 * Conditions:
628 * Nothing of interest locked.
629 */
630
631 kern_return_t
632 wait_queue_unlink_all(
633 wait_queue_t wq)
634 {
635 wait_queue_element_t wq_element;
636 wait_queue_element_t wq_next_element;
637 wait_queue_set_t wq_set;
638 wait_queue_link_t wql;
639 queue_head_t links_queue_head;
640 queue_t links = &links_queue_head;
641 queue_t q;
642 spl_t s;
643
644 if (!wait_queue_is_queue(wq)) {
645 return KERN_INVALID_ARGUMENT;
646 }
647
648 queue_init(links);
649
650 s = splsched();
651 wait_queue_lock(wq);
652
653 q = &wq->wq_queue;
654
655 wq_element = (wait_queue_element_t) queue_first(q);
656 while (!queue_end(q, (queue_entry_t)wq_element)) {
657 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
658 wq_next_element = (wait_queue_element_t)
659 queue_next((queue_t) wq_element);
660
661 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
662 wql = (wait_queue_link_t)wq_element;
663 wq_set = wql->wql_setqueue;
664 wqs_lock(wq_set);
665 wait_queue_unlink_locked(wq, wq_set, wql);
666 wqs_unlock(wq_set);
667 enqueue(links, &wql->wql_links);
668 }
669 wq_element = wq_next_element;
670 }
671 wait_queue_unlock(wq);
672 splx(s);
673
674 while(!queue_empty(links)) {
675 wql = (wait_queue_link_t) dequeue(links);
676 kfree(wql, sizeof(struct wait_queue_link));
677 }
678
679 return(KERN_SUCCESS);
680 }
681
682 /*
683 * Routine: wait_queue_set_unlink_all_nofree
684 * Purpose:
685 * Remove the linkage between a set wait queue and all its
686 * member wait queues. The link structures are not freed, nor
687 * returned. It is the caller's responsibility to track and free
688 * them.
689 * Conditions:
690 *	The wait queue must be a set queue
691 */
692 kern_return_t
693 wait_queue_set_unlink_all_nofree(
694 wait_queue_set_t wq_set)
695 {
696 wait_queue_link_t wql;
697 wait_queue_t wq;
698 queue_t q;
699 spl_t s;
700
701 if (!wait_queue_is_set(wq_set)) {
702 return KERN_INVALID_ARGUMENT;
703 }
704
705 retry:
706 s = splsched();
707 wqs_lock(wq_set);
708
709 q = &wq_set->wqs_setlinks;
710
711 wql = (wait_queue_link_t)queue_first(q);
712 while (!queue_end(q, (queue_entry_t)wql)) {
713 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
714 wq = wql->wql_queue;
715 if (wait_queue_lock_try(wq)) {
716 wait_queue_unlink_locked(wq, wq_set, wql);
717 wait_queue_unlock(wq);
718 wql = (wait_queue_link_t)queue_first(q);
719 } else {
720 wqs_unlock(wq_set);
721 splx(s);
722 delay(1);
723 goto retry;
724 }
725 }
726 wqs_unlock(wq_set);
727 splx(s);
728
729 return(KERN_SUCCESS);
730 }
731
732 /* legacy interface naming */
733 kern_return_t
734 wait_subqueue_unlink_all(
735 wait_queue_set_t wq_set)
736 {
737 return wait_queue_set_unlink_all_nofree(wq_set);
738 }
739
740
741 /*
742 * Routine: wait_queue_set_unlink_all
743 * Purpose:
744 * Remove the linkage between a set wait queue and all its
745 * member wait queues. The link structures are freed.
746 * Conditions:
747 * The wait queue must be a set
748 */
749 kern_return_t
750 wait_queue_set_unlink_all(
751 wait_queue_set_t wq_set)
752 {
753 wait_queue_link_t wql;
754 wait_queue_t wq;
755 queue_t q;
756 queue_head_t links_queue_head;
757 queue_t links = &links_queue_head;
758 spl_t s;
759
760 if (!wait_queue_is_set(wq_set)) {
761 return KERN_INVALID_ARGUMENT;
762 }
763
764 queue_init(links);
765
766 retry:
767 s = splsched();
768 wqs_lock(wq_set);
769
770 q = &wq_set->wqs_setlinks;
771
772 wql = (wait_queue_link_t)queue_first(q);
773 while (!queue_end(q, (queue_entry_t)wql)) {
774 WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
775 wq = wql->wql_queue;
776 if (wait_queue_lock_try(wq)) {
777 wait_queue_unlink_locked(wq, wq_set, wql);
778 wait_queue_unlock(wq);
779 enqueue(links, &wql->wql_links);
780 wql = (wait_queue_link_t)queue_first(q);
781 } else {
782 wqs_unlock(wq_set);
783 splx(s);
784 delay(1);
785 goto retry;
786 }
787 }
788 wqs_unlock(wq_set);
789 splx(s);
790
791 while (!queue_empty (links)) {
792 wql = (wait_queue_link_t) dequeue(links);
793 kfree(wql, sizeof(struct wait_queue_link));
794 }
795 return(KERN_SUCCESS);
796 }
797
798
799 /*
800 * Routine: wait_queue_unlink_one
801 * Purpose:
802 * Find and unlink one set wait queue
803 * Conditions:
804 * Nothing of interest locked.
805 */
806 void
807 wait_queue_unlink_one(
808 wait_queue_t wq,
809 wait_queue_set_t *wq_setp)
810 {
811 wait_queue_element_t wq_element;
812 queue_t q;
813 spl_t s;
814
815 s = splsched();
816 wait_queue_lock(wq);
817
818 q = &wq->wq_queue;
819
820 wq_element = (wait_queue_element_t) queue_first(q);
821 while (!queue_end(q, (queue_entry_t)wq_element)) {
822
823 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
824 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
825 wait_queue_set_t wq_set = wql->wql_setqueue;
826
827 wqs_lock(wq_set);
828 wait_queue_unlink_locked(wq, wq_set, wql);
829 wqs_unlock(wq_set);
830 wait_queue_unlock(wq);
831 splx(s);
832 kfree(wql,sizeof(struct wait_queue_link));
833 *wq_setp = wq_set;
834 return;
835 }
836
837 wq_element = (wait_queue_element_t)
838 queue_next((queue_t) wq_element);
839 }
840 wait_queue_unlock(wq);
841 splx(s);
842 *wq_setp = WAIT_QUEUE_SET_NULL;
843 }
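
/*
 * Illustrative sketch (not in the original source): tearing down a
 * wait queue by peeling off its set memberships one at a time with
 * wait_queue_unlink_one until none remain.  The loop shape mirrors
 * what a tear-down path might do; the function name is made up.
 */
#if 0	/* example only */
static void
example_unlink_all_members(wait_queue_t wq)
{
	wait_queue_set_t wqs;

	for (;;) {
		wait_queue_unlink_one(wq, &wqs);
		if (wqs == WAIT_QUEUE_SET_NULL)
			break;		/* no links left on this queue */
		/* the link structure itself was freed by unlink_one */
	}
}
#endif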
844
845
846 /*
847 * Routine: wait_queue_assert_wait64_locked
848 * Purpose:
849 * Insert the current thread into the supplied wait queue
850 * waiting for a particular event to be posted to that queue.
851 *
852 * Conditions:
853 * The wait queue is assumed locked.
854 * The waiting thread is assumed locked.
855 *
856 */
857 __private_extern__ wait_result_t
858 wait_queue_assert_wait64_locked(
859 wait_queue_t wq,
860 event64_t event,
861 wait_interrupt_t interruptible,
862 uint64_t deadline,
863 thread_t thread)
864 {
865 wait_result_t wait_result;
866
867 if (!wait_queue_assert_possible(thread))
868 panic("wait_queue_assert_wait64_locked");
869
870 if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
871 wait_queue_set_t wqs = (wait_queue_set_t)wq;
872
873 if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
874 return(THREAD_AWAKENED);
875 }
876
877 /*
878 * This is the extent to which we currently take scheduling attributes
879	 * into account. If the thread is vm privileged, we stick it at
880 * the front of the queue. Later, these queues will honor the policy
881 * value set at wait_queue_init time.
882 */
883 wait_result = thread_mark_wait_locked(thread, interruptible);
884 if (wait_result == THREAD_WAITING) {
885 if (thread->options & TH_OPT_VMPRIV)
886 enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
887 else
888 enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
889
890 thread->wait_event = event;
891 thread->wait_queue = wq;
892
893 if (deadline != 0) {
894 if (!timer_call_enter(&thread->wait_timer, deadline))
895 thread->wait_timer_active++;
896 thread->wait_timer_is_set = TRUE;
897 }
898 }
899 return(wait_result);
900 }
901
902 /*
903 * Routine: wait_queue_assert_wait
904 * Purpose:
905 * Insert the current thread into the supplied wait queue
906 * waiting for a particular event to be posted to that queue.
907 *
908 * Conditions:
909 * nothing of interest locked.
910 */
911 wait_result_t
912 wait_queue_assert_wait(
913 wait_queue_t wq,
914 event_t event,
915 wait_interrupt_t interruptible,
916 uint64_t deadline)
917 {
918 spl_t s;
919 wait_result_t ret;
920 thread_t thread = current_thread();
921
922 /* If it is an invalid wait queue, you can't wait on it */
923 if (!wait_queue_is_valid(wq))
924 return (thread->wait_result = THREAD_RESTART);
925
926 s = splsched();
927 wait_queue_lock(wq);
928 thread_lock(thread);
929 ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
930 interruptible, deadline, thread);
931 thread_unlock(thread);
932 wait_queue_unlock(wq);
933 splx(s);
934 return(ret);
935 }
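
/*
 * Illustrative sketch (not in the original source): the usual
 * assert-then-block pattern built on wait_queue_assert_wait.  The
 * "example_" names and the flag object are hypothetical;
 * wait_queue_assert_wait, thread_block and THREAD_CONTINUE_NULL are
 * the real primitives being demonstrated.
 */
#if 0	/* example only */
static wait_result_t
example_wait_for_flag(wait_queue_t wq, volatile boolean_t *flag)
{
	wait_result_t wr;

	/* deadline of 0 means wait with no timeout */
	wr = wait_queue_assert_wait(wq, (event_t)flag, THREAD_UNINT, 0);
	if (wr == THREAD_WAITING)
		wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;	/* THREAD_AWAKENED on a normal wakeup */
}
#endif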
936
937 /*
938 * Routine: wait_queue_assert_wait64
939 * Purpose:
940 * Insert the current thread into the supplied wait queue
941 * waiting for a particular event to be posted to that queue.
942 * Conditions:
943 * nothing of interest locked.
944 */
945 wait_result_t
946 wait_queue_assert_wait64(
947 wait_queue_t wq,
948 event64_t event,
949 wait_interrupt_t interruptible,
950 uint64_t deadline)
951 {
952 spl_t s;
953 wait_result_t ret;
954 thread_t thread = current_thread();
955
956	/* If it is an invalid wait queue, you can't wait on it */
957 if (!wait_queue_is_valid(wq))
958 return (thread->wait_result = THREAD_RESTART);
959
960 s = splsched();
961 wait_queue_lock(wq);
962 thread_lock(thread);
963 ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
964 thread_unlock(thread);
965 wait_queue_unlock(wq);
966 splx(s);
967 return(ret);
968 }
969
970 /*
971 * Routine: _wait_queue_select64_all
972 * Purpose:
973 * Select all threads off a wait queue that meet the
974 * supplied criteria.
975 * Conditions:
976 * at splsched
977 * wait queue locked
978 * wake_queue initialized and ready for insertion
979 * possibly recursive
980 * Returns:
981 * a queue of locked threads
982 */
983 static void
984 _wait_queue_select64_all(
985 wait_queue_t wq,
986 event64_t event,
987 queue_t wake_queue)
988 {
989 wait_queue_element_t wq_element;
990 wait_queue_element_t wqe_next;
991 queue_t q;
992
993 q = &wq->wq_queue;
994
995 wq_element = (wait_queue_element_t) queue_first(q);
996 while (!queue_end(q, (queue_entry_t)wq_element)) {
997 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
998 wqe_next = (wait_queue_element_t)
999 queue_next((queue_t) wq_element);
1000
1001 /*
1002 * We may have to recurse if this is a compound wait queue.
1003 */
1004 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1005 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1006 wait_queue_t set_queue;
1007
1008 /*
1009 * We have to check the set wait queue.
1010 */
1011 set_queue = (wait_queue_t)wql->wql_setqueue;
1012 wait_queue_lock(set_queue);
1013 if (set_queue->wq_isprepost) {
1014 wait_queue_set_t wqs = (wait_queue_set_t)set_queue;
1015
1016 /*
1017 * Preposting is only for sets and wait queue
1018 * is the first element of set
1019 */
1020 wqs->wqs_refcount++;
1021 }
1022 if (! wait_queue_empty(set_queue))
1023 _wait_queue_select64_all(set_queue, event, wake_queue);
1024 wait_queue_unlock(set_queue);
1025 } else {
1026
1027 /*
1028		 * Otherwise, it's a thread.  If it is waiting on
1029		 * the event we are posting to this queue, pull
1030		 * it off the queue and stick it in our wake_queue.
1031 */
1032 thread_t t = (thread_t)wq_element;
1033
1034 if (t->wait_event == event) {
1035 thread_lock(t);
1036 remqueue(q, (queue_entry_t) t);
1037 enqueue (wake_queue, (queue_entry_t) t);
1038 t->wait_queue = WAIT_QUEUE_NULL;
1039 t->wait_event = NO_EVENT64;
1040 t->at_safe_point = FALSE;
1041 /* returned locked */
1042 }
1043 }
1044 wq_element = wqe_next;
1045 }
1046 }
1047
1048 /*
1049 * Routine: wait_queue_wakeup64_all_locked
1050 * Purpose:
1051 * Wakeup some number of threads that are in the specified
1052 * wait queue and waiting on the specified event.
1053 * Conditions:
1054 * wait queue already locked (may be released).
1055 * Returns:
1056 * KERN_SUCCESS - Threads were woken up
1057 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1058 */
1059 __private_extern__ kern_return_t
1060 wait_queue_wakeup64_all_locked(
1061 wait_queue_t wq,
1062 event64_t event,
1063 wait_result_t result,
1064 boolean_t unlock)
1065 {
1066 queue_head_t wake_queue_head;
1067 queue_t q = &wake_queue_head;
1068 kern_return_t res;
1069
1070 // assert(wait_queue_held(wq));
1071 if(!wq->wq_interlock.lock_data) { /* (BRINGUP */
1072 panic("wait_queue_wakeup64_all_locked: lock not held on %08X\n", wq); /* (BRINGUP) */
1073 }
1074
1075 queue_init(q);
1076
1077 /*
1078 * Select the threads that we will wake up. The threads
1079 * are returned to us locked and cleanly removed from the
1080 * wait queue.
1081 */
1082 _wait_queue_select64_all(wq, event, q);
1083 if (unlock)
1084 wait_queue_unlock(wq);
1085
1086 /*
1087 * For each thread, set it running.
1088 */
1089 res = KERN_NOT_WAITING;
1090 while (!queue_empty (q)) {
1091 thread_t thread = (thread_t) dequeue(q);
1092 res = thread_go(thread, result);
1093 assert(res == KERN_SUCCESS);
1094 thread_unlock(thread);
1095 }
1096 return res;
1097 }
1098
1099
1100 /*
1101 * Routine: wait_queue_wakeup_all
1102 * Purpose:
1103 * Wakeup some number of threads that are in the specified
1104 * wait queue and waiting on the specified event.
1105 * Conditions:
1106 * Nothing locked
1107 * Returns:
1108 * KERN_SUCCESS - Threads were woken up
1109 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1110 */
1111 kern_return_t
1112 wait_queue_wakeup_all(
1113 wait_queue_t wq,
1114 event_t event,
1115 wait_result_t result)
1116 {
1117 kern_return_t ret;
1118 spl_t s;
1119
1120 if (!wait_queue_is_valid(wq)) {
1121 return KERN_INVALID_ARGUMENT;
1122 }
1123
1124 s = splsched();
1125 wait_queue_lock(wq);
1126 if(!wq->wq_interlock.lock_data) { /* (BRINGUP */
1127 panic("wait_queue_wakeup_all: we did not get the lock on %08X\n", wq); /* (BRINGUP) */
1128 }
1129 ret = wait_queue_wakeup64_all_locked(
1130 wq, (event64_t)((uint32_t)event),
1131 result, TRUE);
1132 /* lock released */
1133 splx(s);
1134 return ret;
1135 }
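
/*
 * Illustrative sketch (not in the original source): the producer side
 * matching the assert-wait example earlier.  After publishing the
 * state change, every thread waiting on the same <queue,event> pair
 * is released with wait_queue_wakeup_all; names are hypothetical.
 */
#if 0	/* example only */
static void
example_post_flag(wait_queue_t wq, volatile boolean_t *flag)
{
	*flag = TRUE;				/* publish the condition */
	(void) wait_queue_wakeup_all(wq, (event_t)flag, THREAD_AWAKENED);
}
#endif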
1136
1137 /*
1138 * Routine: wait_queue_wakeup64_all
1139 * Purpose:
1140 * Wakeup some number of threads that are in the specified
1141 * wait queue and waiting on the specified event.
1142 * Conditions:
1143 * Nothing locked
1144 * Returns:
1145 * KERN_SUCCESS - Threads were woken up
1146 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1147 */
1148 kern_return_t
1149 wait_queue_wakeup64_all(
1150 wait_queue_t wq,
1151 event64_t event,
1152 wait_result_t result)
1153 {
1154 kern_return_t ret;
1155 spl_t s;
1156
1157 if (!wait_queue_is_valid(wq)) {
1158 return KERN_INVALID_ARGUMENT;
1159 }
1160
1161 s = splsched();
1162 wait_queue_lock(wq);
1163 ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1164 /* lock released */
1165 splx(s);
1166 return ret;
1167 }
1168
1169 /*
1170 * Routine: _wait_queue_select64_one
1171 * Purpose:
1172 *	Select the best thread off a wait queue that meets the
1173 * supplied criteria.
1174 * Conditions:
1175 * at splsched
1176 * wait queue locked
1177 * possibly recursive
1178 * Returns:
1179 * a locked thread - if one found
1180 * Note:
1181 * This is where the sync policy of the wait queue comes
1182 * into effect. For now, we just assume FIFO.
1183 */
1184 static thread_t
1185 _wait_queue_select64_one(
1186 wait_queue_t wq,
1187 event64_t event)
1188 {
1189 wait_queue_element_t wq_element;
1190 wait_queue_element_t wqe_next;
1191 thread_t t = THREAD_NULL;
1192 queue_t q;
1193
1194 assert(wq->wq_fifo);
1195
1196 q = &wq->wq_queue;
1197
1198 wq_element = (wait_queue_element_t) queue_first(q);
1199 while (!queue_end(q, (queue_entry_t)wq_element)) {
1200 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1201 wqe_next = (wait_queue_element_t)
1202 queue_next((queue_t) wq_element);
1203
1204 /*
1205 * We may have to recurse if this is a compound wait queue.
1206 */
1207 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1208 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1209 wait_queue_t set_queue;
1210
1211 /*
1212 * We have to check the set wait queue.
1213 */
1214 set_queue = (wait_queue_t)wql->wql_setqueue;
1215 wait_queue_lock(set_queue);
1216 if (! wait_queue_empty(set_queue)) {
1217 t = _wait_queue_select64_one(set_queue, event);
1218 }
1219 wait_queue_unlock(set_queue);
1220 if (t != THREAD_NULL)
1221 return t;
1222 } else {
1223
1224 /*
1225		 * Otherwise, it's a thread.  If it is waiting on
1226		 * the event we are posting to this queue, pull
1227		 * it off the queue and return it (still locked).
1228 */
1229 t = (thread_t)wq_element;
1230 if (t->wait_event == event) {
1231 thread_lock(t);
1232 remqueue(q, (queue_entry_t) t);
1233 t->wait_queue = WAIT_QUEUE_NULL;
1234 t->wait_event = NO_EVENT64;
1235 t->at_safe_point = FALSE;
1236 return t; /* still locked */
1237 }
1238
1239 t = THREAD_NULL;
1240 }
1241 wq_element = wqe_next;
1242 }
1243 return THREAD_NULL;
1244 }
1245
1246 /*
1247 * Routine: wait_queue_peek64_locked
1248 * Purpose:
1249 *	Select the best thread from a wait queue that meets the
1250 * supplied criteria, but leave it on the queue it was
1251 * found on. The thread, and the actual wait_queue the
1252 * thread was found on are identified.
1253 * Conditions:
1254 * at splsched
1255 * wait queue locked
1256 * possibly recursive
1257 * Returns:
1258 * a locked thread - if one found
1259 * a locked waitq - the one the thread was found on
1260 * Note:
1261 * Both the waitq the thread was actually found on, and
1262 * the supplied wait queue, are locked after this.
1263 */
1264 __private_extern__ void
1265 wait_queue_peek64_locked(
1266 wait_queue_t wq,
1267 event64_t event,
1268 thread_t *tp,
1269 wait_queue_t *wqp)
1270 {
1271 wait_queue_element_t wq_element;
1272 wait_queue_element_t wqe_next;
1273 queue_t q;
1274
1275 assert(wq->wq_fifo);
1276
1277 *tp = THREAD_NULL;
1278
1279 q = &wq->wq_queue;
1280
1281 wq_element = (wait_queue_element_t) queue_first(q);
1282 while (!queue_end(q, (queue_entry_t)wq_element)) {
1283 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1284 wqe_next = (wait_queue_element_t)
1285 queue_next((queue_t) wq_element);
1286
1287 /*
1288 * We may have to recurse if this is a compound wait queue.
1289 */
1290 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1291 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1292 wait_queue_t set_queue;
1293
1294 /*
1295 * We have to check the set wait queue.
1296 */
1297 set_queue = (wait_queue_t)wql->wql_setqueue;
1298 wait_queue_lock(set_queue);
1299 if (! wait_queue_empty(set_queue)) {
1300 wait_queue_peek64_locked(set_queue, event, tp, wqp);
1301 }
1302 if (*tp != THREAD_NULL) {
1303 if (*wqp != set_queue)
1304 wait_queue_unlock(set_queue);
1305 return; /* thread and its waitq locked */
1306 }
1307
1308 wait_queue_unlock(set_queue);
1309 } else {
1310
1311 /*
1312		 * Otherwise, it's a thread.  If it is waiting on
1313 * the event we are posting to this queue, return
1314 * it locked, but leave it on the queue.
1315 */
1316 thread_t t = (thread_t)wq_element;
1317
1318 if (t->wait_event == event) {
1319 thread_lock(t);
1320 *tp = t;
1321 *wqp = wq;
1322 return;
1323 }
1324 }
1325 wq_element = wqe_next;
1326 }
1327 }
1328
1329 /*
1330 * Routine: wait_queue_pull_thread_locked
1331 * Purpose:
1332 * Pull a thread that was previously "peeked" off the wait
1333 * queue and (possibly) unlock the waitq.
1334 * Conditions:
1335 * at splsched
1336 * wait queue locked
1337 * thread locked
1338 * Returns:
1339 * with the thread still locked.
1340 */
1341 void
1342 wait_queue_pull_thread_locked(
1343 wait_queue_t waitq,
1344 thread_t thread,
1345 boolean_t unlock)
1346 {
1347
1348 assert(thread->wait_queue == waitq);
1349
1350 remqueue(&waitq->wq_queue, (queue_entry_t)thread );
1351 thread->wait_queue = WAIT_QUEUE_NULL;
1352 thread->wait_event = NO_EVENT64;
1353 thread->at_safe_point = FALSE;
1354 if (unlock)
1355 wait_queue_unlock(waitq);
1356 }
1357
1358
1359 /*
1360 *	Routine:	_wait_queue_select64_thread
1361 * Purpose:
1362 * Look for a thread and remove it from the queues, if
1363 * (and only if) the thread is waiting on the supplied
1364 * <wait_queue, event> pair.
1365 * Conditions:
1366 * at splsched
1367 * wait queue locked
1368 * possibly recursive
1369 * Returns:
1370 * KERN_NOT_WAITING: Thread is not waiting here.
1371 * KERN_SUCCESS: It was, and is now removed (returned locked)
1372 */
1373 static kern_return_t
1374 _wait_queue_select64_thread(
1375 wait_queue_t wq,
1376 event64_t event,
1377 thread_t thread)
1378 {
1379 wait_queue_element_t wq_element;
1380 wait_queue_element_t wqe_next;
1381 kern_return_t res = KERN_NOT_WAITING;
1382 queue_t q = &wq->wq_queue;
1383
1384 thread_lock(thread);
1385 if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1386 remqueue(q, (queue_entry_t) thread);
1387 thread->at_safe_point = FALSE;
1388 thread->wait_event = NO_EVENT64;
1389 thread->wait_queue = WAIT_QUEUE_NULL;
1390 /* thread still locked */
1391 return KERN_SUCCESS;
1392 }
1393 thread_unlock(thread);
1394
1395 /*
1396 * The wait_queue associated with the thread may be one of this
1397 * wait queue's sets. Go see. If so, removing it from
1398 * there is like removing it from here.
1399 */
1400 wq_element = (wait_queue_element_t) queue_first(q);
1401 while (!queue_end(q, (queue_entry_t)wq_element)) {
1402 WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1403 wqe_next = (wait_queue_element_t)
1404 queue_next((queue_t) wq_element);
1405
1406 if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
1407 wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1408 wait_queue_t set_queue;
1409
1410 set_queue = (wait_queue_t)wql->wql_setqueue;
1411 wait_queue_lock(set_queue);
1412 if (! wait_queue_empty(set_queue)) {
1413 res = _wait_queue_select64_thread(set_queue,
1414 event,
1415 thread);
1416 }
1417 wait_queue_unlock(set_queue);
1418 if (res == KERN_SUCCESS)
1419 return KERN_SUCCESS;
1420 }
1421 wq_element = wqe_next;
1422 }
1423 return res;
1424 }
1425
1426
1427 /*
1428 * Routine: wait_queue_wakeup64_identity_locked
1429 * Purpose:
1430 *	Select a single thread that is most-eligible to run and
1431 *	set it running, but return the thread locked.
1432 *
1433 * Conditions:
1434 * at splsched
1435 * wait queue locked
1436 * possibly recursive
1437 * Returns:
1438 * a pointer to the locked thread that was awakened
1439 */
1440 __private_extern__ thread_t
1441 wait_queue_wakeup64_identity_locked(
1442 wait_queue_t wq,
1443 event64_t event,
1444 wait_result_t result,
1445 boolean_t unlock)
1446 {
1447 kern_return_t res;
1448 thread_t thread;
1449
1450 assert(wait_queue_held(wq));
1451
1452
1453 thread = _wait_queue_select64_one(wq, event);
1454 if (unlock)
1455 wait_queue_unlock(wq);
1456
1457 if (thread) {
1458 res = thread_go(thread, result);
1459 assert(res == KERN_SUCCESS);
1460 }
1461 return thread; /* still locked if not NULL */
1462 }
1463
1464
1465 /*
1466 * Routine: wait_queue_wakeup64_one_locked
1467 * Purpose:
1468 *	Select a single thread that is most-eligible to run and
1469 *	set it running.
1470 *
1471 * Conditions:
1472 * at splsched
1473 * wait queue locked
1474 * possibly recursive
1475 * Returns:
1476 * KERN_SUCCESS: It was, and is, now removed.
1477 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1478 */
1479 __private_extern__ kern_return_t
1480 wait_queue_wakeup64_one_locked(
1481 wait_queue_t wq,
1482 event64_t event,
1483 wait_result_t result,
1484 boolean_t unlock)
1485 {
1486 thread_t thread;
1487
1488 assert(wait_queue_held(wq));
1489
1490 thread = _wait_queue_select64_one(wq, event);
1491 if (unlock)
1492 wait_queue_unlock(wq);
1493
1494 if (thread) {
1495 kern_return_t res;
1496
1497 res = thread_go(thread, result);
1498 assert(res == KERN_SUCCESS);
1499 thread_unlock(thread);
1500 return res;
1501 }
1502
1503 return KERN_NOT_WAITING;
1504 }
1505
1506 /*
1507 * Routine: wait_queue_wakeup_one
1508 * Purpose:
1509 * Wakeup the most appropriate thread that is in the specified
1510 * wait queue for the specified event.
1511 * Conditions:
1512 * Nothing locked
1513 * Returns:
1514 * KERN_SUCCESS - Thread was woken up
1515 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1516 */
1517 kern_return_t
1518 wait_queue_wakeup_one(
1519 wait_queue_t wq,
1520 event_t event,
1521 wait_result_t result)
1522 {
1523 thread_t thread;
1524 spl_t s;
1525
1526 if (!wait_queue_is_valid(wq)) {
1527 return KERN_INVALID_ARGUMENT;
1528 }
1529
1530 s = splsched();
1531 wait_queue_lock(wq);
1532 thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
1533 wait_queue_unlock(wq);
1534
1535 if (thread) {
1536 kern_return_t res;
1537
1538 res = thread_go(thread, result);
1539 assert(res == KERN_SUCCESS);
1540 thread_unlock(thread);
1541 splx(s);
1542 return res;
1543 }
1544
1545 splx(s);
1546 return KERN_NOT_WAITING;
1547 }
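
/*
 * Illustrative sketch (not in the original source): a single-handoff
 * wakeup, as a semaphore-style signal might use.  Only one waiter is
 * released per call; KERN_NOT_WAITING tells the caller nobody was
 * queued, so the "signal" can be remembered elsewhere.  Names other
 * than wait_queue_wakeup_one are hypothetical.
 */
#if 0	/* example only */
static boolean_t
example_signal_one(wait_queue_t wq, event_t event)
{
	kern_return_t kr;

	kr = wait_queue_wakeup_one(wq, event, THREAD_AWAKENED);
	return (kr == KERN_SUCCESS);	/* FALSE: no thread was waiting */
}
#endif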
1548
1549 /*
1550 * Routine: wait_queue_wakeup64_one
1551 * Purpose:
1552 * Wakeup the most appropriate thread that is in the specified
1553 * wait queue for the specified event.
1554 * Conditions:
1555 * Nothing locked
1556 * Returns:
1557 * KERN_SUCCESS - Thread was woken up
1558 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1559 */
1560 kern_return_t
1561 wait_queue_wakeup64_one(
1562 wait_queue_t wq,
1563 event64_t event,
1564 wait_result_t result)
1565 {
1566 thread_t thread;
1567 spl_t s;
1568
1569 if (!wait_queue_is_valid(wq)) {
1570 return KERN_INVALID_ARGUMENT;
1571 }
1572 s = splsched();
1573 wait_queue_lock(wq);
1574 thread = _wait_queue_select64_one(wq, event);
1575 wait_queue_unlock(wq);
1576
1577 if (thread) {
1578 kern_return_t res;
1579
1580 res = thread_go(thread, result);
1581 assert(res == KERN_SUCCESS);
1582 thread_unlock(thread);
1583 splx(s);
1584 return res;
1585 }
1586
1587 splx(s);
1588 return KERN_NOT_WAITING;
1589 }
1590
1591
1592 /*
1593 * Routine: wait_queue_wakeup64_thread_locked
1594 * Purpose:
1595 *	Wakeup the particular thread that was specified if and only if
1596 *	it was in this wait queue (or one of its set queues)
1597 * and waiting on the specified event.
1598 *
1599 * This is much safer than just removing the thread from
1600 * whatever wait queue it happens to be on. For instance, it
1601 * may have already been awoken from the wait you intended to
1602 * interrupt and waited on something else (like another
1603 * semaphore).
1604 * Conditions:
1605 * at splsched
1606 * wait queue already locked (may be released).
1607 * Returns:
1608 * KERN_SUCCESS - the thread was found waiting and awakened
1609 * KERN_NOT_WAITING - the thread was not waiting here
1610 */
1611 __private_extern__ kern_return_t
1612 wait_queue_wakeup64_thread_locked(
1613 wait_queue_t wq,
1614 event64_t event,
1615 thread_t thread,
1616 wait_result_t result,
1617 boolean_t unlock)
1618 {
1619 kern_return_t res;
1620
1621 assert(wait_queue_held(wq));
1622
1623 /*
1624 * See if the thread was still waiting there. If so, it got
1625 * dequeued and returned locked.
1626 */
1627 res = _wait_queue_select64_thread(wq, event, thread);
1628 if (unlock)
1629 wait_queue_unlock(wq);
1630
1631 if (res != KERN_SUCCESS)
1632 return KERN_NOT_WAITING;
1633
1634 res = thread_go(thread, result);
1635 assert(res == KERN_SUCCESS);
1636 thread_unlock(thread);
1637 return res;
1638 }
1639
1640 /*
1641 * Routine: wait_queue_wakeup_thread
1642 * Purpose:
1643 *	Wakeup the particular thread that was specified if and only if
1644 *	it was in this wait queue (or one of its set queues)
1645 * and waiting on the specified event.
1646 *
1647 * This is much safer than just removing the thread from
1648 * whatever wait queue it happens to be on. For instance, it
1649 * may have already been awoken from the wait you intended to
1650 * interrupt and waited on something else (like another
1651 * semaphore).
1652 * Conditions:
1653 * nothing of interest locked
1654 * we need to assume spl needs to be raised
1655 * Returns:
1656 * KERN_SUCCESS - the thread was found waiting and awakened
1657 * KERN_NOT_WAITING - the thread was not waiting here
1658 */
1659 kern_return_t
1660 wait_queue_wakeup_thread(
1661 wait_queue_t wq,
1662 event_t event,
1663 thread_t thread,
1664 wait_result_t result)
1665 {
1666 kern_return_t res;
1667 spl_t s;
1668
1669 if (!wait_queue_is_valid(wq)) {
1670 return KERN_INVALID_ARGUMENT;
1671 }
1672
1673 s = splsched();
1674 wait_queue_lock(wq);
1675 res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
1676 wait_queue_unlock(wq);
1677
1678 if (res == KERN_SUCCESS) {
1679 res = thread_go(thread, result);
1680 assert(res == KERN_SUCCESS);
1681 thread_unlock(thread);
1682 splx(s);
1683 return res;
1684 }
1685 splx(s);
1686 return KERN_NOT_WAITING;
1687 }
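
/*
 * Illustrative sketch (not in the original source): waking a specific
 * thread only if it is still parked on this <queue,event> pair, e.g.
 * to cancel one waiter without disturbing the others.  Everything but
 * wait_queue_wakeup_thread is hypothetical.
 */
#if 0	/* example only */
static boolean_t
example_cancel_waiter(wait_queue_t wq, event_t event, thread_t t)
{
	kern_return_t kr;

	/* THREAD_INTERRUPTED lets the waiter distinguish a cancel */
	kr = wait_queue_wakeup_thread(wq, event, t, THREAD_INTERRUPTED);
	return (kr == KERN_SUCCESS);	/* FALSE: it had already moved on */
}
#endif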
1688
1689 /*
1690 * Routine: wait_queue_wakeup64_thread
1691 * Purpose:
1692 *	Wakeup the particular thread that was specified if and only if
1693 *	it was in this wait queue (or one of its set queues)
1694 * and waiting on the specified event.
1695 *
1696 * This is much safer than just removing the thread from
1697 * whatever wait queue it happens to be on. For instance, it
1698 * may have already been awoken from the wait you intended to
1699 * interrupt and waited on something else (like another
1700 * semaphore).
1701 * Conditions:
1702 * nothing of interest locked
1703 * we need to assume spl needs to be raised
1704 * Returns:
1705 * KERN_SUCCESS - the thread was found waiting and awakened
1706 * KERN_NOT_WAITING - the thread was not waiting here
1707 */
1708 kern_return_t
1709 wait_queue_wakeup64_thread(
1710 wait_queue_t wq,
1711 event64_t event,
1712 thread_t thread,
1713 wait_result_t result)
1714 {
1715 kern_return_t res;
1716 spl_t s;
1717
1718 if (!wait_queue_is_valid(wq)) {
1719 return KERN_INVALID_ARGUMENT;
1720 }
1721
1722 s = splsched();
1723 wait_queue_lock(wq);
1724 res = _wait_queue_select64_thread(wq, event, thread);
1725 wait_queue_unlock(wq);
1726
1727 if (res == KERN_SUCCESS) {
1728 res = thread_go(thread, result);
1729 assert(res == KERN_SUCCESS);
1730 thread_unlock(thread);
1731 splx(s);
1732 return res;
1733 }
1734 splx(s);
1735 return KERN_NOT_WAITING;
1736 }