/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */

#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>

#include <kern/sched_prim.h>
#include <kern/wait_queue.h>

void
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	wq->wq_fifo = (policy == SYNC_POLICY_FIFO);
	wq->wq_issub = FALSE;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
}

void
wait_queue_sub_init(
	wait_queue_sub_t wqsub,
	int policy)
{
	wait_queue_init(&wqsub->wqs_wait_queue, policy);
	wqsub->wqs_wait_queue.wq_issub = TRUE;
	queue_init(&wqsub->wqs_sublinks);
}
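
/*
 * Usage sketch (illustrative only, not part of this file's API): a
 * private wait queue is typically embedded in the synchronizing object
 * it serves and initialized once at object-creation time.  The
 * `my_semaphore' structure and its fields are hypothetical, and this
 * assumes wait_queue_t names a pointer to struct wait_queue as
 * declared in wait_queue.h.
 *
 *	struct my_semaphore {
 *		struct wait_queue	sem_wait_queue;
 *		int			sem_count;
 *	};
 *
 *	wait_queue_init(&sem->sem_wait_queue, SYNC_POLICY_FIFO);
 *
 * A queue that other wait queues may be linked into is initialized
 * with wait_queue_sub_init() instead.
 */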

void
wait_queue_link_init(
	wait_queue_link_t wql)
{
	queue_init(&wql->wql_links);
	queue_init(&wql->wql_sublinks);
	wql->wql_queue = WAIT_QUEUE_NULL;
	wql->wql_subqueue = WAIT_QUEUE_SUB_NULL;
	wql->wql_event = NO_EVENT;
}


/*
 *	Routine:	wait_queue_lock
 *	Purpose:
 *		Lock the wait queue.
 *	Conditions:
 *		the appropriate spl level (if any) is already raised.
 */
void
wait_queue_lock(
	wait_queue_t wq)
{
#ifdef __ppc__
	vm_offset_t pc;

	/*
	 * Double the standard lock timeout, because wait queues tend
	 * to iterate over a number of threads - locking each.  If there is
	 * a problem with a thread lock, it normally times out at the wait
	 * queue level first, hiding the real problem.
	 */
	pc = GET_RETURN_PC(&wq);
	if (!hw_lock_to(&wq->wq_interlock, LockTimeOut * 2)) {
		panic("wait queue deadlock detection - wq=0x%x, cpu=%d, ret=0x%x\n", wq, cpu_number(), pc);
	}
#else
	hw_lock_lock(&wq->wq_interlock);
#endif
}

/*
 *	Routine:	wait_queue_lock_try
 *	Purpose:
 *		Try to lock the wait queue without waiting
 *	Conditions:
 *		the appropriate spl level (if any) is already raised.
 *	Returns:
 *		TRUE if the lock was acquired
 *		FALSE if we would have needed to wait
 */
boolean_t
wait_queue_lock_try(
	wait_queue_t wq)
{
	return hw_lock_try(&wq->wq_interlock);
}

/*
 *	Routine:	wait_queue_unlock
 *	Purpose:
 *		unlock the wait queue
 *	Conditions:
 *		The wait queue is assumed locked.
 *		appropriate spl level is still maintained
 */
void
wait_queue_unlock(
	wait_queue_t wq)
{
	assert(hw_lock_held(&wq->wq_interlock));

	hw_lock_unlock(&wq->wq_interlock);
}
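
/*
 * Locking discipline sketch (mirrors the pattern used throughout this
 * file): callers raise the spl level before taking the interlock and
 * restore it only after the lock has been released.
 *
 *	spl_t s;
 *
 *	s = splsched();
 *	wait_queue_lock(wq);
 *	... examine or modify the queue ...
 *	wait_queue_unlock(wq);
 *	splx(s);
 */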

int _wait_queue_subordinate;	/* phoney event for subordinate wait q elements */


/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this sub queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The sub queue is just that, a sub queue
 */
boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_sub_t wq_sub)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_sub(wq_sub));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_subqueue == wq_sub)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	return FALSE;
}


/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate if this sub queue is a member of the queue
 *	Conditions:
 *		The sub queue is just that, a sub queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_sub_t wq_sub)
{
	boolean_t ret;
	spl_t s;

	assert(wait_queue_is_sub(wq_sub));

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_sub);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}

/*
 *	Routine:	wait_queue_link
 *	Purpose:
 *		Insert a subordinate wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that we allocate.
 *	Conditions:
 *		The wait queue being inserted must be initialized as a sub queue
 *		The sub waitq is not already linked
 *
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_sub_t wq_sub)
{
	wait_queue_link_t wql;
	spl_t s;

	assert(wait_queue_is_sub(wq_sub));
	assert(!wait_queue_member(wq, wq_sub));

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	wait_queue_link_init(wql);

	s = splsched();
	wait_queue_lock(wq);
	wqs_lock(wq_sub);

	wql->wql_queue = wq;
	wql->wql_subqueue = wq_sub;
	wql->wql_event = WAIT_QUEUE_SUBORDINATE;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	queue_enter(&wq_sub->wqs_sublinks, wql, wait_queue_link_t, wql_sublinks);

	wqs_unlock(wq_sub);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
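
/*
 * Usage sketch (illustrative): linking a subordinate queue into a wait
 * queue, checking membership, and unlinking it again.
 *
 *	if (wait_queue_link(wq, wq_sub) == KERN_SUCCESS) {
 *		assert(wait_queue_member(wq, wq_sub));
 *		wait_queue_unlink(wq, wq_sub);
 *	}
 */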

/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and its subordinate.
 *	Conditions:
 *		The wait queue being unlinked must be a member sub queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_sub_t wq_sub)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	assert(wait_queue_is_sub(wq_sub));
	assert(wait_queue_member(wq, wq_sub));

	s = splsched();
	wait_queue_lock(wq);
	wqs_lock(wq_sub);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			queue_t sq;

			if (wql->wql_subqueue == wq_sub) {
				sq = &wq_sub->wqs_sublinks;
				queue_remove(q, wql, wait_queue_link_t, wql_links);
				queue_remove(sq, wql, wait_queue_link_t, wql_sublinks);
				wqs_unlock(wq_sub);
				wait_queue_unlock(wq);
				splx(s);
				kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}

		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	panic("wait_queue_unlink");
}

/*
 *	Routine:	wait_queue_unlink_one
 *	Purpose:
 *		Find and unlink one subordinate wait queue
 *	Conditions:
 *		Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_sub_t *wq_subp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_sub_t wq_sub = wql->wql_subqueue;
			queue_t sq;

			wqs_lock(wq_sub);
			sq = &wq_sub->wqs_sublinks;
			queue_remove(q, wql, wait_queue_link_t, wql_links);
			queue_remove(sq, wql, wait_queue_link_t, wql_sublinks);
			wqs_unlock(wq_sub);
			wait_queue_unlock(wq);
			splx(s);
			kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
			*wq_subp = wq_sub;
			return;
		}

		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_subp = WAIT_QUEUE_SUB_NULL;
}
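
/*
 * Usage sketch (illustrative): draining every subordinate linked into
 * a wait queue, e.g. while tearing down the owning object.
 *
 *	wait_queue_sub_t wq_sub;
 *
 *	for (;;) {
 *		wait_queue_unlink_one(wq, &wq_sub);
 *		if (wq_sub == WAIT_QUEUE_SUB_NULL)
 *			break;
 *		... notify the subordinate's owner that it was unlinked ...
 *	}
 */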

/*
 *	Routine:	wait_queue_assert_wait_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *
 */
void
wait_queue_assert_wait_locked(
	wait_queue_t wq,
	event_t event,
	int interruptible,
	boolean_t unlock)
{
	thread_t thread = current_thread();

	thread_lock(thread);

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	if (thread->vm_privilege)
		enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
	else
		enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
	thread->wait_event = event;
	thread->wait_queue = wq;
	thread_mark_wait_locked(thread, interruptible);
	thread_unlock(thread);
	if (unlock)
		wait_queue_unlock(wq);
}

/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		nothing of interest locked.
 */
void
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	int interruptible)
{
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);
	wait_queue_assert_wait_locked(wq, event, interruptible, TRUE);
	/* wait queue unlocked */
	splx(s);
}
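
/*
 * Usage sketch (illustrative): the canonical wait pattern.  The waiter
 * asserts its wait and then blocks via the scheduler; thread_block()
 * comes from sched_prim.h, and the THREAD_UNINT interruptible value
 * and `my_event' are assumptions for illustration.
 *
 *	wait_queue_assert_wait(wq, my_event, THREAD_UNINT);
 *	thread_block((void (*)(void)) 0);
 *	... execution resumes here once the event is posted ...
 */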


/*
 *	Routine:	_wait_queue_select_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *
 *	Returns:
 *		a queue of locked threads
 */
void
_wait_queue_select_all(
	wait_queue_t wq,
	event_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t sub_queue;

			/*
			 * We have to check the subordinate wait queue.
			 */
			sub_queue = (wait_queue_t)wql->wql_subqueue;
			wait_queue_lock(sub_queue);
			if (!wait_queue_empty(sub_queue))
				_wait_queue_select_all(sub_queue, event, wake_queue);
			wait_queue_unlock(sub_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue(wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}

/*
 *	Routine:	wait_queue_wakeup_all_locked
 *	Purpose:
 *		Wake up all threads that are in the specified wait
 *		queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all_locked(
	wait_queue_t wq,
	event_t event,
	int result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t ret = KERN_NOT_WAITING;

	assert(wait_queue_held(wq));

	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	while (!queue_empty(q)) {
		thread_t thread = (thread_t) dequeue(q);
		thread_go_locked(thread, result);
		thread_unlock(thread);
		ret = KERN_SUCCESS;
	}
	return ret;
}


/*
 *	Routine:	wait_queue_wakeup_all
 *	Purpose:
 *		Wake up all threads that are in the specified wait
 *		queue and waiting on the specified event.
 *
 *	Conditions:
 *		Nothing locked
 *
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	int result)
{
	kern_return_t ret;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);

	return ret;
}
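
/*
 * Usage sketch (illustrative): a broadcast post, e.g. a semaphore
 * signal-all.  The event must be the same one the waiters asserted;
 * THREAD_AWAKENED is the conventional wait result for a normal wakeup,
 * and `my_event' is an assumption for illustration.
 *
 *	if (wait_queue_wakeup_all(wq, my_event, THREAD_AWAKENED)
 *					== KERN_NOT_WAITING)
 *		... no one was waiting; record the event another way ...
 */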

/*
 *	Routine:	_wait_queue_select_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
thread_t
_wait_queue_select_one(
	wait_queue_t wq,
	event_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t sub_queue;

			/*
			 * We have to check the subordinate wait queue.
			 */
			sub_queue = (wait_queue_t)wql->wql_subqueue;
			wait_queue_lock(sub_queue);
			if (!wait_queue_empty(sub_queue)) {
				t = _wait_queue_select_one(sub_queue, event);
			}
			wait_queue_unlock(sub_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}

/*
 *	Routine:	wait_queue_peek_locked
 *	Purpose:
 *		Select the best thread from a wait queue that meets the
 *		supplied criteria, but leave it on the queue it was
 *		found on.  The thread, and the actual wait_queue the
 *		thread was found on, are identified.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *		a locked waitq - the one the thread was found on
 *	Note:
 *		Only the waitq the thread was actually found on is locked
 *		after this.
 */
void
wait_queue_peek_locked(
	wait_queue_t wq,
	event_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t sub_queue;

			/*
			 * We have to check the subordinate wait queue.
			 */
			sub_queue = (wait_queue_t)wql->wql_subqueue;
			wait_queue_lock(sub_queue);
			if (!wait_queue_empty(sub_queue)) {
				wait_queue_peek_locked(sub_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL)
				return;		/* thread and its waitq locked */

			wait_queue_unlock(sub_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}

/*
 *	Routine:	wait_queue_pull_thread_locked
 *	Purpose:
 *		Pull a thread that was previously "peeked" off the wait
 *		queue and (possibly) unlock the waitq.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		thread locked
 *	Returns:
 *		with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t) thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT;
	thread->at_safe_point = FALSE;
	if (unlock)
		wait_queue_unlock(waitq);
}
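
/*
 * Usage sketch (illustrative, assuming wq has no subordinate queues so
 * a waiter can only be found on wq itself): peek at the most-eligible
 * waiter, decide, then pull it and set it running.  `my_event' is an
 * assumption for illustration.
 *
 *	thread_t thread;
 *	wait_queue_t found_wq;
 *	spl_t s;
 *
 *	s = splsched();
 *	wait_queue_lock(wq);
 *	wait_queue_peek_locked(wq, my_event, &thread, &found_wq);
 *	if (thread != THREAD_NULL) {
 *		wait_queue_pull_thread_locked(found_wq, thread, TRUE);
 *		thread_go_locked(thread, THREAD_AWAKENED);
 *		thread_unlock(thread);
 *	} else {
 *		wait_queue_unlock(wq);
 *	}
 *	splx(s);
 */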


/*
 *	Routine:	_wait_queue_select_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
kern_return_t
_wait_queue_select_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	assert(wq->wq_fifo);

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's subordinates.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t sub_queue;

			sub_queue = (wait_queue_t)wql->wql_subqueue;
			wait_queue_lock(sub_queue);
			if (!wait_queue_empty(sub_queue)) {
				res = _wait_queue_select_thread(sub_queue,
								event,
								thread);
			}
			wait_queue_unlock(sub_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}


/*
 *	Routine:	wait_queue_wakeup_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running, but return the thread locked.
 *
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a pointer to the locked thread that was awakened
 */
thread_t
wait_queue_wakeup_identity_locked(
	wait_queue_t wq,
	event_t event,
	int result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread)
		thread_go_locked(thread, result);
	return thread;	/* still locked if not NULL */
}


/*
 *	Routine:	wait_queue_wakeup_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_SUCCESS - a thread was found waiting and awakened
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one_locked(
	wait_queue_t wq,
	event_t event,
	int result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		thread_go_locked(thread, result);
		thread_unlock(thread);
		return KERN_SUCCESS;
	}

	return KERN_NOT_WAITING;
}

/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *
 *	Conditions:
 *		Nothing locked
 *
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	int result)
{
	thread_t thread;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		thread_go_locked(thread, result);
		thread_unlock(thread);
		splx(s);
		return KERN_SUCCESS;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
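
/*
 * Usage sketch (illustrative): a post path that releases exactly one
 * waiter, e.g. a semaphore signal.  The `sem_count' bookkeeping and
 * `my_event' are hypothetical.
 *
 *	if (wait_queue_wakeup_one(wq, my_event, THREAD_AWAKENED)
 *					== KERN_NOT_WAITING)
 *		sem->sem_count++;	... no waiter; bank the signal ...
 */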


/*
 *	Routine:	wait_queue_wakeup_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its subordinate queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread_locked(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	int result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	thread_go_locked(thread, result);
	thread_unlock(thread);
	return KERN_SUCCESS;
}

/*
 *	Routine:	wait_queue_wakeup_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its subordinate queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we raise the spl level ourselves, assuming it is not already raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	int result)
{
	kern_return_t res;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		thread_go_locked(thread, result);
		thread_unlock(thread);
		splx(s);
		return KERN_SUCCESS;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
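
/*
 * Usage sketch (illustrative): safely interrupting a specific waiter.
 * If the thread already woke up and went on to wait somewhere else,
 * this is a harmless no-op; THREAD_INTERRUPTED is the conventional
 * wait result for an aborted wait, and `my_event' is an assumption.
 *
 *	if (wait_queue_wakeup_thread(wq, my_event, thread, THREAD_INTERRUPTED)
 *					== KERN_NOT_WAITING)
 *		... the thread was not waiting on <wq, my_event> ...
 */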


/*
 *	Routine:	wait_queue_remove
 *	Purpose:
 *		Normal removal operations from wait queues are driven from
 *		the wait queue to select a thread.  However, if a thread is
 *		interrupted out of a wait, this routine is called to
 *		remove it from whatever wait queue it may be in.
 *
 *	Conditions:
 *		splsched
 *		thread locked on entry and exit, but the lock may be
 *		dropped in between.
 *
 *	Returns:
 *		KERN_SUCCESS - if thread was in a wait queue
 *		KERN_NOT_WAITING - it was not
 */
kern_return_t
wait_queue_remove(
	thread_t thread)
{
	wait_queue_t wq = thread->wait_queue;

	if (wq == WAIT_QUEUE_NULL)
		return KERN_NOT_WAITING;

	/*
	 * have to get the locks again in the right order.
	 */
	thread_unlock(thread);
	wait_queue_lock(wq);
	thread_lock(thread);

	if (thread->wait_queue == wq) {
		remqueue(&wq->wq_queue, (queue_entry_t)thread);
		thread->wait_queue = WAIT_QUEUE_NULL;
		thread->wait_event = NO_EVENT;
		thread->at_safe_point = FALSE;
		wait_queue_unlock(wq);
		return KERN_SUCCESS;
	} else {
		wait_queue_unlock(wq);
		return KERN_NOT_WAITING;	/* anymore */
	}
}