/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */

#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>

/*
 * Routine:	wait_queue_init
 * Purpose:
 *	Initialize a previously allocated wait queue.
 * Returns:
 *	KERN_SUCCESS - The wait_queue_t was initialized
 *	KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	if (!((policy & SYNC_POLICY_ORDER_MASK) == SYNC_POLICY_FIFO))
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}

/*
 * Routine:	wait_queue_alloc
 * Purpose:
 *	Allocate and initialize a wait queue for use outside of
 *	the mach part of the kernel.
 * Conditions:
 *	Nothing locked - can block.
 * Returns:
 *	The allocated and initialized wait queue
 *	WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}

/*
 * Routine:	wait_queue_free
 * Purpose:
 *	Free an allocated wait queue.
 * Conditions:
 *	May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	kfree((vm_offset_t)wq, sizeof(struct wait_queue));
	return KERN_SUCCESS;
}

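/*
 * Editorial usage sketch (not part of the original file): how a client
 * outside the Mach core might drive the pair of routines above.  The
 * function name example_wait_queue_lifecycle is hypothetical; everything
 * else is the API defined in this file and <kern/wait_queue.h>.
 */
#if 0
static kern_return_t
example_wait_queue_lifecycle(void)
{
	wait_queue_t wq;

	/* may block; fails only on a resource shortage */
	wq = wait_queue_alloc(SYNC_POLICY_FIFO);
	if (wq == WAIT_QUEUE_NULL)
		return KERN_RESOURCE_SHORTAGE;

	/* ... threads assert waits on wq and are woken here ... */

	/* returns KERN_FAILURE if threads are still queued on wq */
	return wait_queue_free(wq);
}
#endif
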
/*
 * Routine:	wait_queue_set_init
 * Purpose:
 *	Initialize a previously allocated wait queue set.
 * Returns:
 *	KERN_SUCCESS - The wait_queue_set_t was initialized
 *	KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_isprepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_isprepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}

/* legacy API */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}

/*
 * Routine:	wait_queue_set_alloc
 * Purpose:
 *	Allocate and initialize a wait queue set for
 *	use outside of the mach part of the kernel.
 * Conditions:
 *	May block.
 * Returns:
 *	The allocated and initialized wait queue set
 *	WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}

/*
 * Routine:	wait_queue_set_free
 * Purpose:
 *	Free an allocated wait queue set
 * Conditions:
 *	May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
	return KERN_SUCCESS;
}

kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);
	return KERN_SUCCESS;
}

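/*
 * Editorial usage sketch (not part of the original file): building a wait
 * queue set that remembers ("preposts") wakeups which arrive while nothing
 * is waiting, then discarding any accumulated preposts via the refcount
 * above.  The name example_make_prepost_set is hypothetical.
 */
#if 0
static wait_queue_set_t
example_make_prepost_set(void)
{
	wait_queue_set_t wq_set;

	wq_set = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		/* forget any wakeups posted while no thread was waiting */
		(void) wait_queue_sub_clearrefs(wq_set);
	}
	return wq_set;
}
#endif
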
/*
 * Routine:	wait_queue_set_size
 * Routine:	wait_queue_link_size
 * Purpose:
 *	Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }

/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)

#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
		queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
		"wait queue element list corruption: wq=%#x, wqe=%#x", \
		(wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
		((wql)->wql_setqueue == (wqs)) && \
		((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
		(WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
		"wait queue set links corruption: wqs=%#x, wql=%#x", \
		(wqs), (wql))

#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */

/*
 * Routine:	wait_queue_member_locked
 * Purpose:
 *	Indicate if this set queue is a member of the queue
 * Conditions:
 *	The wait queue is locked
 *	The set queue is just that, a set queue
 */
__private_extern__ boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	return FALSE;
}

/*
 * Routine:	wait_queue_member
 * Purpose:
 *	Indicate if this set queue is a member of the queue
 * Conditions:
 *	The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}

/*
 * Routine:	wait_queue_link_noalloc
 * Purpose:
 *	Insert a set wait queue into a wait queue.  This
 *	requires us to link the two together using the
 *	caller-supplied wait_queue_link structure.
 * Conditions:
 *	The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}

/*
 * Routine:	wait_queue_link
 * Purpose:
 *	Insert a set wait queue into a wait queue.  This
 *	requires us to link the two together using a wait_queue_link
 *	structure that we allocate.
 * Conditions:
 *	The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	ret = wait_queue_link_noalloc(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));

	return ret;
}

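/*
 * Editorial usage sketch (not part of the original file): the preallocated
 * linking path.  This mirrors what wait_queue_link() does internally, but
 * lets the caller obtain the link structure ahead of time so the linking
 * step itself never blocks in kalloc().  example_link_queue_to_set is a
 * hypothetical name.
 */
#if 0
static kern_return_t
example_link_queue_to_set(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t kr;

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	kr = wait_queue_link_noalloc(wq, wq_set, wql);
	if (kr != KERN_SUCCESS)		/* e.g. KERN_ALREADY_IN_SET */
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
	return kr;
}
#endif
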
/*
 * Routine:	wait_queue_unlink_locked
 * Purpose:
 *	Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}

/*
 * Routine:	wait_queue_unlink
 * Purpose:
 *	Remove the linkage between a wait queue and a set,
 *	freeing the linkage structure.
 * Conditions:
 *	The set queue must be a member of the wait queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}

/*
 * Routine:	wait_queue_unlinkall_nofree
 * Purpose:
 *	Remove the linkage between a wait queue and all its
 *	sets.  The caller is responsible for freeing
 *	the wait queue link structures.
 */

kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);
	return(KERN_SUCCESS);
}

/*
 * Routine:	wait_queue_unlink_all
 * Purpose:
 *	Remove the linkage between a wait queue and all its sets.
 *	All the linkage structures are freed.
 * Conditions:
 *	Nothing of interest locked.
 */

kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
	}

	return(KERN_SUCCESS);
}

/*
 * Routine:	wait_queue_set_unlink_all_nofree
 * Purpose:
 *	Remove the linkage between a set wait queue and all its
 *	member wait queues.  The link structures are not freed, nor
 *	returned.  It is the caller's responsibility to track and free
 *	them.
 * Conditions:
 *	The wait queue must be a set queue
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}

/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}

/*
 * Routine:	wait_queue_set_unlink_all
 * Purpose:
 *	Remove the linkage between a set wait queue and all its
 *	member wait queues.  The link structures are freed.
 * Conditions:
 *	The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}

/*
 * Routine:	wait_queue_unlink_one
 * Purpose:
 *	Find and unlink one set wait queue
 * Conditions:
 *	Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}

/*
 * Routine:	wait_queue_assert_wait64_locked
 * Purpose:
 *	Insert the current thread into the supplied wait queue
 *	waiting for a particular event to be posted to that queue.
 *
 * Conditions:
 *	The wait queue is assumed locked.
 *	The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->vm_privilege)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
		thread->wait_event = event;
		thread->wait_queue = wq;
	}
	return(wait_result);
}

/*
 * Routine:	wait_queue_assert_wait
 * Purpose:
 *	Insert the current thread into the supplied wait queue
 *	waiting for a particular event to be posted to that queue.
 *
 * Conditions:
 *	nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq)) {
		return (cur_thread->wait_result = THREAD_RESTART);
	}

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(
			wq, (event64_t)((uint32_t)event),
			interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}

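/*
 * Editorial usage sketch (not part of the original file): the canonical
 * wait-side pattern.  The caller asserts a wait on a <queue, event> pair
 * and blocks only if the assert left the thread in THREAD_WAITING state;
 * a preposted set, for example, returns THREAD_AWAKENED immediately.
 * example_wait_for_event is a hypothetical name.
 */
#if 0
static wait_result_t
example_wait_for_event(
	wait_queue_t wq,
	event_t event)
{
	wait_result_t wr;

	wr = wait_queue_assert_wait(wq, event, THREAD_UNINT);
	if (wr == THREAD_WAITING)
		wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}
#endif
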
/*
 * Routine:	wait_queue_assert_wait64
 * Purpose:
 *	Insert the current thread into the supplied wait queue
 *	waiting for a particular event to be posted to that queue.
 * Conditions:
 *	nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq)) {
		return (cur_thread->wait_result = THREAD_RESTART);
	}

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}

/*
 * Routine:	_wait_queue_select64_all
 * Purpose:
 *	Select all threads off a wait queue that meet the
 *	supplied criteria.
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	wake_queue initialized and ready for insertion
 *	possibly recursive
 * Returns:
 *	a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets and wait queue
				 * is the first element of set
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue(wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}

/*
 * Routine:	wait_queue_wakeup64_all_locked
 * Purpose:
 *	Wakeup some number of threads that are in the specified
 *	wait queue and waiting on the specified event.
 * Conditions:
 *	wait queue already locked (may be released).
 * Returns:
 *	KERN_SUCCESS - Threads were woken up
 *	KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));
	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty(q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}

/*
 * Routine:	wait_queue_wakeup_all
 * Purpose:
 *	Wakeup some number of threads that are in the specified
 *	wait queue and waiting on the specified event.
 * Conditions:
 *	Nothing locked
 * Returns:
 *	KERN_SUCCESS - Threads were woken up
 *	KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(
			wq, (event64_t)((uint32_t)event),
			result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}

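/*
 * Editorial usage sketch (not part of the original file): the matching
 * wakeup side of the pattern shown after wait_queue_assert_wait() above.
 * A producer posts the same <queue, event> pair, waking either every
 * waiter or just the first eligible one.  example_post_event is a
 * hypothetical name.
 */
#if 0
static void
example_post_event(
	wait_queue_t wq,
	event_t event,
	boolean_t broadcast)
{
	if (broadcast)
		(void) wait_queue_wakeup_all(wq, event, THREAD_AWAKENED);
	else
		(void) wait_queue_wakeup_one(wq, event, THREAD_AWAKENED);
}
#endif
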
/*
 * Routine:	wait_queue_wakeup64_all
 * Purpose:
 *	Wakeup some number of threads that are in the specified
 *	wait queue and waiting on the specified event.
 * Conditions:
 *	Nothing locked
 * Returns:
 *	KERN_SUCCESS - Threads were woken up
 *	KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}

/*
 * Routine:	_wait_queue_select64_one
 * Purpose:
 *	Select the best thread off a wait queue that meets the
 *	supplied criteria.
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	possibly recursive
 * Returns:
 *	a locked thread - if one found
 * Note:
 *	This is where the sync policy of the wait queue comes
 *	into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}

/*
 * Routine:	wait_queue_peek64_locked
 * Purpose:
 *	Select the best thread from a wait queue that meets the
 *	supplied criteria, but leave it on the queue it was
 *	found on.  The thread, and the actual wait_queue the
 *	thread was found on are identified.
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	possibly recursive
 * Returns:
 *	a locked thread - if one found
 *	a locked waitq - the one the thread was found on
 * Note:
 *	Both the waitq the thread was actually found on, and
 *	the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL) {
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;		/* thread and its waitq locked */
			}

			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}

/*
 * Routine:	wait_queue_pull_thread_locked
 * Purpose:
 *	Pull a thread that was previously "peeked" off the wait
 *	queue and (possibly) unlock the waitq.
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	thread locked
 * Returns:
 *	with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
	if (unlock)
		wait_queue_unlock(waitq);
}

/*
 * Routine:	_wait_queue_select64_thread
 * Purpose:
 *	Look for a thread and remove it from the queues, if
 *	(and only if) the thread is waiting on the supplied
 *	<wait_queue, event> pair.
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	possibly recursive
 * Returns:
 *	KERN_NOT_WAITING: Thread is not waiting here.
 *	KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								event,
								thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}

/*
 * Routine:	wait_queue_wakeup64_identity_locked
 * Purpose:
 *	Select a single thread that is most-eligible to run and
 *	set it running.  But return the thread locked.
 *
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	possibly recursive
 * Returns:
 *	a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}

/*
 * Routine:	wait_queue_wakeup64_one_locked
 * Purpose:
 *	Select a single thread that is most-eligible to run and
 *	set it running.
 *
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	possibly recursive
 * Returns:
 *	KERN_SUCCESS: It was, and is, now removed.
 *	KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}

/*
 * Routine:	wait_queue_wakeup_one
 * Purpose:
 *	Wakeup the most appropriate thread that is in the specified
 *	wait queue for the specified event.
 * Conditions:
 *	Nothing locked
 * Returns:
 *	KERN_SUCCESS - Thread was woken up
 *	KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}

/*
 * Routine:	wait_queue_wakeup64_one
 * Purpose:
 *	Wakeup the most appropriate thread that is in the specified
 *	wait queue for the specified event.
 * Conditions:
 *	Nothing locked
 * Returns:
 *	KERN_SUCCESS - Thread was woken up
 *	KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}

/*
 * Routine:	wait_queue_wakeup64_thread_locked
 * Purpose:
 *	Wakeup the particular thread that was specified if and only
 *	if it was in this wait queue (or one of its set queues)
 *	and waiting on the specified event.
 *
 *	This is much safer than just removing the thread from
 *	whatever wait queue it happens to be on.  For instance, it
 *	may have already been awoken from the wait you intended to
 *	interrupt and waited on something else (like another
 *	semaphore).
 * Conditions:
 *	at splsched
 *	wait queue already locked (may be released).
 * Returns:
 *	KERN_SUCCESS - the thread was found waiting and awakened
 *	KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go_locked(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}

/*
 * Routine:	wait_queue_wakeup_thread
 * Purpose:
 *	Wakeup the particular thread that was specified if and only
 *	if it was in this wait queue (or one of its set queues)
 *	and waiting on the specified event.
 *
 *	This is much safer than just removing the thread from
 *	whatever wait queue it happens to be on.  For instance, it
 *	may have already been awoken from the wait you intended to
 *	interrupt and waited on something else (like another
 *	semaphore).
 * Conditions:
 *	nothing of interest locked
 *	we need to assume spl needs to be raised
 * Returns:
 *	KERN_SUCCESS - the thread was found waiting and awakened
 *	KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}

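/*
 * Editorial usage sketch (not part of the original file): aborting one
 * specific thread's wait, the way a teardown path might interrupt a
 * waiter.  KERN_NOT_WAITING is not an error here; it just means the
 * thread had already been woken or moved on to another wait.
 * example_interrupt_waiter is a hypothetical name.
 */
#if 0
static void
example_interrupt_waiter(
	wait_queue_t wq,
	event_t event,
	thread_t thread)
{
	kern_return_t kr;

	kr = wait_queue_wakeup_thread(wq, event, thread, THREAD_INTERRUPTED);
	if (kr == KERN_NOT_WAITING) {
		/* thread was no longer waiting on <wq, event>; nothing to do */
	}
}
#endif
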
/*
 * Routine:	wait_queue_wakeup64_thread
 * Purpose:
 *	Wakeup the particular thread that was specified if and only
 *	if it was in this wait queue (or one of its set queues)
 *	and waiting on the specified event.
 *
 *	This is much safer than just removing the thread from
 *	whatever wait queue it happens to be on.  For instance, it
 *	may have already been awoken from the wait you intended to
 *	interrupt and waited on something else (like another
 *	semaphore).
 * Conditions:
 *	nothing of interest locked
 *	we need to assume spl needs to be raised
 * Returns:
 *	KERN_SUCCESS - the thread was found waiting and awakened
 *	KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}