/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	wait_queue.c (adapted from sched_prim.c)
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Primitives for manipulating wait queues: either global
 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
 */

#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>

/* forward declarations */
static boolean_t wait_queue_member_locked(
			wait_queue_t wq,
			wait_queue_set_t wq_set);

void wait_queue_unlink_one(
			wait_queue_t wq,
			wait_queue_set_t *wq_setp);

kern_return_t wait_queue_set_unlink_all_nofree(
			wait_queue_set_t wq_set);

/*
 * Routine: wait_queue_init
 * Purpose:
 *	Initialize a previously allocated wait queue.
 * Returns:
 *	KERN_SUCCESS - The wait_queue_t was initialized
 *	KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	if ((policy & SYNC_POLICY_ORDER_MASK) != SYNC_POLICY_FIFO)
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}

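/*
 * Usage sketch (illustrative only, not compiled): a client typically
 * embeds a struct wait_queue inside its own synchronizer and calls
 * wait_queue_init() on it.  The struct my_sync type below is
 * hypothetical, shown only to demonstrate the call pattern.
 */
#if 0
struct my_sync {
	struct wait_queue	ms_wait_queue;	/* threads block here */
	int			ms_state;
};

static kern_return_t
my_sync_setup(struct my_sync *ms)
{
	ms->ms_state = 0;
	/* only SYNC_POLICY_FIFO ordering is accepted at present */
	return wait_queue_init(&ms->ms_wait_queue, SYNC_POLICY_FIFO);
}
#endif
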
/*
 * Routine: wait_queue_alloc
 * Purpose:
 *	Allocate and initialize a wait queue for use outside of
 *	the mach part of the kernel.
 * Conditions:
 *	Nothing locked - can block.
 * Returns:
 *	The allocated and initialized wait queue
 *	WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
	int policy)
{
	wait_queue_t wq;
	kern_return_t ret;

	wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
	if (wq != WAIT_QUEUE_NULL) {
		ret = wait_queue_init(wq, policy);
		if (ret != KERN_SUCCESS) {
			kfree(wq, sizeof(struct wait_queue));
			wq = WAIT_QUEUE_NULL;
		}
	}
	return wq;
}

/*
 * Routine: wait_queue_free
 * Purpose:
 *	Free an allocated wait queue.
 * Conditions:
 *	May block.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	if (!wait_queue_is_queue(wq))
		return KERN_INVALID_ARGUMENT;
	if (!queue_empty(&wq->wq_queue))
		return KERN_FAILURE;
	kfree(wq, sizeof(struct wait_queue));
	return KERN_SUCCESS;
}

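/*
 * Usage sketch (illustrative only, not compiled): the alloc/free pair
 * for callers outside the Mach core.  wait_queue_free() returns
 * KERN_FAILURE for a queue that still has elements queued, so all
 * waiters must be woken (and any set links removed) first.
 */
#if 0
static void
example_queue_lifecycle(void)
{
	wait_queue_t wq;

	wq = wait_queue_alloc(SYNC_POLICY_FIFO);
	if (wq == WAIT_QUEUE_NULL)
		return;			/* resource shortage */

	/* ... threads wait on wq and are woken from it ... */

	if (wait_queue_free(wq) != KERN_SUCCESS)
		panic("wait queue still in use");
}
#endif
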
/*
 * Routine: wait_queue_set_init
 * Purpose:
 *	Initialize a previously allocated wait queue set.
 * Returns:
 *	KERN_SUCCESS - The wait_queue_set_t was initialized
 *	KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t ret;

	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (ret != KERN_SUCCESS)
		return ret;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	if (policy & SYNC_POLICY_PREPOST)
		wqset->wqs_wait_queue.wq_isprepost = TRUE;
	else
		wqset->wqs_wait_queue.wq_isprepost = FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}


kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}

kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);
	return KERN_SUCCESS;
}

/*
 * Routine: wait_queue_set_alloc
 * Purpose:
 *	Allocate and initialize a wait queue set for
 *	use outside of the mach part of the kernel.
 * Conditions:
 *	May block.
 * Returns:
 *	The allocated and initialized wait queue set
 *	WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
	int policy)
{
	wait_queue_set_t wq_set;

	wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
	if (wq_set != WAIT_QUEUE_SET_NULL) {
		kern_return_t ret;

		ret = wait_queue_set_init(wq_set, policy);
		if (ret != KERN_SUCCESS) {
			kfree(wq_set, sizeof(struct wait_queue_set));
			wq_set = WAIT_QUEUE_SET_NULL;
		}
	}
	return wq_set;
}

/*
 * Routine: wait_queue_set_free
 * Purpose:
 *	Free an allocated wait queue set
 * Conditions:
 *	May block.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		return KERN_FAILURE;

	kfree(wq_set, sizeof(struct wait_queue_set));
	return KERN_SUCCESS;
}

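/*
 * Usage sketch (illustrative only, not compiled): allocating a wait
 * queue set.  Passing SYNC_POLICY_PREPOST sets wq_isprepost, so a
 * wakeup posted to the set while no thread is waiting is remembered
 * in wqs_refcount and a later wait on the set returns immediately
 * (see wait_queue_assert_wait64_locked below).
 */
#if 0
static wait_queue_set_t
example_make_set(void)
{
	return wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
}
#endif
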
/*
 * Routine: wait_queue_set_size
 * Routine: wait_queue_link_size
 * Purpose:
 *	Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }

/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)

#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
	WQASSERT(((wqe)->wqe_queue == (wq) && \
		queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
		"wait queue element list corruption: wq=%#x, wqe=%#x", \
		(wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
			(queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
	WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
		((wql)->wql_setqueue == (wqs)) && \
		((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
		(WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
		"wait queue set links corruption: wqs=%#x, wql=%#x", \
		(wqs), (wql))

#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
	queue_t q2 = &(wq)->wq_queue; \
	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
	} \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
	queue_t q2 = &(wqs)->wqs_setlinks; \
	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
	while (!queue_end(q2, (queue_entry_t)wql2)) { \
		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
	} \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */

/*
 * Routine: wait_queue_member_locked
 * Purpose:
 *	Indicate if this set queue is a member of the queue
 * Conditions:
 *	The wait queue is locked
 *	The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set)
				return TRUE;
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	return FALSE;
}


/*
 * Routine: wait_queue_member
 * Purpose:
 *	Indicate if this set queue is a member of the queue
 * Conditions:
 *	The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}


/*
 * Routine: wait_queue_link_noalloc
 * Purpose:
 *	Insert a set wait queue into a wait queue.  This
 *	requires us to link the two together using a wait_queue_link
 *	structure that the caller provides.
 * Conditions:
 *	The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}

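/*
 * Usage sketch (illustrative only, not compiled): linking with
 * caller-supplied storage.  A caller that cannot block in kalloc()
 * provides its own wait_queue_link structure; opaque clients can size
 * it with wait_queue_link_size() above.  The embedding structure shown
 * here is hypothetical.
 */
#if 0
struct my_member {
	struct wait_queue	mm_wait_queue;
	struct wait_queue_link	mm_link;	/* caller-owned link storage */
};

static kern_return_t
example_link_noalloc(struct my_member *mm, wait_queue_set_t wq_set)
{
	return wait_queue_link_noalloc(&mm->mm_wait_queue, wq_set, &mm->mm_link);
}
#endif
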
/*
 * Routine: wait_queue_link
 * Purpose:
 *	Insert a set wait queue into a wait queue.  This
 *	requires us to link the two together using a wait_queue_link
 *	structure that we allocate.
 * Conditions:
 *	The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	ret = wait_queue_link_noalloc(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		kfree(wql, sizeof(struct wait_queue_link));

	return ret;
}

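/*
 * Usage sketch (illustrative only, not compiled): the allocating
 * variant pairs with wait_queue_unlink(), which finds and frees the
 * link structure.  wait_queue_member() can confirm the linkage in
 * between.
 */
#if 0
static void
example_link_unlink(wait_queue_t wq, wait_queue_set_t wq_set)
{
	if (wait_queue_link(wq, wq_set) != KERN_SUCCESS)
		return;			/* shortage, or already in the set */

	assert(wait_queue_member(wq, wq_set));

	(void) wait_queue_unlink(wq, wq_set);	/* frees the link */
}
#endif
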
/*
 * Routine: wait_queue_unlink_locked
 * Purpose:
 *	Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}

/*
 * Routine: wait_queue_unlink
 * Purpose:
 *	Remove the linkage between a wait queue and a set,
 *	freeing the linkage structure.
 * Conditions:
 *	The set queue must actually be a member of the wait queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree(wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}


/*
 * Routine: wait_queue_unlinkall_nofree
 * Purpose:
 *	Remove the linkage between a wait queue and all its
 *	sets.  The caller is responsible for freeing
 *	the wait queue link structures.
 */

kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);
	return(KERN_SUCCESS);
}


/*
 * Routine: wait_queue_unlink_all
 * Purpose:
 *	Remove the linkage between a wait queue and all its sets.
 *	All the linkage structures are freed.
 * Conditions:
 *	Nothing of interest locked.
 */

kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree(wql, sizeof(struct wait_queue_link));
	}

	return(KERN_SUCCESS);
}

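/*
 * Usage sketch (illustrative only, not compiled): tearing down a wait
 * queue that may still be linked into sets.  Unlinking first removes
 * and frees the link elements, so wait_queue_free() can succeed,
 * assuming no threads remain queued.
 */
#if 0
static void
example_queue_teardown(wait_queue_t wq)
{
	(void) wait_queue_unlink_all(wq);	/* drop and free all set links */
	(void) wait_queue_free(wq);		/* queue must now be empty */
}
#endif
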
/*
 * Routine: wait_queue_set_unlink_all_nofree
 * Purpose:
 *	Remove the linkage between a set wait queue and all its
 *	member wait queues.  The link structures are not freed, nor
 *	returned.  It is the caller's responsibility to track and free
 *	them.
 * Conditions:
 *	The wait queue being operated on must be a set queue
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	return(KERN_SUCCESS);
}

/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}


/*
 * Routine: wait_queue_set_unlink_all
 * Purpose:
 *	Remove the linkage between a set wait queue and all its
 *	member wait queues.  The link structures are freed.
 * Conditions:
 *	The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

retry:
	s = splsched();
	wqs_lock(wq_set);

	q = &wq_set->wqs_setlinks;

	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree(wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}

/*
 * Routine: wait_queue_unlink_one
 * Purpose:
 *	Find and unlink one set wait queue
 * Conditions:
 *	Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree(wql, sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}

/*
 * Routine: wait_queue_assert_wait64_locked
 * Purpose:
 *	Insert the current thread into the supplied wait queue
 *	waiting for a particular event to be posted to that queue.
 *
 * Conditions:
 *	The wait queue is assumed locked.
 *	The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->options & TH_OPT_VMPRIV)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			if (!timer_call_enter(&thread->wait_timer, deadline))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}

/*
 * Routine: wait_queue_assert_wait
 * Purpose:
 *	Insert the current thread into the supplied wait queue
 *	waiting for a particular event to be posted to that queue.
 *
 * Conditions:
 *	nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, (event64_t)((uint32_t)event),
					interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}

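/*
 * Usage sketch (illustrative only, not compiled): the canonical
 * two-step wait.  The caller asserts the wait (queueing the current
 * thread), releases any locks of its own, and then blocks via
 * thread_block().  Per the usual Mach convention, the event is just
 * the address of some object.
 */
#if 0
static wait_result_t
example_wait(wait_queue_t wq, void *object)
{
	wait_result_t wres;

	wres = wait_queue_assert_wait(wq, (event_t)object,
					THREAD_UNINT, 0 /* no deadline */);
	if (wres == THREAD_WAITING)
		wres = thread_block(THREAD_CONTINUE_NULL);
	return wres;
}
#endif
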
/*
 * Routine: wait_queue_assert_wait64
 * Purpose:
 *	Insert the current thread into the supplied wait queue
 *	waiting for a particular event to be posted to that queue.
 * Conditions:
 *	nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}

/*
 * Routine: _wait_queue_select64_all
 * Purpose:
 *	Select all threads off a wait queue that meet the
 *	supplied criteria.
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	wake_queue initialized and ready for insertion
 *	possibly recursive
 * Returns:
 *	a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets, and the wait
				 * queue is the first element of the set.
				 */
				wqs->wqs_refcount++;
			}
			if (!wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue(wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}

/*
 * Routine: wait_queue_wakeup64_all_locked
 * Purpose:
 *	Wakeup some number of threads that are in the specified
 *	wait queue and waiting on the specified event.
 * Conditions:
 *	wait queue already locked (may be released).
 * Returns:
 *	KERN_SUCCESS - Threads were woken up
 *	KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));
	queue_init(q);

	/*
	 * Select the threads that we will wake up.  The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty(q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}

/*
 * Routine: wait_queue_wakeup_all
 * Purpose:
 *	Wakeup some number of threads that are in the specified
 *	wait queue and waiting on the specified event.
 * Conditions:
 *	Nothing locked
 * Returns:
 *	KERN_SUCCESS - Threads were woken up
 *	KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(
			wq, (event64_t)((uint32_t)event),
			result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}

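/*
 * Usage sketch (illustrative only, not compiled): the wakeup side of
 * the wait pattern above.  Posting the same event address releases
 * every thread queued on it; wait_queue_wakeup_one() below releases
 * just the most eligible waiter instead.
 */
#if 0
static void
example_wakeup(wait_queue_t wq, void *object)
{
	(void) wait_queue_wakeup_all(wq, (event_t)object, THREAD_AWAKENED);
}
#endif
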
/*
 * Routine: wait_queue_wakeup64_all
 * Purpose:
 *	Wakeup some number of threads that are in the specified
 *	wait queue and waiting on the specified event.
 * Conditions:
 *	Nothing locked
 * Returns:
 *	KERN_SUCCESS - Threads were woken up
 *	KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}

/*
 * Routine: _wait_queue_select64_one
 * Purpose:
 *	Select the best thread off a wait queue that meets the
 *	supplied criteria.
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	possibly recursive
 * Returns:
 *	a locked thread - if one found
 * Note:
 *	This is where the sync policy of the wait queue comes
 *	into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (!wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 */
			t = (thread_t)wq_element;
			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}

			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}

/*
 * Routine: wait_queue_peek64_locked
 * Purpose:
 *	Select the best thread from a wait queue that meets the
 *	supplied criteria, but leave it on the queue it was
 *	found on.  The thread, and the actual wait_queue the
 *	thread was found on are identified.
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	possibly recursive
 * Returns:
 *	a locked thread - if one found
 *	a locked waitq - the one the thread was found on
 * Note:
 *	Both the waitq the thread was actually found on, and
 *	the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (!wait_queue_empty(set_queue)) {
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL) {
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;		/* thread and its waitq locked */
			}

			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}

/*
 * Routine: wait_queue_pull_thread_locked
 * Purpose:
 *	Pull a thread that was previously "peeked" off the wait
 *	queue and (possibly) unlock the waitq.
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	thread locked
 * Returns:
 *	with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{

	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread);
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
	if (unlock)
		wait_queue_unlock(waitq);
}

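/*
 * Usage sketch (illustrative only, not compiled): the peek/pull
 * protocol.  A caller can inspect the most-eligible waiter without
 * committing to waking it, then either pull it off the queue or back
 * out.  Runs at splsched with wq locked, per the conditions above;
 * if the thread was found on a set member queue other than wq, wq
 * itself remains locked after the pull.
 */
#if 0
static thread_t
example_peek_then_pull(wait_queue_t wq, event64_t event)
{
	thread_t thread = THREAD_NULL;
	wait_queue_t found_wq = WAIT_QUEUE_NULL;

	wait_queue_peek64_locked(wq, event, &thread, &found_wq);
	if (thread != THREAD_NULL) {
		/* thread and found_wq are locked; commit to removal */
		wait_queue_pull_thread_locked(found_wq, thread, TRUE);
	}
	return thread;		/* still locked if not THREAD_NULL */
}
#endif
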
/*
 * Routine: _wait_queue_select64_thread
 * Purpose:
 *	Look for a thread and remove it from the queues, if
 *	(and only if) the thread is waiting on the supplied
 *	<wait_queue, event> pair.
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	possibly recursive
 * Returns:
 *	KERN_NOT_WAITING: Thread is not waiting here.
 *	KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (!wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								event,
								thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}

/*
 * Routine: wait_queue_wakeup64_identity_locked
 * Purpose:
 *	Select a single thread that is most-eligible to run and
 *	set it running.  But return the thread locked.
 *
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	possibly recursive
 * Returns:
 *	a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;	/* still locked if not NULL */
}

/*
 * Routine: wait_queue_wakeup64_one_locked
 * Purpose:
 *	Select a single thread that is most-eligible to run and
 *	set it running.
 *
 * Conditions:
 *	at splsched
 *	wait queue locked
 *	possibly recursive
 * Returns:
 *	KERN_SUCCESS - the thread was found waiting and awakened
 *	KERN_NOT_WAITING - no thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}

/*
 * Routine: wait_queue_wakeup_one
 * Purpose:
 *	Wakeup the most appropriate thread that is in the specified
 *	wait queue for the specified event.
 * Conditions:
 *	Nothing locked
 * Returns:
 *	KERN_SUCCESS - Thread was woken up
 *	KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}

/*
 * Routine: wait_queue_wakeup64_one
 * Purpose:
 *	Wakeup the most appropriate thread that is in the specified
 *	wait queue for the specified event.
 * Conditions:
 *	Nothing locked
 * Returns:
 *	KERN_SUCCESS - Thread was woken up
 *	KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}

/*
 * Routine: wait_queue_wakeup64_thread_locked
 * Purpose:
 *	Wakeup the particular thread that was specified if and only
 *	if it was in this wait queue (or one of its set queues)
 *	and waiting on the specified event.
 *
 *	This is much safer than just removing the thread from
 *	whatever wait queue it happens to be on.  For instance, it
 *	may have already been awoken from the wait you intended to
 *	interrupt and waited on something else (like another
 *	semaphore).
 * Conditions:
 *	at splsched
 *	wait queue already locked (may be released).
 * Returns:
 *	KERN_SUCCESS - the thread was found waiting and awakened
 *	KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}

/*
 * Routine: wait_queue_wakeup_thread
 * Purpose:
 *	Wakeup the particular thread that was specified if and only
 *	if it was in this wait queue (or one of its set queues)
 *	and waiting on the specified event.
 *
 *	This is much safer than just removing the thread from
 *	whatever wait queue it happens to be on.  For instance, it
 *	may have already been awoken from the wait you intended to
 *	interrupt and waited on something else (like another
 *	semaphore).
 * Conditions:
 *	nothing of interest locked
 *	we need to assume spl needs to be raised
 * Returns:
 *	KERN_SUCCESS - the thread was found waiting and awakened
 *	KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}

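/*
 * Usage sketch (illustrative only, not compiled): a targeted wakeup.
 * Because the thread is awakened only if it is still waiting on this
 * exact <wq,event> pair, KERN_NOT_WAITING is a normal outcome: the
 * thread may already have been awakened, or gone to wait elsewhere.
 */
#if 0
static boolean_t
example_interrupt_wait(wait_queue_t wq, void *object, thread_t thread)
{
	kern_return_t kr;

	kr = wait_queue_wakeup_thread(wq, (event_t)object,
					thread, THREAD_INTERRUPTED);
	return (kr == KERN_SUCCESS);
}
#endif
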
/*
 * Routine: wait_queue_wakeup64_thread
 * Purpose:
 *	Wakeup the particular thread that was specified if and only
 *	if it was in this wait queue (or one of its set queues)
 *	and waiting on the specified event.
 *
 *	This is much safer than just removing the thread from
 *	whatever wait queue it happens to be on.  For instance, it
 *	may have already been awoken from the wait you intended to
 *	interrupt and waited on something else (like another
 *	semaphore).
 * Conditions:
 *	nothing of interest locked
 *	we need to assume spl needs to be raised
 * Returns:
 *	KERN_SUCCESS - the thread was found waiting and awakened
 *	KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}