/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File: wait_queue.c (adapted from sched_prim.c)
 * Author: Avadis Tevanian, Jr.
 * Date: 1986
 *
 * Primitives for manipulating wait queues: either global
 * ones from sched_prim.c, or private ones associated with
 * particular structures (ports, semaphores, etc.).
 */

#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/zalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/mach_param.h>
#include <kern/sched_prim.h>

#include <kern/wait_queue.h>
#include <vm/vm_kern.h>

/* forward declarations */
static boolean_t wait_queue_member_locked(
    wait_queue_t wq,
    wait_queue_set_t wq_set);

static void wait_queues_init(void) __attribute__((section("__TEXT, initcode")));

#define WAIT_QUEUE_MAX thread_max
#define WAIT_QUEUE_SET_MAX task_max * 3
#define WAIT_QUEUE_LINK_MAX PORT_MAX / 2 + (WAIT_QUEUE_MAX * WAIT_QUEUE_SET_MAX) / 64

static zone_t _wait_queue_link_zone;
static zone_t _wait_queue_set_zone;
static zone_t _wait_queue_zone;

/* see rdar://6737748&5561610; we need an unshadowed
 * definition of a WaitQueueLink for debugging,
 * but it needs to be used somewhere to wind up in
 * the dSYM file. */
volatile WaitQueueLink *unused_except_for_debugging;

/*
 * Waiting protocols and implementation:
 *
 * Each thread may be waiting for exactly one event; this event
 * is set using assert_wait().  That thread may be awakened either
 * by performing a thread_wakeup_prim() on its event,
 * or by directly waking that thread up with clear_wait().
 *
 * The implementation of wait events uses a hash table.  Each
 * bucket is a queue of threads having the same hash function
 * value; the chain for the queue (linked list) is the run queue
 * field.  [It is not possible to be waiting and runnable at the
 * same time.]
 *
 * Locks on both the thread and on the hash buckets govern the
 * wait event field and the queue chain field.  Because wakeup
 * operations only have the event as an argument, the event hash
 * bucket must be locked before any thread.
 *
 * Scheduling operations may also occur at interrupt level; therefore,
 * interrupts below splsched() must be prevented when holding
 * thread or hash bucket locks.
 *
 * The wait event hash table declarations are as follows:
 */

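/*
 * Illustrative sketch (not part of this file's API surface): the
 * protocol above as seen by a typical kernel client.  The event is
 * simply the address of the object being waited on; my_object and
 * the surrounding synchronization are hypothetical.
 *
 *     wait_result_t wr;
 *
 *     wr = assert_wait((event_t)&my_object, THREAD_UNINT);
 *     if (wr == THREAD_WAITING)
 *         wr = thread_block(THREAD_CONTINUE_NULL);
 *
 * thread_block() is what actually sleeps; the wakeup side simply
 * posts the same event with thread_wakeup((event_t)&my_object).
 */
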
struct wait_queue boot_wait_queue[1];
__private_extern__ struct wait_queue *wait_queues = &boot_wait_queue[0];

__private_extern__ uint32_t num_wait_queues = 1;

static uint32_t
compute_wait_hash_size(__unused unsigned cpu_count, __unused uint64_t memsize) {
    uint32_t hsize = (uint32_t)round_page_64((thread_max / 11) * sizeof(struct wait_queue));
    uint32_t bhsize;

    if (PE_parse_boot_argn("wqsize", &bhsize, sizeof(bhsize)))
        hsize = bhsize;

    return hsize;
}
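
/*
 * Worked example (the numbers are assumptions, not constants from
 * this file): with thread_max = 2560 and a 60-byte struct wait_queue,
 * (2560 / 11) * 60 = 13,920 bytes, which round_page_64() rounds up to
 * 16KB on a 4KB-page system.  wait_queues_init() below then carves
 * that into (16384 / 60) - 1 = 272 hash buckets.  A "wqsize=<bytes>"
 * boot-arg replaces the computed size entirely.
 */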

static void
wait_queues_init(void)
{
    uint32_t i, whsize;
    kern_return_t kret;

    whsize = compute_wait_hash_size(processor_avail_count, machine_info.max_mem);
    num_wait_queues = (whsize / ((uint32_t)sizeof(struct wait_queue))) - 1;

    kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues, whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);

    if (kret != KERN_SUCCESS || wait_queues == NULL)
        panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);

    for (i = 0; i < num_wait_queues; i++) {
        wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
    }
}

void
wait_queue_bootstrap(void)
{
    wait_queues_init();
    _wait_queue_zone = zinit(sizeof(struct wait_queue),
                             WAIT_QUEUE_MAX * sizeof(struct wait_queue),
                             sizeof(struct wait_queue),
                             "wait queues");
    zone_change(_wait_queue_zone, Z_NOENCRYPT, TRUE);

    _wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
                                 WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
                                 sizeof(struct wait_queue_set),
                                 "wait queue sets");
    zone_change(_wait_queue_set_zone, Z_NOENCRYPT, TRUE);

    _wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
                                  WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
                                  sizeof(struct _wait_queue_link),
                                  "wait queue links");
    zone_change(_wait_queue_link_zone, Z_NOENCRYPT, TRUE);
}

/*
 * Routine: wait_queue_init
 * Purpose:
 *     Initialize a previously allocated wait queue.
 * Returns:
 *     KERN_SUCCESS - The wait_queue_t was initialized
 *     KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_init(
    wait_queue_t wq,
    int policy)
{
    /* only FIFO and LIFO for now */
    if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
        return KERN_INVALID_ARGUMENT;

    wq->wq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
    wq->wq_type = _WAIT_QUEUE_inited;
    queue_init(&wq->wq_queue);
    hw_lock_init(&wq->wq_interlock);
    return KERN_SUCCESS;
}

/*
 * Routine: wait_queue_alloc
 * Purpose:
 *     Allocate and initialize a wait queue for use outside of
 *     the mach part of the kernel.
 * Conditions:
 *     Nothing locked - can block.
 * Returns:
 *     The allocated and initialized wait queue
 *     WAIT_QUEUE_NULL if there is a resource shortage
 */
wait_queue_t
wait_queue_alloc(
    int policy)
{
    wait_queue_t wq;
    kern_return_t ret;

    wq = (wait_queue_t) zalloc(_wait_queue_zone);
    if (wq != WAIT_QUEUE_NULL) {
        ret = wait_queue_init(wq, policy);
        if (ret != KERN_SUCCESS) {
            zfree(_wait_queue_zone, wq);
            wq = WAIT_QUEUE_NULL;
        }
    }
    return wq;
}

/*
 * Routine: wait_queue_free
 * Purpose:
 *     Free an allocated wait queue.
 * Conditions:
 *     May block.
 */
kern_return_t
wait_queue_free(
    wait_queue_t wq)
{
    if (!wait_queue_is_queue(wq))
        return KERN_INVALID_ARGUMENT;
    if (!queue_empty(&wq->wq_queue))
        return KERN_FAILURE;
    zfree(_wait_queue_zone, wq);
    return KERN_SUCCESS;
}
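
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the alloc/use/free lifecycle of a privately owned wait queue.
 * SYNC_POLICY_FIFO is the common choice; SYNC_POLICY_REVERSED gives
 * LIFO, and SYNC_POLICY_FIXED_PRIORITY is rejected by wait_queue_init.
 *
 *     wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *     if (wq == WAIT_QUEUE_NULL)
 *         return KERN_RESOURCE_SHORTAGE;
 *     ...
 *     kr = wait_queue_free(wq);
 *
 * wait_queue_free() returns KERN_FAILURE rather than freeing if the
 * queue still has waiters or links on it.
 */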

/*
 * Routine: wait_queue_set_init
 * Purpose:
 *     Initialize a previously allocated wait queue set.
 * Returns:
 *     KERN_SUCCESS - The wait_queue_set_t was initialized
 *     KERN_INVALID_ARGUMENT - The policy parameter was invalid
 */
kern_return_t
wait_queue_set_init(
    wait_queue_set_t wqset,
    int policy)
{
    kern_return_t ret;

    ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
    if (ret != KERN_SUCCESS)
        return ret;

    wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
    if (policy & SYNC_POLICY_PREPOST)
        wqset->wqs_wait_queue.wq_prepost = TRUE;
    else
        wqset->wqs_wait_queue.wq_prepost = FALSE;
    queue_init(&wqset->wqs_setlinks);
    queue_init(&wqset->wqs_preposts);
    return KERN_SUCCESS;
}


kern_return_t
wait_queue_sub_init(
    wait_queue_set_t wqset,
    int policy)
{
    return wait_queue_set_init(wqset, policy);
}

kern_return_t
wait_queue_sub_clearrefs(
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_set(wq_set))
        return KERN_INVALID_ARGUMENT;

    s = splsched();
    wqs_lock(wq_set);
    q = &wq_set->wqs_preposts;
    while (!queue_empty(q)) {
        queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
        assert(!wql_is_preposted(wql));
    }
    wqs_unlock(wq_set);
    splx(s);
    return KERN_SUCCESS;
}

/*
 * Routine: wait_queue_set_alloc
 * Purpose:
 *     Allocate and initialize a wait queue set for
 *     use outside of the mach part of the kernel.
 * Conditions:
 *     May block.
 * Returns:
 *     The allocated and initialized wait queue set
 *     WAIT_QUEUE_SET_NULL if there is a resource shortage
 */
wait_queue_set_t
wait_queue_set_alloc(
    int policy)
{
    wait_queue_set_t wq_set;

    wq_set = (wait_queue_set_t) zalloc(_wait_queue_set_zone);
    if (wq_set != WAIT_QUEUE_SET_NULL) {
        kern_return_t ret;

        ret = wait_queue_set_init(wq_set, policy);
        if (ret != KERN_SUCCESS) {
            zfree(_wait_queue_set_zone, wq_set);
            wq_set = WAIT_QUEUE_SET_NULL;
        }
    }
    return wq_set;
}

/*
 * Routine: wait_queue_set_free
 * Purpose:
 *     Free an allocated wait queue set
 * Conditions:
 *     May block.
 */
kern_return_t
wait_queue_set_free(
    wait_queue_set_t wq_set)
{
    if (!wait_queue_is_set(wq_set))
        return KERN_INVALID_ARGUMENT;

    if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
        return KERN_FAILURE;

    zfree(_wait_queue_set_zone, wq_set);
    return KERN_SUCCESS;
}

/*
 *
 * Routine: wait_queue_set_size
 * Routine: wait_queue_link_size
 * Purpose:
 *     Return the size of opaque wait queue structures
 */
unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }

/* declare a unique type for wait queue link structures */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_link_noalloc;
static unsigned int _wait_queue_unlinked;

#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_LINK_NOALLOC ((void *)&_wait_queue_link_noalloc)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)

#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
    WQASSERT(((wqe)->wqe_queue == (wq) && \
        queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
        "wait queue element list corruption: wq=%#x, wqe=%#x", \
        (wq), (wqe))

#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
    ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
        (queue_t)(wql) : &(wql)->wql_setlinks)))

#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
    ((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
        (queue_t)(wql) : &(wql)->wql_setlinks)))

#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
    WQASSERT(((((wql)->wql_type == WAIT_QUEUE_LINK) || \
        ((wql)->wql_type == WAIT_QUEUE_LINK_NOALLOC)) && \
        ((wql)->wql_setqueue == (wqs)) && \
        (((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) || \
        ((wql)->wql_queue->wq_type == _WAIT_QUEUE_SET_inited)) && \
        (WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
        "wait queue set links corruption: wqs=%#x, wql=%#x", \
        (wqs), (wql))

#if defined(_WAIT_QUEUE_DEBUG_)

#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))

#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
    queue_t q2 = &(wq)->wq_queue; \
    wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
    while (!queue_end(q2, (queue_entry_t)wqe2)) { \
        WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
        wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
    } \
MACRO_END

#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
    queue_t q2 = &(wqs)->wqs_setlinks; \
    wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
    while (!queue_end(q2, (queue_entry_t)wql2)) { \
        WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
        wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
    } \
MACRO_END

#else /* !_WAIT_QUEUE_DEBUG_ */

#define WQASSERT(e, s, p0, p1) assert(e)

#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)

#endif /* !_WAIT_QUEUE_DEBUG_ */

/*
 * Routine: wait_queue_member_locked
 * Purpose:
 *     Indicate if this set queue is a member of the queue
 * Conditions:
 *     The wait queue is locked
 *     The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
    wait_queue_t wq,
    wait_queue_set_t wq_set)
{
    wait_queue_element_t wq_element;
    queue_t q;

    assert(wait_queue_held(wq));
    assert(wait_queue_is_set(wq_set));

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        if ((wq_element->wqe_type == WAIT_QUEUE_LINK) ||
            (wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC)) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;

            if (wql->wql_setqueue == wq_set)
                return TRUE;
        }
        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }
    return FALSE;
}

/*
 * Routine: wait_queue_member
 * Purpose:
 *     Indicate if this set queue is a member of the queue
 * Conditions:
 *     The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
    wait_queue_t wq,
    wait_queue_set_t wq_set)
{
    boolean_t ret;
    spl_t s;

    if (!wait_queue_is_set(wq_set))
        return FALSE;

    s = splsched();
    wait_queue_lock(wq);
    ret = wait_queue_member_locked(wq, wq_set);
    wait_queue_unlock(wq);
    splx(s);

    return ret;
}

/*
 * Routine: wait_queue_link_internal
 * Purpose:
 *     Insert a set wait queue into a wait queue.  This
 *     requires us to link the two together using a wait_queue_link
 *     structure that was provided.
 * Conditions:
 *     The wait queue being inserted must be inited as a set queue
 *     The wait_queue_link structure must already be properly typed
 */
static
kern_return_t
wait_queue_link_internal(
    wait_queue_t wq,
    wait_queue_set_t wq_set,
    wait_queue_link_t wql)
{
    wait_queue_element_t wq_element;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
        return KERN_INVALID_ARGUMENT;

    /*
     * There are probably fewer threads and sets associated with
     * the wait queue than there are wait queues associated with
     * the set.  So let's validate it that way.
     */
    s = splsched();
    wait_queue_lock(wq);
    q = &wq->wq_queue;
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
             wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
            ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
            wait_queue_unlock(wq);
            splx(s);
            return KERN_ALREADY_IN_SET;
        }
        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }

    /*
     * Not already a member, so we can add it.
     */
    wqs_lock(wq_set);

    WAIT_QUEUE_SET_CHECK(wq_set);

    assert(wql->wql_type == WAIT_QUEUE_LINK ||
           wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);

    wql->wql_queue = wq;
    wql_clear_prepost(wql);
    queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
    wql->wql_setqueue = wq_set;
    queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);

    wqs_unlock(wq_set);
    wait_queue_unlock(wq);
    splx(s);

    return KERN_SUCCESS;
}

/*
 * Routine: wait_queue_link_noalloc
 * Purpose:
 *     Insert a set wait queue into a wait queue.  This
 *     requires us to link the two together using a wait_queue_link
 *     structure that the caller provides.
 * Conditions:
 *     The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link_noalloc(
    wait_queue_t wq,
    wait_queue_set_t wq_set,
    wait_queue_link_t wql)
{
    wql->wql_type = WAIT_QUEUE_LINK_NOALLOC;
    return wait_queue_link_internal(wq, wq_set, wql);
}

/*
 * Routine: wait_queue_link
 * Purpose:
 *     Insert a set wait queue into a wait queue.  This
 *     requires us to link the two together using a wait_queue_link
 *     structure that we allocate.
 * Conditions:
 *     The wait queue being inserted must be inited as a set queue
 */
kern_return_t
wait_queue_link(
    wait_queue_t wq,
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    kern_return_t ret;

    wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
    if (wql == WAIT_QUEUE_LINK_NULL)
        return KERN_RESOURCE_SHORTAGE;

    wql->wql_type = WAIT_QUEUE_LINK;
    ret = wait_queue_link_internal(wq, wq_set, wql);
    if (ret != KERN_SUCCESS)
        zfree(_wait_queue_link_zone, wql);

    return ret;
}

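/*
 * Illustrative sketch (hypothetical caller): tying a private wait
 * queue into a set, so a wakeup posted on the queue can be observed
 * by a thread waiting on the set (the select()-style pattern).
 *
 *     wait_queue_set_t wqs;
 *     kern_return_t kr;
 *
 *     wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
 *     kr = wait_queue_link(wq, wqs);      (allocates the link structure)
 *     ...
 *     kr = wait_queue_unlink(wq, wqs);    (frees it again)
 */
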
wait_queue_link_t
wait_queue_link_allocate(void)
{
    wait_queue_link_t wql;

    wql = zalloc(_wait_queue_link_zone); /* Can't fail */
    bzero(wql, sizeof(*wql));
    wql->wql_type = WAIT_QUEUE_UNLINKED;

    return wql;
}

kern_return_t
wait_queue_link_free(wait_queue_link_t wql)
{
    zfree(_wait_queue_link_zone, wql);
    return KERN_SUCCESS;
}

/*
 * Routine: wait_queue_unlink_locked
 * Purpose:
 *     Undo the linkage between a wait queue and a set.
 */
static void
wait_queue_unlink_locked(
    wait_queue_t wq,
    wait_queue_set_t wq_set,
    wait_queue_link_t wql)
{
    assert(wait_queue_held(wq));
    assert(wait_queue_held(&wq_set->wqs_wait_queue));

    wql->wql_queue = WAIT_QUEUE_NULL;
    queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
    wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
    queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
    if (wql_is_preposted(wql)) {
        queue_t ppq = &wq_set->wqs_preposts;
        queue_remove(ppq, wql, wait_queue_link_t, wql_preposts);
    }
    wql->wql_type = WAIT_QUEUE_UNLINKED;

    WAIT_QUEUE_CHECK(wq);
    WAIT_QUEUE_SET_CHECK(wq_set);
}

/*
 * Routine: wait_queue_unlink
 * Purpose:
 *     Remove the linkage between a wait queue and a set,
 *     freeing the linkage structure.
 * Conditions:
 *     The set queue must currently be a member of the wait queue
 */
kern_return_t
wait_queue_unlink(
    wait_queue_t wq,
    wait_queue_set_t wq_set)
{
    wait_queue_element_t wq_element;
    wait_queue_link_t wql;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
        return KERN_INVALID_ARGUMENT;
    }
    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
            wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

            wql = (wait_queue_link_t)wq_element;

            if (wql->wql_setqueue == wq_set) {
                boolean_t alloced;

                alloced = (wql->wql_type == WAIT_QUEUE_LINK);
                wqs_lock(wq_set);
                wait_queue_unlink_locked(wq, wq_set, wql);
                wqs_unlock(wq_set);
                wait_queue_unlock(wq);
                splx(s);
                if (alloced)
                    zfree(_wait_queue_link_zone, wql);
                return KERN_SUCCESS;
            }
        }
        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }
    wait_queue_unlock(wq);
    splx(s);
    return KERN_NOT_IN_SET;
}

/*
 * Routine: wait_queue_unlink_all
 * Purpose:
 *     Remove the linkage between a wait queue and all its sets.
 *     All the linkage structures that were allocated internally
 *     are freed.  The others are the caller's responsibility.
 * Conditions:
 *     Nothing of interest locked.
 */

kern_return_t
wait_queue_unlink_all(
    wait_queue_t wq)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wq_next_element;
    wait_queue_set_t wq_set;
    wait_queue_link_t wql;
    queue_head_t links_queue_head;
    queue_t links = &links_queue_head;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    queue_init(links);

    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        boolean_t alloced;

        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wq_next_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
        if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wql = (wait_queue_link_t)wq_element;
            wq_set = wql->wql_setqueue;
            wqs_lock(wq_set);
            wait_queue_unlink_locked(wq, wq_set, wql);
            wqs_unlock(wq_set);
            if (alloced)
                enqueue(links, &wql->wql_links);
        }
        wq_element = wq_next_element;
    }
    wait_queue_unlock(wq);
    splx(s);

    while (!queue_empty(links)) {
        wql = (wait_queue_link_t) dequeue(links);
        zfree(_wait_queue_link_zone, wql);
    }

    return (KERN_SUCCESS);
}

/* legacy interface naming */
kern_return_t
wait_subqueue_unlink_all(
    wait_queue_set_t wq_set)
{
    return wait_queue_set_unlink_all(wq_set);
}

/*
 * Routine: wait_queue_set_unlink_all
 * Purpose:
 *     Remove the linkage between a set wait queue and all its
 *     member wait queues.  The link structures are freed for those
 *     links which were dynamically allocated.
 * Conditions:
 *     The wait queue must be a set
 */
kern_return_t
wait_queue_set_unlink_all(
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    wait_queue_t wq;
    queue_t q;
    queue_head_t links_queue_head;
    queue_t links = &links_queue_head;
    spl_t s;

    if (!wait_queue_is_set(wq_set)) {
        return KERN_INVALID_ARGUMENT;
    }

    queue_init(links);

retry:
    s = splsched();
    wqs_lock(wq_set);

    q = &wq_set->wqs_setlinks;

    wql = (wait_queue_link_t)queue_first(q);
    while (!queue_end(q, (queue_entry_t)wql)) {
        WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
        wq = wql->wql_queue;
        if (wait_queue_lock_try(wq)) {
            boolean_t alloced;

            alloced = (wql->wql_type == WAIT_QUEUE_LINK);
            wait_queue_unlink_locked(wq, wq_set, wql);
            wait_queue_unlock(wq);
            if (alloced)
                enqueue(links, &wql->wql_links);
            wql = (wait_queue_link_t)queue_first(q);
        } else {
            wqs_unlock(wq_set);
            splx(s);
            delay(1);
            goto retry;
        }
    }
    wqs_unlock(wq_set);
    splx(s);

    while (!queue_empty(links)) {
        wql = (wait_queue_link_t) dequeue(links);
        zfree(_wait_queue_link_zone, wql);
    }
    return (KERN_SUCCESS);
}

kern_return_t
wait_queue_set_unlink_one(
    wait_queue_set_t wq_set,
    wait_queue_link_t wql)
{
    wait_queue_t wq;
    spl_t s;

    assert(wait_queue_is_set(wq_set));

retry:
    s = splsched();
    wqs_lock(wq_set);

    WAIT_QUEUE_SET_CHECK(wq_set);

    /* Already unlinked, e.g. by selclearthread() */
    if (wql->wql_type == WAIT_QUEUE_UNLINKED) {
        goto out;
    }

    WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);

    /* On a wait queue, and we hold set queue lock ... */
    wq = wql->wql_queue;
    if (wait_queue_lock_try(wq)) {
        wait_queue_unlink_locked(wq, wq_set, wql);
        wait_queue_unlock(wq);
    } else {
        wqs_unlock(wq_set);
        splx(s);
        delay(1);
        goto retry;
    }

out:
    wqs_unlock(wq_set);
    splx(s);

    return KERN_SUCCESS;
}

/*
 * Routine: wait_queue_assert_wait64_locked
 * Purpose:
 *     Insert the current thread into the supplied wait queue
 *     waiting for a particular event to be posted to that queue.
 *
 * Conditions:
 *     The wait queue is assumed locked.
 *     The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
    wait_queue_t wq,
    event64_t event,
    wait_interrupt_t interruptible,
    uint64_t deadline,
    thread_t thread)
{
    wait_result_t wait_result;
    boolean_t realtime;

    if (!wait_queue_assert_possible(thread))
        panic("wait_queue_assert_wait64_locked");

    if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
        wait_queue_set_t wqs = (wait_queue_set_t)wq;

        if (event == NO_EVENT64 && wqs_is_preposted(wqs))
            return (THREAD_AWAKENED);
    }

    /*
     * Realtime threads get priority for wait queue placements.
     * This allows wait_queue_wakeup_one to prefer a waiting
     * realtime thread, similar in principle to performing
     * a wait_queue_wakeup_all and allowing scheduler prioritization
     * to run the realtime thread, but without causing the
     * lock contention of that scenario.
     */
    realtime = (thread->sched_pri >= BASEPRI_REALTIME);

    /*
     * This is the extent to which we currently take scheduling attributes
     * into account.  If the thread is vm privileged, we stick it at
     * the front of the queue.  Later, these queues will honor the policy
     * value set at wait_queue_init time.
     */
    wait_result = thread_mark_wait_locked(thread, interruptible);
    if (wait_result == THREAD_WAITING) {
        if (!wq->wq_fifo
            || (thread->options & TH_OPT_VMPRIV)
            || realtime)
            enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
        else
            enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

        thread->wait_event = event;
        thread->wait_queue = wq;

        if (deadline != 0) {
            uint32_t flags;

            flags = realtime ? TIMER_CALL_CRITICAL : 0;

            if (!timer_call_enter(&thread->wait_timer, deadline, flags))
                thread->wait_timer_active++;
            thread->wait_timer_is_set = TRUE;
        }
    }
    return (wait_result);
}

/*
 * Routine: wait_queue_assert_wait
 * Purpose:
 *     Insert the current thread into the supplied wait queue
 *     waiting for a particular event to be posted to that queue.
 *
 * Conditions:
 *     nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
    wait_queue_t wq,
    event_t event,
    wait_interrupt_t interruptible,
    uint64_t deadline)
{
    spl_t s;
    wait_result_t ret;
    thread_t thread = current_thread();

    /* If it is an invalid wait queue, you can't wait on it */
    if (!wait_queue_is_valid(wq))
        return (thread->wait_result = THREAD_RESTART);

    s = splsched();
    wait_queue_lock(wq);
    thread_lock(thread);
    ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
                                          interruptible, deadline, thread);
    thread_unlock(thread);
    wait_queue_unlock(wq);
    splx(s);
    return (ret);
}
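
/*
 * Illustrative sketch (hypothetical caller): waiting with a timeout.
 * The deadline is absolute, e.g. from clock_interval_to_deadline();
 * wq and my_object are assumptions, not names from this file.
 *
 *     uint64_t deadline;
 *     wait_result_t wr;
 *
 *     clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *     wr = wait_queue_assert_wait(wq, (event_t)&my_object,
 *                                 THREAD_ABORTSAFE, deadline);
 *     if (wr == THREAD_WAITING)
 *         wr = thread_block(THREAD_CONTINUE_NULL);
 *
 * A result of THREAD_TIMED_OUT means the wait timer fired before any
 * wakeup was posted.
 */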

/*
 * Routine: wait_queue_assert_wait64
 * Purpose:
 *     Insert the current thread into the supplied wait queue
 *     waiting for a particular event to be posted to that queue.
 * Conditions:
 *     nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
    wait_queue_t wq,
    event64_t event,
    wait_interrupt_t interruptible,
    uint64_t deadline)
{
    spl_t s;
    wait_result_t ret;
    thread_t thread = current_thread();

    /* If it is an invalid wait queue, you can't wait on it */
    if (!wait_queue_is_valid(wq))
        return (thread->wait_result = THREAD_RESTART);

    s = splsched();
    wait_queue_lock(wq);
    thread_lock(thread);
    ret = wait_queue_assert_wait64_locked(wq, event, interruptible, deadline, thread);
    thread_unlock(thread);
    wait_queue_unlock(wq);
    splx(s);
    return (ret);
}

/*
 * Routine: _wait_queue_select64_all
 * Purpose:
 *     Select all threads off a wait queue that meet the
 *     supplied criteria.
 * Conditions:
 *     at splsched
 *     wait queue locked
 *     wake_queue initialized and ready for insertion
 *     possibly recursive
 * Returns:
 *     a queue of locked threads
 */
static void
_wait_queue_select64_all(
    wait_queue_t wq,
    event64_t event,
    queue_t wake_queue)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    queue_t q;

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
            wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_set_t set_queue = wql->wql_setqueue;

            /*
             * We have to check the set wait queue.  If it is marked
             * as pre-post, and it is the "generic event" then mark
             * it pre-posted now (if not already).
             */
            wqs_lock(set_queue);
            if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
                queue_t ppq = &set_queue->wqs_preposts;
                queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
            }
            if (! wait_queue_empty(&set_queue->wqs_wait_queue))
                _wait_queue_select64_all(&set_queue->wqs_wait_queue, event, wake_queue);
            wqs_unlock(set_queue);
        } else {

            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, pull
             * it off the queue and stick it in our wake_queue.
             */
            thread_t t = (thread_t)wq_element;

            if (t->wait_event == event) {
                thread_lock(t);
                remqueue((queue_entry_t) t);
                enqueue(wake_queue, (queue_entry_t) t);
                t->wait_queue = WAIT_QUEUE_NULL;
                t->wait_event = NO_EVENT64;
                t->at_safe_point = FALSE;
                /* returned locked */
            }
        }
        wq_element = wqe_next;
    }
}

/*
 * Routine: wait_queue_wakeup64_all_locked
 * Purpose:
 *     Wakeup some number of threads that are in the specified
 *     wait queue and waiting on the specified event.
 * Conditions:
 *     wait queue already locked (may be released).
 * Returns:
 *     KERN_SUCCESS - Threads were woken up
 *     KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
    wait_queue_t wq,
    event64_t event,
    wait_result_t result,
    boolean_t unlock)
{
    queue_head_t wake_queue_head;
    queue_t q = &wake_queue_head;
    kern_return_t res;

//  assert(wait_queue_held(wq));
//  if(!wq->wq_interlock.lock_data) {       /* (BRINGUP */
//      panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq); /* (BRINGUP) */
//  }

    queue_init(q);

    /*
     * Select the threads that we will wake up.  The threads
     * are returned to us locked and cleanly removed from the
     * wait queue.
     */
    _wait_queue_select64_all(wq, event, q);
    if (unlock)
        wait_queue_unlock(wq);

    /*
     * For each thread, set it running.
     */
    res = KERN_NOT_WAITING;
    while (!queue_empty(q)) {
        thread_t thread = (thread_t) dequeue(q);
        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
    }
    return res;
}

/*
 * Routine: wait_queue_wakeup_all
 * Purpose:
 *     Wakeup some number of threads that are in the specified
 *     wait queue and waiting on the specified event.
 * Conditions:
 *     Nothing locked
 * Returns:
 *     KERN_SUCCESS - Threads were woken up
 *     KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
    wait_queue_t wq,
    event_t event,
    wait_result_t result)
{
    kern_return_t ret;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
//  if(!wq->wq_interlock.lock_data) {       /* (BRINGUP */
//      panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq); /* (BRINGUP) */
//  }
    ret = wait_queue_wakeup64_all_locked(
        wq, CAST_DOWN(event64_t,event),
        result, TRUE);
    /* lock released */
    splx(s);
    return ret;
}

/*
 * Routine: wait_queue_wakeup64_all
 * Purpose:
 *     Wakeup some number of threads that are in the specified
 *     wait queue and waiting on the specified event.
 * Conditions:
 *     Nothing locked
 * Returns:
 *     KERN_SUCCESS - Threads were woken up
 *     KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
    wait_queue_t wq,
    event64_t event,
    wait_result_t result)
{
    kern_return_t ret;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
    /* lock released */
    splx(s);
    return ret;
}

/*
 * Routine: _wait_queue_select64_one
 * Purpose:
 *     Select the best thread off a wait queue that meets the
 *     supplied criteria.
 * Conditions:
 *     at splsched
 *     wait queue locked
 *     possibly recursive
 * Returns:
 *     a locked thread - if one found
 * Note:
 *     This is where the sync policy of the wait queue comes
 *     into effect.  For now, we just assume FIFO/LIFO.
 */
static thread_t
_wait_queue_select64_one(
    wait_queue_t wq,
    event64_t event)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    thread_t t = THREAD_NULL;
    queue_t q;

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
            wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_set_t set_queue = wql->wql_setqueue;

            /*
             * We have to check the set wait queue.  If the set
             * supports pre-posting, it isn't already preposted,
             * and we didn't find a thread in the set, then mark it.
             *
             * If we later find a thread, there may be a spurious
             * pre-post here on this set.  The wait side has to check
             * for that either pre- or post-wait.
             */
            wqs_lock(set_queue);
            if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
                t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
            }
            if (t != THREAD_NULL) {
                wqs_unlock(set_queue);
                return t;
            }
            if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
                queue_t ppq = &set_queue->wqs_preposts;
                queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
            }
            wqs_unlock(set_queue);

        } else {

            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, pull
             * it off the queue and stick it in our wake_queue.
             */
            t = (thread_t)wq_element;
            if (t->wait_event == event) {
                thread_lock(t);
                remqueue((queue_entry_t) t);
                t->wait_queue = WAIT_QUEUE_NULL;
                t->wait_event = NO_EVENT64;
                t->at_safe_point = FALSE;
                return t; /* still locked */
            }

            t = THREAD_NULL;
        }
        wq_element = wqe_next;
    }
    return THREAD_NULL;
}

/*
 * Routine: wait_queue_pull_thread_locked
 * Purpose:
 *     Pull a thread off its wait queue and (possibly) unlock
 *     the waitq.
 * Conditions:
 *     at splsched
 *     wait queue locked
 *     thread locked
 * Returns:
 *     with the thread still locked.
 */
void
wait_queue_pull_thread_locked(
    wait_queue_t waitq,
    thread_t thread,
    boolean_t unlock)
{

    assert(thread->wait_queue == waitq);

    remqueue((queue_entry_t)thread);
    thread->wait_queue = WAIT_QUEUE_NULL;
    thread->wait_event = NO_EVENT64;
    thread->at_safe_point = FALSE;
    if (unlock)
        wait_queue_unlock(waitq);
}

/*
 * Routine: wait_queue_select64_thread
 * Purpose:
 *     Look for a thread and remove it from the queues, if
 *     (and only if) the thread is waiting on the supplied
 *     <wait_queue, event> pair.
 * Conditions:
 *     at splsched
 *     wait queue locked
 *     possibly recursive
 * Returns:
 *     KERN_NOT_WAITING: Thread is not waiting here.
 *     KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
    wait_queue_t wq,
    event64_t event,
    thread_t thread)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    kern_return_t res = KERN_NOT_WAITING;
    queue_t q = &wq->wq_queue;

    thread_lock(thread);
    if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
        remqueue((queue_entry_t) thread);
        thread->at_safe_point = FALSE;
        thread->wait_event = NO_EVENT64;
        thread->wait_queue = WAIT_QUEUE_NULL;
        /* thread still locked */
        return KERN_SUCCESS;
    }
    thread_unlock(thread);

    /*
     * The wait_queue associated with the thread may be one of this
     * wait queue's sets.  Go see.  If so, removing it from
     * there is like removing it from here.
     */
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
            wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_set_t set_queue = wql->wql_setqueue;

            wqs_lock(set_queue);
            if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
                res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
                                                  event,
                                                  thread);
            }
            wqs_unlock(set_queue);
            if (res == KERN_SUCCESS)
                return KERN_SUCCESS;
        }
        wq_element = wqe_next;
    }
    return res;
}

/*
 * Routine: wait_queue_wakeup64_identity_locked
 * Purpose:
 *     Select a single thread that is most-eligible to run and
 *     set it running.  But return the thread locked.
 *
 * Conditions:
 *     at splsched
 *     wait queue locked
 *     possibly recursive
 * Returns:
 *     a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
    wait_queue_t wq,
    event64_t event,
    wait_result_t result,
    boolean_t unlock)
{
    kern_return_t res;
    thread_t thread;

    assert(wait_queue_held(wq));

    thread = _wait_queue_select64_one(wq, event);
    if (unlock)
        wait_queue_unlock(wq);

    if (thread) {
        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
    }
    return thread; /* still locked if not NULL */
}

/*
 * Routine: wait_queue_wakeup64_one_locked
 * Purpose:
 *     Select a single thread that is most-eligible to run and
 *     set it running.
 *
 * Conditions:
 *     at splsched
 *     wait queue locked
 *     possibly recursive
 * Returns:
 *     KERN_SUCCESS: It was, and is, now removed.
 *     KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
    wait_queue_t wq,
    event64_t event,
    wait_result_t result,
    boolean_t unlock)
{
    thread_t thread;

    assert(wait_queue_held(wq));

    thread = _wait_queue_select64_one(wq, event);
    if (unlock)
        wait_queue_unlock(wq);

    if (thread) {
        kern_return_t res;

        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        return res;
    }

    return KERN_NOT_WAITING;
}

/*
 * Routine: wait_queue_wakeup_one
 * Purpose:
 *     Wakeup the most appropriate thread that is in the specified
 *     wait queue for the specified event.
 * Conditions:
 *     Nothing locked
 * Returns:
 *     KERN_SUCCESS - Thread was woken up
 *     KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
    wait_queue_t wq,
    event_t event,
    wait_result_t result,
    int priority)
{
    thread_t thread;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
    wait_queue_unlock(wq);

    if (thread) {
        kern_return_t res;

        if (thread->sched_pri < priority) {
            if (priority <= MAXPRI) {
                set_sched_pri(thread, priority);

                thread->was_promoted_on_wakeup = 1;
                thread->sched_flags |= TH_SFLAG_PROMOTED;
            }
        }
        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        splx(s);
        return res;
    }

    splx(s);
    return KERN_NOT_WAITING;
}
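
/*
 * Illustrative sketch (hypothetical caller): waking a single waiter.
 * Passing -1 for the priority argument requests no wakeup promotion,
 * since any thread's sched_pri is >= 0; a value up to MAXPRI floors
 * the woken thread at that priority (TH_SFLAG_PROMOTED).
 *
 *     kern_return_t kr;
 *
 *     kr = wait_queue_wakeup_one(wq, (event_t)&my_object,
 *                                THREAD_AWAKENED, -1);
 *     if (kr == KERN_NOT_WAITING)
 *         ...    (no thread was waiting; record a pending wakeup)
 */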

/*
 * Routine: wait_queue_wakeup64_one
 * Purpose:
 *     Wakeup the most appropriate thread that is in the specified
 *     wait queue for the specified event.
 * Conditions:
 *     Nothing locked
 * Returns:
 *     KERN_SUCCESS - Thread was woken up
 *     KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
    wait_queue_t wq,
    event64_t event,
    wait_result_t result)
{
    thread_t thread;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }
    s = splsched();
    wait_queue_lock(wq);
    thread = _wait_queue_select64_one(wq, event);
    wait_queue_unlock(wq);

    if (thread) {
        kern_return_t res;

        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        splx(s);
        return res;
    }

    splx(s);
    return KERN_NOT_WAITING;
}

/*
 * Routine: wait_queue_wakeup64_thread_locked
 * Purpose:
 *     Wakeup the particular thread that was specified if and only
 *     if it was in this wait queue (or one of its set queues)
 *     and waiting on the specified event.
 *
 *     This is much safer than just removing the thread from
 *     whatever wait queue it happens to be on.  For instance, it
 *     may have already been awoken from the wait you intended to
 *     interrupt and waited on something else (like another
 *     semaphore).
 * Conditions:
 *     at splsched
 *     wait queue already locked (may be released).
 * Returns:
 *     KERN_SUCCESS - the thread was found waiting and awakened
 *     KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
    wait_queue_t wq,
    event64_t event,
    thread_t thread,
    wait_result_t result,
    boolean_t unlock)
{
    kern_return_t res;

    assert(wait_queue_held(wq));

    /*
     * See if the thread was still waiting there.  If so, it got
     * dequeued and returned locked.
     */
    res = _wait_queue_select64_thread(wq, event, thread);
    if (unlock)
        wait_queue_unlock(wq);

    if (res != KERN_SUCCESS)
        return KERN_NOT_WAITING;

    res = thread_go(thread, result);
    assert(res == KERN_SUCCESS);
    thread_unlock(thread);
    return res;
}

/*
 * Routine: wait_queue_wakeup_thread
 * Purpose:
 *     Wakeup the particular thread that was specified if and only
 *     if it was in this wait queue (or one of its set queues)
 *     and waiting on the specified event.
 *
 *     This is much safer than just removing the thread from
 *     whatever wait queue it happens to be on.  For instance, it
 *     may have already been awoken from the wait you intended to
 *     interrupt and waited on something else (like another
 *     semaphore).
 * Conditions:
 *     nothing of interest locked
 *     we need to assume spl needs to be raised
 * Returns:
 *     KERN_SUCCESS - the thread was found waiting and awakened
 *     KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup_thread(
    wait_queue_t wq,
    event_t event,
    thread_t thread,
    wait_result_t result)
{
    kern_return_t res;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    res = _wait_queue_select64_thread(wq, CAST_DOWN(event64_t,event), thread);
    wait_queue_unlock(wq);

    if (res == KERN_SUCCESS) {
        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        splx(s);
        return res;
    }
    splx(s);
    return KERN_NOT_WAITING;
}
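
/*
 * Illustrative sketch (hypothetical caller): aborting one specific
 * thread's wait, e.g. while tearing down an object a known thread
 * is blocked on.  The targeted form succeeds only if target is
 * still waiting on this exact <wq,event> pair, so a thread that
 * already woke and blocked elsewhere is left alone.
 *
 *     kern_return_t kr;
 *
 *     kr = wait_queue_wakeup_thread(wq, (event_t)&my_object,
 *                                   target, THREAD_INTERRUPTED);
 *     if (kr == KERN_NOT_WAITING)
 *         ...    (target had already been awakened; nothing to do)
 */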

/*
 * Routine: wait_queue_wakeup64_thread
 * Purpose:
 *     Wakeup the particular thread that was specified if and only
 *     if it was in this wait queue (or one of its set's queues)
 *     and waiting on the specified event.
 *
 *     This is much safer than just removing the thread from
 *     whatever wait queue it happens to be on.  For instance, it
 *     may have already been awoken from the wait you intended to
 *     interrupt and waited on something else (like another
 *     semaphore).
 * Conditions:
 *     nothing of interest locked
 *     we need to assume spl needs to be raised
 * Returns:
 *     KERN_SUCCESS - the thread was found waiting and awakened
 *     KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
    wait_queue_t wq,
    event64_t event,
    thread_t thread,
    wait_result_t result)
{
    kern_return_t res;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    wait_queue_lock(wq);
    res = _wait_queue_select64_thread(wq, event, thread);
    wait_queue_unlock(wq);

    if (res == KERN_SUCCESS) {
        res = thread_go(thread, result);
        assert(res == KERN_SUCCESS);
        thread_unlock(thread);
        splx(s);
        return res;
    }
    splx(s);
    return KERN_NOT_WAITING;
}