#ifndef _WAITQ_H_
#define _WAITQ_H_
/*
 * Copyright (c) 2014-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/kern_return.h>	/* for kern_return_t */

#include <kern/kern_types.h>	/* for wait_queue_t */
#include <kern/queue.h>
#include <kern/assert.h>

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
/* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
#include <kern/priority_queue.h>
#endif /* XNU_KERNEL_PRIVATE */

/*
 * Constants and types used in the waitq APIs
 */
#define WAITQ_ALL_PRIORITIES   (-1)
#define WAITQ_PROMOTE_PRIORITY (-2)
#define WAITQ_SELECT_MAX_PRI   (-3)

typedef enum e_waitq_lock_state {
	WAITQ_KEEP_LOCKED    = 0x01,
	WAITQ_UNLOCK         = 0x02,
	WAITQ_SHOULD_LOCK    = 0x04,
	WAITQ_ALREADY_LOCKED = 0x08,
	WAITQ_DONT_LOCK      = 0x10,
} waitq_lock_state_t;

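/*
 * Editorial note (not in the original header): the waitq_lock_state_t
 * passed to the *_locked functions below tells the callee how to treat
 * the waitq interlock; e.g. WAITQ_UNLOCK asks it to drop the lock before
 * returning, while WAITQ_KEEP_LOCKED returns with the lock still held.
 */
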
/*
 * The Jenkins "one at a time" hash.
 * TBD: There may be some value to unrolling here,
 * depending on the architecture.
 */
static __inline__ uint32_t
jenkins_hash(char *key, size_t length)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < length; i++) {
		hash += (uint32_t)key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash;
}

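/*
 * Illustrative sketch (not part of the original header): jenkins_hash()
 * maps an arbitrary byte string to a 32-bit value; the global_eventq()
 * machinery below uses this style of hash to pick a global wait queue
 * for an event. The bucket count here is hypothetical.
 *
 *	event64_t ev = ...;                        // some event identifier
 *	uint32_t h = jenkins_hash((char *)&ev, sizeof(ev));
 *	uint32_t bucket = h % 256;                 // illustrative table size
 */
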
/* Opaque sizes and alignment used for struct verification */
#if __arm__ || __arm64__
#define WQ_OPAQUE_ALIGN   __BIGGEST_ALIGNMENT__
#define WQS_OPAQUE_ALIGN  __BIGGEST_ALIGNMENT__
#if __arm__
#define WQ_OPAQUE_SIZE   32
#define WQS_OPAQUE_SIZE  48
#else
#define WQ_OPAQUE_SIZE   40
#define WQS_OPAQUE_SIZE  56
#endif
#elif __x86_64__
#define WQ_OPAQUE_ALIGN   8
#define WQS_OPAQUE_ALIGN  8
#define WQ_OPAQUE_SIZE   48
#define WQS_OPAQUE_SIZE  64
#else
#error Unknown size requirement
#endif

#ifdef MACH_KERNEL_PRIVATE

#include <kern/spl.h>
#include <kern/simple_lock.h>

#include <machine/cpu_number.h>
#include <machine/machine_routines.h>	/* machine_timeout_suspended() */

/*
 * The event mask is 57 bits on 64-bit architectures and 25 bits on
 * 32-bit architectures, so we calculate its size using sizeof(long).
 * If the bitfield for wq_type and wq_fifo is changed, then the value of
 * EVENT_MASK_BITS will also change.
 *
 * New plan: this is an optimization anyway, so I'm stealing 32 bits
 * from the mask to shrink the waitq object even further.
 */
#define _EVENT_MASK_BITS   ((sizeof(uint32_t) * 8) - 7)

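/*
 * Worked out (editorial note): struct waitq below carves 7 flag bits out
 * of this 32-bit word (2 bits for waitq_type plus 5 single-bit flags),
 * leaving _EVENT_MASK_BITS = 32 - 7 = 25 bits of event mask.
 */
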
enum waitq_type {
	WQT_INVALID = 0,
	WQT_QUEUE   = 0x2,
	WQT_SET     = 0x3,
};

#if CONFIG_WAITQ_STATS
#define NWAITQ_BTFRAMES 5
struct wq_stats {
	uint64_t waits;
	uint64_t wakeups;
	uint64_t clears;
	uint64_t failed_wakeups;

	uintptr_t last_wait[NWAITQ_BTFRAMES];
	uintptr_t last_wakeup[NWAITQ_BTFRAMES];
	uintptr_t last_failed_wakeup[NWAITQ_BTFRAMES];
};
#endif

/*
 * struct waitq
 *
 * This is the definition of the common event wait queue
 * that the scheduler APIs understand. It is used
 * internally by the generalized event waiting mechanism
 * (assert_wait), and also for items that maintain their
 * own wait queues (such as ports and semaphores).
 *
 * It is not published to other kernel components.
 *
 * NOTE: Hardware locks are used to protect event wait
 * queues since interrupt code is free to post events to
 * them.
 */
struct waitq {
	uint32_t /* flags */
	    waitq_type:2,              /* only public field */
	    waitq_fifo:1,              /* fifo wakeup policy? */
	    waitq_prepost:1,           /* waitq supports prepost? */
	    waitq_irq:1,               /* waitq requires interrupts disabled */
	    waitq_isvalid:1,           /* waitq structure is valid */
	    waitq_turnstile_or_port:1, /* waitq is embedded in a turnstile (if irq safe), or port (if not irq safe) */
	    waitq_eventmask:_EVENT_MASK_BITS;
	/* the wait queue set (set-of-sets) to which this queue belongs */
#if __arm64__
	hw_lock_bit_t waitq_interlock;	/* interlock */
#else
	hw_lock_data_t waitq_interlock;	/* interlock */
#endif /* __arm64__ */

	uint64_t waitq_set_id;
	uint64_t waitq_prepost_id;
	union {
		queue_head_t          waitq_queue;      /* queue of elements */
		struct priority_queue waitq_prio_queue; /* priority ordered queue of elements */
	};
};

static_assert(sizeof(struct waitq) == WQ_OPAQUE_SIZE, "waitq structure size mismatch");
static_assert(__alignof(struct waitq) == WQ_OPAQUE_ALIGN, "waitq structure alignment mismatch");

/*
 * struct waitq_set
 *
 * This is the common definition for a set wait queue.
 */
struct waitq_set {
	struct waitq wqset_q;
	uint64_t     wqset_id;
	union {
		uint64_t wqset_prepost_id;
		void    *wqset_prepost_hook;
	};
};

#define WQSET_NOT_LINKED ((uint64_t)(~0))
static_assert(sizeof(struct waitq_set) == WQS_OPAQUE_SIZE, "waitq_set structure size mismatch");
static_assert(__alignof(struct waitq_set) == WQS_OPAQUE_ALIGN, "waitq_set structure alignment mismatch");

extern void waitq_bootstrap(void);

#define waitq_is_queue(wq) \
	((wq)->waitq_type == WQT_QUEUE)

#define waitq_is_turnstile_queue(wq) \
	(((wq)->waitq_irq) && (wq)->waitq_turnstile_or_port)

#define waitq_is_port_queue(wq) \
	(!((wq)->waitq_irq) && (wq)->waitq_turnstile_or_port)

#define waitq_is_set(wq) \
	((wq)->waitq_type == WQT_SET && ((struct waitq_set *)(wq))->wqset_id != 0)

#define waitqs_is_set(wqs) \
	(((wqs)->wqset_q.waitq_type == WQT_SET) && ((wqs)->wqset_id != 0))

#define waitq_valid(wq) \
	((wq) != NULL && (wq)->waitq_isvalid)

#define waitqs_is_linked(wqs) \
	(((wqs)->wqset_id != WQSET_NOT_LINKED) && ((wqs)->wqset_id != 0))

/*
 * Invalidate a waitq. The only valid waitq functions to call after this are:
 *    waitq_deinit()
 *    waitq_set_deinit()
 */
extern void waitq_invalidate_locked(struct waitq *wq);

static inline boolean_t
waitq_empty(struct waitq *wq)
{
	if (waitq_is_turnstile_queue(wq)) {
		return priority_queue_empty(&(wq->waitq_prio_queue));
	} else {
		return queue_empty(&(wq->waitq_queue));
	}
}

#if __arm64__

#define waitq_held(wq) \
	(hw_lock_bit_held(&(wq)->waitq_interlock, LCK_ILOCK))

#define waitq_lock_try(wq) \
	(hw_lock_bit_try(&(wq)->waitq_interlock, LCK_ILOCK))

#else

#define waitq_held(wq) \
	(hw_lock_held(&(wq)->waitq_interlock))

#define waitq_lock_try(wq) \
	(hw_lock_try(&(wq)->waitq_interlock))

#endif /* __arm64__ */

#define waitq_wait_possible(thread) \
	((thread)->waitq == NULL)

extern void waitq_lock(struct waitq *wq);

#define waitq_set_lock(wqs)     waitq_lock(&(wqs)->wqset_q)
#define waitq_set_unlock(wqs)   waitq_unlock(&(wqs)->wqset_q)
#define waitq_set_lock_try(wqs) waitq_lock_try(&(wqs)->wqset_q)
#define waitq_set_can_prepost(wqs)     (waitqs_is_set(wqs) && \
					(wqs)->wqset_q.waitq_prepost)
#define waitq_set_maybe_preposted(wqs) ((wqs)->wqset_q.waitq_prepost && \
					(wqs)->wqset_prepost_id > 0)
#define waitq_set_has_prepost_hook(wqs) (waitqs_is_set(wqs) && \
					 !((wqs)->wqset_q.waitq_prepost) && \
					 (wqs)->wqset_prepost_hook)

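/*
 * Typical interlock pattern (illustrative sketch only; whether splsched()
 * is required depends on waitq_irq_safe(), declared later in this header):
 *
 *	spl_t s = splsched();          // only for irq-safe waitqs
 *	waitq_lock(wq);
 *	...examine or modify the queue...
 *	waitq_unlock(wq);
 *	splx(s);
 */
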
/* assert intent to wait on a locked wait queue */
extern wait_result_t waitq_assert_wait64_locked(struct waitq *waitq,
    event64_t wait_event,
    wait_interrupt_t interruptible,
    wait_timeout_urgency_t urgency,
    uint64_t deadline,
    uint64_t leeway,
    thread_t thread);

/* pull a thread from its wait queue */
extern int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread);

/* wakeup all threads waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    uint64_t *reserved_preposts,
    int priority,
    waitq_lock_state_t lock_state);

/* wakeup one thread waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    uint64_t *reserved_preposts,
    int priority,
    waitq_lock_state_t lock_state);

/* return identity of a thread awakened for a particular <wait_queue,event> */
extern thread_t waitq_wakeup64_identify_locked(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    spl_t *spl,
    uint64_t *reserved_preposts,
    int priority,
    waitq_lock_state_t lock_state);

/* wakeup thread iff it's still waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq,
    event64_t wake_event,
    thread_t thread,
    wait_result_t result,
    waitq_lock_state_t lock_state);

/* clear all preposts generated by the given waitq */
extern int waitq_clear_prepost_locked(struct waitq *waitq);

/* clear all preposts from the given wait queue set */
extern void waitq_set_clear_preposts_locked(struct waitq_set *wqset);

/* unlink the given waitq from all sets - returns unlocked */
extern kern_return_t waitq_unlink_all_unlock(struct waitq *waitq);

/* unlink the given waitq set from all waitqs and waitq sets - returns unlocked */
extern kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset);


/*
 * clear a thread's boosted priority
 * (given via WAITQ_PROMOTE_PRIORITY in the wakeup function)
 */
extern void waitq_clear_promotion_locked(struct waitq *waitq,
    thread_t thread);

/*
 * waitq iteration
 */

enum waitq_iteration_constant {
	WQ_ITERATE_DROPPED             = -4,
	WQ_ITERATE_INVALID             = -3,
	WQ_ITERATE_ABORTED             = -2,
	WQ_ITERATE_FAILURE             = -1,
	WQ_ITERATE_SUCCESS             =  0,
	WQ_ITERATE_CONTINUE            =  1,
	WQ_ITERATE_BREAK               =  2,
	WQ_ITERATE_BREAK_KEEP_LOCKED   =  3,
	WQ_ITERATE_INVALIDATE_CONTINUE =  4,
	WQ_ITERATE_RESTART             =  5,
	WQ_ITERATE_FOUND               =  6,
	WQ_ITERATE_UNLINKED            =  7,
};

/* callback invoked with both 'waitq' and 'wqset' locked */
typedef int (*waitq_iterator_t)(void *ctx, struct waitq *waitq,
    struct waitq_set *wqset);

/* iterate over all sets to which waitq belongs */
extern int waitq_iterate_sets(struct waitq *waitq, void *ctx,
    waitq_iterator_t it);

/* iterate over all waitqs that have preposted to wqset */
extern int waitq_set_iterate_preposts(struct waitq_set *wqset,
    void *ctx, waitq_iterator_t it);

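/*
 * Sketch (illustration only) of a waitq_iterator_t callback; the iterators
 * interpret the WQ_ITERATE_* value the callback returns. 'count_sets_cb'
 * is hypothetical.
 *
 *	static int
 *	count_sets_cb(void *ctx, struct waitq *waitq, struct waitq_set *wqset)
 *	{
 *		// both 'waitq' and 'wqset' are locked when this runs
 *		(*(int *)ctx)++;
 *		return WQ_ITERATE_CONTINUE;
 *	}
 *
 *	int nsets = 0;
 *	int ret = waitq_iterate_sets(wq, &nsets, count_sets_cb);
 *	// ret is WQ_ITERATE_SUCCESS if the walk completed
 */
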
/*
 * prepost reservation
 */
extern uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra,
    waitq_lock_state_t lock_state);

extern void waitq_prepost_release_reserve(uint64_t id);

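/*
 * Sketch (illustration only) of the reservation flow: reserve prepost
 * objects up front, hand them to a *_locked wakeup call via its
 * 'reserved_preposts' argument, then release whatever was not consumed.
 *
 *	uint64_t reserved = waitq_prepost_reserve(wq, 0, WAITQ_KEEP_LOCKED);
 *	waitq_wakeup64_all_locked(wq, event, THREAD_AWAKENED,
 *	                          &reserved, WAITQ_ALL_PRIORITIES,
 *	                          WAITQ_UNLOCK);
 *	waitq_prepost_release_reserve(reserved);
 */
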
#else /* !MACH_KERNEL_PRIVATE */

/*
 * The opaque waitq structure is here mostly for AIO and selinfo,
 * but could potentially be used by other BSD subsystems.
 */
struct waitq { char opaque[WQ_OPAQUE_SIZE]; } __attribute__((aligned(WQ_OPAQUE_ALIGN)));
struct waitq_set { char opaque[WQS_OPAQUE_SIZE]; } __attribute__((aligned(WQS_OPAQUE_ALIGN)));

#endif /* MACH_KERNEL_PRIVATE */


__BEGIN_DECLS

/*
 * waitq init
 */
extern kern_return_t waitq_init(struct waitq *waitq, int policy);
extern void waitq_deinit(struct waitq *waitq);

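/*
 * Usage sketch (illustration only): an embedded wait queue owned by a
 * hypothetical kernel object.
 *
 *	struct waitq my_wq;
 *	kern_return_t kr = waitq_init(&my_wq, SYNC_POLICY_FIFO);
 *	...wait and wake via the APIs below...
 *	waitq_deinit(&my_wq);
 */
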
/*
 * global waitqs
 */
extern struct waitq *_global_eventq(char *event, size_t event_length);
#define global_eventq(event) _global_eventq((char *)&(event), sizeof(event))

extern struct waitq *global_waitq(int index);

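/*
 * Sketch (illustration only): global_eventq() hashes the bytes of the
 * named variable (typically an event64_t value), so waiters and wakers
 * must hash the same value to rendezvous on the same global waitq.
 * 'object_ptr' is hypothetical.
 *
 *	event64_t ev = CAST_EVENT64_T(object_ptr);
 *	struct waitq *wq = global_eventq(ev);
 */
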
/*
 * set alloc/init/free
 */
extern struct waitq_set *waitq_set_alloc(int policy, void *prepost_hook);

extern kern_return_t waitq_set_init(struct waitq_set *wqset,
    int policy, uint64_t *reserved_link,
    void *prepost_hook);

extern void waitq_set_deinit(struct waitq_set *wqset);

extern kern_return_t waitq_set_free(struct waitq_set *wqset);

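/*
 * Sketch (illustration only): lifecycle of a dynamically allocated set.
 *
 *	struct waitq_set *wqs = waitq_set_alloc(SYNC_POLICY_FIFO, NULL);
 *	...link member waitqs, wait, wake (see "set membership" below)...
 *	kern_return_t kr = waitq_set_free(wqs);
 */
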
#if DEVELOPMENT || DEBUG
extern int sysctl_helper_waitq_set_nelem(void);
#if CONFIG_WAITQ_DEBUG
extern uint64_t wqset_id(struct waitq_set *wqset);

struct waitq *wqset_waitq(struct waitq_set *wqset);
#endif /* CONFIG_WAITQ_DEBUG */
#endif /* DEVELOPMENT || DEBUG */


/*
 * set membership
 */
extern uint64_t waitq_link_reserve(struct waitq *waitq);
extern void waitq_set_lazy_init_link(struct waitq_set *wqset);
extern boolean_t waitq_set_should_lazy_init_link(struct waitq_set *wqset);

extern void waitq_link_release(uint64_t id);

extern boolean_t waitq_member(struct waitq *waitq, struct waitq_set *wqset);

/* returns true if the waitq is in at least 1 set */
extern boolean_t waitq_in_set(struct waitq *waitq);

/* on success, consumes a reserved_link reference */
extern kern_return_t waitq_link(struct waitq *waitq,
    struct waitq_set *wqset,
    waitq_lock_state_t lock_state,
    uint64_t *reserved_link);

extern kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset);

extern kern_return_t waitq_unlink_all(struct waitq *waitq);

extern kern_return_t waitq_set_unlink_all(struct waitq_set *wqset);

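/*
 * Sketch (illustration only) of the reserved-link flow: reserve a link
 * object up front (may allocate), attempt the link, then release any
 * unconsumed reservation; on success waitq_link() consumes it.
 *
 *	uint64_t rl = waitq_link_reserve(wq);
 *	kern_return_t kr = waitq_link(wq, wqs, WAITQ_SHOULD_LOCK, &rl);
 *	waitq_link_release(rl);
 */
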
/*
 * preposts
 */
extern void waitq_clear_prepost(struct waitq *waitq);

extern void waitq_set_clear_preposts(struct waitq_set *wqset);

/*
 * interfaces used primarily by the select/kqueue subsystems
 */
extern uint64_t waitq_get_prepost_id(struct waitq *waitq);
extern void waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset);
extern struct waitq *waitq_lock_by_prepost_id(uint64_t wqp_id);

/*
 * waitq attributes
 */
extern int waitq_is_valid(struct waitq *waitq);

extern int waitq_set_is_valid(struct waitq_set *wqset);

extern int waitq_is_global(struct waitq *waitq);

extern int waitq_irq_safe(struct waitq *waitq);

extern struct waitq *waitq_get_safeq(struct waitq *waitq);

#if CONFIG_WAITQ_STATS
/*
 * waitq statistics
 */
#define WAITQ_STATS_VERSION 1
struct wq_table_stats {
	uint32_t version;
	uint32_t table_elements;
	uint32_t table_used_elems;
	uint32_t table_elem_sz;
	uint32_t table_slabs;
	uint32_t table_slab_sz;

	uint64_t table_num_allocs;
	uint64_t table_num_preposts;
	uint64_t table_num_reservations;

	uint64_t table_max_used;
	uint64_t table_avg_used;
	uint64_t table_max_reservations;
	uint64_t table_avg_reservations;
};

extern void waitq_link_stats(struct wq_table_stats *stats);
extern void waitq_prepost_stats(struct wq_table_stats *stats);
#endif /* CONFIG_WAITQ_STATS */

/*
 * higher-level waiting APIs
 */

/* assert intent to wait on <waitq,event64> pair */
extern wait_result_t waitq_assert_wait64(struct waitq *waitq,
    event64_t wait_event,
    wait_interrupt_t interruptible,
    uint64_t deadline);

extern wait_result_t waitq_assert_wait64_leeway(struct waitq *waitq,
    event64_t wait_event,
    wait_interrupt_t interruptible,
    wait_timeout_urgency_t urgency,
    uint64_t deadline,
    uint64_t leeway);

/* wakeup the most appropriate thread waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_one(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    int priority);

/* wakeup all the threads waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_all(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    int priority);

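/*
 * Typical high-level flow (illustrative sketch; 'state' and 'deadline'
 * are hypothetical):
 *
 *	// waiter
 *	wait_result_t wr = waitq_assert_wait64(wq, CAST_EVENT64_T(&state),
 *	                                       THREAD_UNINT, deadline);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *	// waker
 *	waitq_wakeup64_one(wq, CAST_EVENT64_T(&state),
 *	                   THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
 */
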
#ifdef XNU_KERNEL_PRIVATE

/* wakeup a specified thread iff it's waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_thread(struct waitq *waitq,
    event64_t wake_event,
    thread_t thread,
    wait_result_t result);

/* return a reference to the thread that was woken up */
extern thread_t waitq_wakeup64_identify(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    int priority);

/* release the waitq lock */
extern void waitq_unlock(struct waitq *wq);

#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif /* KERNEL_PRIVATE */
#endif /* _WAITQ_H_ */