#ifndef _WAITQ_H_
#define _WAITQ_H_
/*
 * Copyright (c) 2014-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/kern_return.h>           /* for kern_return_t */

#include <kern/kern_types.h>            /* for wait_queue_t */
#include <kern/queue.h>
#include <kern/assert.h>

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
/* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
#include <kern/priority_queue.h>
#endif /* XNU_KERNEL_PRIVATE */

/*
 * Constants and types used in the waitq APIs
 */
#define WAITQ_ALL_PRIORITIES   (-1)
#define WAITQ_PROMOTE_PRIORITY (-2)
#define WAITQ_PROMOTE_ON_WAKE  (-3)

typedef enum e_waitq_lock_state {
	WAITQ_KEEP_LOCKED    = 0x01,
	WAITQ_UNLOCK         = 0x02,
	WAITQ_SHOULD_LOCK    = 0x04,
	WAITQ_ALREADY_LOCKED = 0x08,
	WAITQ_DONT_LOCK      = 0x10,
} waitq_lock_state_t;

/* Opaque sizes and alignment used for struct verification */
#if __arm__ || __arm64__
#define WQ_OPAQUE_ALIGN   __BIGGEST_ALIGNMENT__
#define WQS_OPAQUE_ALIGN  __BIGGEST_ALIGNMENT__
#if __arm__
#define WQ_OPAQUE_SIZE   32
#define WQS_OPAQUE_SIZE  48
#else
#define WQ_OPAQUE_SIZE   40
#define WQS_OPAQUE_SIZE  56
#endif
#elif __x86_64__
#define WQ_OPAQUE_ALIGN   8
#define WQS_OPAQUE_ALIGN  8
#define WQ_OPAQUE_SIZE   48
#define WQS_OPAQUE_SIZE  64
#else
#error Unknown size requirement
#endif

#ifdef MACH_KERNEL_PRIVATE

#include <kern/spl.h>
#include <kern/simple_lock.h>

#include <machine/cpu_number.h>
#include <machine/machine_routines.h> /* machine_timeout_suspended() */

/*
 * The event mask is 57 bits on a 64-bit architecture and 25 bits on a
 * 32-bit architecture, so its size was originally calculated using
 * sizeof(long). If the bitfield holding wq_type, wq_fifo, etc. is changed,
 * the value of _EVENT_MASK_BITS must change with it.
 *
 * New plan: this is an optimization anyway, so 32 bits are stolen from
 * the mask to shrink the waitq object even further.
 */
#define _EVENT_MASK_BITS   ((sizeof(uint32_t) * 8) - 7)

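/*
 * Worked out: the flag word is 32 bits wide and the flags below consume
 * 7 of them (waitq_type takes 2 bits, plus the five 1-bit flags fifo,
 * prepost, irq, isvalid, and turnstile), so _EVENT_MASK_BITS is
 * 32 - 7 == 25, exactly the width left for waitq_eventmask in
 * struct waitq below.
 */
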

enum waitq_type {
	WQT_INVALID = 0,
	WQT_TSPROXY = 0x1,
	WQT_QUEUE   = 0x2,
	WQT_SET     = 0x3,
};

__options_decl(waitq_options_t, uint32_t, {
	WQ_OPTION_NONE    = 0,
	WQ_OPTION_HANDOFF = 1,
});

#if CONFIG_WAITQ_STATS
#define NWAITQ_BTFRAMES 5
struct wq_stats {
	uint64_t waits;
	uint64_t wakeups;
	uint64_t clears;
	uint64_t failed_wakeups;

	uintptr_t last_wait[NWAITQ_BTFRAMES];
	uintptr_t last_wakeup[NWAITQ_BTFRAMES];
	uintptr_t last_failed_wakeup[NWAITQ_BTFRAMES];
};
#endif

/*
 * struct waitq
 *
 * This is the definition of the common event wait queue
 * that the scheduler APIs understand. It is used
 * internally by the generalized event waiting mechanism
 * (assert_wait), and also for items that maintain their
 * own wait queues (such as ports and semaphores).
 *
 * It is not published to other kernel components.
 *
 * NOTE: Hardware locks are used to protect event wait
 * queues since interrupt code is free to post events to
 * them.
 */
struct waitq {
	uint32_t /* flags */
	    waitq_type:2,        /* only public field */
	    waitq_fifo:1,        /* fifo wakeup policy? */
	    waitq_prepost:1,     /* waitq supports prepost? */
	    waitq_irq:1,         /* waitq requires interrupts disabled */
	    waitq_isvalid:1,     /* waitq structure is valid */
	    waitq_turnstile:1,   /* waitq is embedded in a turnstile */
	    waitq_eventmask:_EVENT_MASK_BITS;
	/* the wait queue set (set-of-sets) to which this queue belongs */
#if __arm64__
	hw_lock_bit_t   waitq_interlock;        /* interlock */
#else
	hw_lock_data_t  waitq_interlock;        /* interlock */
#endif /* __arm64__ */

	uint64_t waitq_set_id;
	uint64_t waitq_prepost_id;
	union {
		queue_head_t waitq_queue;                         /* queue of elements - used for waitqs not embedded in turnstiles or ports */
		struct priority_queue_sched_max waitq_prio_queue; /* priority ordered queue of elements - used for waitqs embedded in turnstiles */
		struct {                                          /* used for waitqs embedded in ports */
			struct turnstile *waitq_ts;               /* the receive turnstile of the port */
			union {
				void *waitq_tspriv;               /* non special-reply port: the watchport element for the port */
				int   waitq_priv_pid;             /* special-reply port: pid that copies out the send-once right of the special-reply port */
			};
		};
	};
};

static_assert(sizeof(struct waitq) == WQ_OPAQUE_SIZE, "waitq structure size mismatch");
static_assert(__alignof(struct waitq) == WQ_OPAQUE_ALIGN, "waitq structure alignment mismatch");

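/*
 * Illustrative sketch: which union member is live follows from the waitq's
 * type and flags, using the predicates defined later in this header:
 *
 *	if (waitq_is_turnstile_queue(wq))        // waitq_irq && waitq_turnstile
 *		... threads live on wq->waitq_prio_queue ...
 *	else if (waitq_is_turnstile_proxy(wq))   // WQT_TSPROXY port waitq
 *		... wq->waitq_ts / waitq_tspriv are in use ...
 *	else
 *		... threads live on wq->waitq_queue ...
 */
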
/*
 * struct waitq_set
 *
 * This is the common definition for a set wait queue.
 */
struct waitq_set {
	struct waitq wqset_q;
	uint64_t     wqset_id;
	union {
		uint64_t wqset_prepost_id;
		void    *wqset_prepost_hook;
	};
};

#define WQSET_NOT_LINKED       ((uint64_t)(~0))
static_assert(sizeof(struct waitq_set) == WQS_OPAQUE_SIZE, "waitq_set structure size mismatch");
static_assert(__alignof(struct waitq_set) == WQS_OPAQUE_ALIGN, "waitq_set structure alignment mismatch");

extern void waitq_bootstrap(void);

#define waitq_is_queue(wq) \
	((wq)->waitq_type == WQT_QUEUE)

#define waitq_is_turnstile_proxy(wq) \
	((wq)->waitq_type == WQT_TSPROXY)

#define waitq_is_turnstile_queue(wq) \
	(((wq)->waitq_irq) && (wq)->waitq_turnstile)

#define waitq_is_set(wq) \
	((wq)->waitq_type == WQT_SET && ((struct waitq_set *)(wq))->wqset_id != 0)

#define waitqs_is_set(wqs) \
	(((wqs)->wqset_q.waitq_type == WQT_SET) && ((wqs)->wqset_id != 0))

#define waitq_valid(wq) \
	((wq) != NULL && (wq)->waitq_isvalid)

#define waitqs_is_linked(wqs) \
	(((wqs)->wqset_id != WQSET_NOT_LINKED) && ((wqs)->wqset_id != 0))

/*
 * Invalidate a waitq. The only valid waitq functions to call after this are:
 *	waitq_deinit()
 *	waitq_set_deinit()
 */
extern void waitq_invalidate_locked(struct waitq *wq);

extern lck_grp_t waitq_lck_grp;

#if __arm64__

#define waitq_held(wq) \
	(hw_lock_bit_held(&(wq)->waitq_interlock, LCK_ILOCK))

#define waitq_lock_try(wq) \
	(hw_lock_bit_try(&(wq)->waitq_interlock, LCK_ILOCK, &waitq_lck_grp))

#else

#define waitq_held(wq) \
	(hw_lock_held(&(wq)->waitq_interlock))

#define waitq_lock_try(wq) \
	(hw_lock_try(&(wq)->waitq_interlock, &waitq_lck_grp))

#endif /* __arm64__ */

#define waitq_wait_possible(thread) \
	((thread)->waitq == NULL)

extern void waitq_lock(struct waitq *wq);

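/*
 * Typical interlock usage (sketch only; splsched()/splx() come from
 * <kern/spl.h> and the spl dance is only needed when the waitq is
 * IRQ-safe):
 *
 *	spl_t s = splsched();
 *	waitq_lock(wq);
 *	... examine or modify the waitq ...
 *	waitq_unlock(wq);     // declared under XNU_KERNEL_PRIVATE below
 *	splx(s);
 */
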
#define waitq_set_lock(wqs)             waitq_lock(&(wqs)->wqset_q)
#define waitq_set_unlock(wqs)           waitq_unlock(&(wqs)->wqset_q)
#define waitq_set_lock_try(wqs)         waitq_lock_try(&(wqs)->wqset_q)
#define waitq_set_can_prepost(wqs)      (waitqs_is_set(wqs) && \
	                                 (wqs)->wqset_q.waitq_prepost)
#define waitq_set_maybe_preposted(wqs)  ((wqs)->wqset_q.waitq_prepost && \
	                                 (wqs)->wqset_prepost_id > 0)
#define waitq_set_has_prepost_hook(wqs) (waitqs_is_set(wqs) && \
	                                 !((wqs)->wqset_q.waitq_prepost) && \
	                                 (wqs)->wqset_prepost_hook)

/* assert intent to wait on a locked wait queue */
extern wait_result_t waitq_assert_wait64_locked(struct waitq *waitq,
    event64_t wait_event,
    wait_interrupt_t interruptible,
    wait_timeout_urgency_t urgency,
    uint64_t deadline,
    uint64_t leeway,
    thread_t thread);

/* pull a thread from its wait queue */
extern int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread);

/* wakeup all threads waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    uint64_t *reserved_preposts,
    int priority,
    waitq_lock_state_t lock_state);

/* wakeup one thread waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    uint64_t *reserved_preposts,
    int priority,
    waitq_lock_state_t lock_state,
    waitq_options_t options);

/* return identity of a thread awakened for a particular <wait_queue,event> */
extern thread_t
waitq_wakeup64_identify_locked(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    spl_t *spl,
    uint64_t *reserved_preposts,
    int priority,
    waitq_lock_state_t lock_state);

/* wakeup thread iff it's still waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq,
    event64_t wake_event,
    thread_t thread,
    wait_result_t result,
    waitq_lock_state_t lock_state);

/* clear all preposts generated by the given waitq */
extern int waitq_clear_prepost_locked(struct waitq *waitq);

/* clear all preposts from the given wait queue set */
extern void waitq_set_clear_preposts_locked(struct waitq_set *wqset);

/* unlink the given waitq from all sets - returns unlocked */
extern kern_return_t waitq_unlink_all_unlock(struct waitq *waitq);

/* unlink the given waitq set from all waitqs and waitq sets - returns unlocked */
extern kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset);


/*
 * clear a thread's boosted priority
 * (given via WAITQ_PROMOTE_PRIORITY in the wakeup function)
 */
extern void waitq_clear_promotion_locked(struct waitq *waitq,
    thread_t thread);

/*
 * waitq iteration
 */

enum waitq_iteration_constant {
	WQ_ITERATE_DROPPED              = -4,
	WQ_ITERATE_INVALID              = -3,
	WQ_ITERATE_ABORTED              = -2,
	WQ_ITERATE_FAILURE              = -1,
	WQ_ITERATE_SUCCESS              = 0,
	WQ_ITERATE_CONTINUE             = 1,
	WQ_ITERATE_BREAK                = 2,
	WQ_ITERATE_BREAK_KEEP_LOCKED    = 3,
	WQ_ITERATE_INVALIDATE_CONTINUE  = 4,
	WQ_ITERATE_RESTART              = 5,
	WQ_ITERATE_FOUND                = 6,
	WQ_ITERATE_UNLINKED             = 7,
};

/* callback invoked with both 'waitq' and 'wqset' locked */
typedef int (*waitq_iterator_t)(void *ctx, struct waitq *waitq,
    struct waitq_set *wqset);

/* iterate over all sets to which waitq belongs */
extern int waitq_iterate_sets(struct waitq *waitq, void *ctx,
    waitq_iterator_t it);

/* iterate over all waitqs that have preposted to wqset */
extern int waitq_set_iterate_preposts(struct waitq_set *wqset,
    void *ctx, waitq_iterator_t it);

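/*
 * Sketch of a waitq_iterator_t callback (illustrative only; the names here
 * are hypothetical). The callback returns one of the WQ_ITERATE_* values
 * above to drive the walk:
 *
 *	static int
 *	find_linked_set(void *ctx, struct waitq *waitq, struct waitq_set *wqset)
 *	{
 *		// both 'waitq' and 'wqset' are locked on entry
 *		if (waitqs_is_linked(wqset))
 *			return WQ_ITERATE_FOUND;     // stop, report a match
 *		return WQ_ITERATE_CONTINUE;          // keep walking
 *	}
 *	...
 *	int ret = waitq_iterate_sets(wq, NULL, find_linked_set);
 */
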
/*
 * prepost reservation
 */
extern uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra,
    waitq_lock_state_t lock_state);

extern void waitq_prepost_release_reserve(uint64_t id);

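/*
 * Sketch of the reserve/wakeup/release pattern (illustrative only; the
 * exact semantics live in waitq.c): prepost objects are reserved up front
 * so a later wakeup performed with the waitq lock held never has to
 * allocate, and anything left over is released afterwards:
 *
 *	uint64_t reserved = waitq_prepost_reserve(wq, 0, WAITQ_KEEP_LOCKED);
 *	... decide whom to wake ...
 *	waitq_wakeup64_all_locked(wq, wake_event, THREAD_AWAKENED,
 *	                          &reserved, WAITQ_ALL_PRIORITIES, WAITQ_UNLOCK);
 *	if (reserved)
 *		waitq_prepost_release_reserve(reserved);
 */
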
#else /* !MACH_KERNEL_PRIVATE */

/*
 * The opaque waitq structure is here mostly for AIO and selinfo,
 * but could potentially be used by other BSD subsystems.
 */
struct waitq { char opaque[WQ_OPAQUE_SIZE]; } __attribute__((aligned(WQ_OPAQUE_ALIGN)));
struct waitq_set { char opaque[WQS_OPAQUE_SIZE]; } __attribute__((aligned(WQS_OPAQUE_ALIGN)));

#endif  /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/*
 * waitq init
 */
extern kern_return_t waitq_init(struct waitq *waitq, int policy);
extern void waitq_deinit(struct waitq *waitq);

/*
 * global waitqs
 */
extern struct waitq *_global_eventq(char *event, size_t event_length);
#define global_eventq(event) _global_eventq((char *)&(event), sizeof(event))

extern struct waitq *global_waitq(int index);

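/*
 * Usage sketch for global_eventq() (illustrative; 'object' is a
 * hypothetical wait object): the macro hashes on the address and size of
 * the named lvalue to pick one of the global waitqs:
 *
 *	struct my_object object;
 *	struct waitq *wq = global_eventq(object);
 *	// expands to _global_eventq((char *)&object, sizeof(object))
 */
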
typedef uint16_t waitq_set_prepost_hook_t;

/*
 * set alloc/init/free
 */
extern struct waitq_set *waitq_set_alloc(int policy,
    waitq_set_prepost_hook_t *prepost_hook);

extern kern_return_t waitq_set_init(struct waitq_set *wqset,
    int policy, uint64_t *reserved_link,
    waitq_set_prepost_hook_t *prepost_hook);

extern void waitq_set_deinit(struct waitq_set *wqset);

extern kern_return_t waitq_set_free(struct waitq_set *wqset);

#if DEVELOPMENT || DEBUG
extern int sysctl_helper_waitq_set_nelem(void);
#if CONFIG_WAITQ_DEBUG
extern uint64_t wqset_id(struct waitq_set *wqset);

struct waitq *wqset_waitq(struct waitq_set *wqset);
#endif /* CONFIG_WAITQ_DEBUG */
#endif /* DEVELOPMENT || DEBUG */

/*
 * set membership
 */
extern uint64_t waitq_link_reserve(struct waitq *waitq);
extern void waitq_set_lazy_init_link(struct waitq_set *wqset);
extern boolean_t waitq_set_should_lazy_init_link(struct waitq_set *wqset);

extern void waitq_link_release(uint64_t id);

extern boolean_t waitq_member(struct waitq *waitq, struct waitq_set *wqset);

/* returns true if the waitq is in at least 1 set */
extern boolean_t waitq_in_set(struct waitq *waitq);


/* on success, consumes a reserved_link reference */
extern kern_return_t waitq_link(struct waitq *waitq,
    struct waitq_set *wqset,
    waitq_lock_state_t lock_state,
    uint64_t *reserved_link);

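/*
 * Sketch of linking a waitq into a set (illustrative only): a link object
 * is reserved up front, waitq_link() consumes the reservation on success,
 * and an unconsumed reservation is released:
 *
 *	uint64_t reserved_link = waitq_link_reserve(wq);
 *	kern_return_t kr = waitq_link(wq, wqset, WAITQ_SHOULD_LOCK, &reserved_link);
 *	if (kr != KERN_SUCCESS)
 *		waitq_link_release(reserved_link);   // reservation was not consumed
 */
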
extern kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset);

extern kern_return_t waitq_unlink_all(struct waitq *waitq);

extern kern_return_t waitq_set_unlink_all(struct waitq_set *wqset);

/*
 * preposts
 */
extern void waitq_clear_prepost(struct waitq *waitq);

extern void waitq_set_clear_preposts(struct waitq_set *wqset);

/*
 * interfaces used primarily by the select/kqueue subsystems
 */
extern uint64_t waitq_get_prepost_id(struct waitq *waitq);
extern void waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset);
extern struct waitq *waitq_lock_by_prepost_id(uint64_t wqp_id);

/*
 * waitq attributes
 */
extern int waitq_is_valid(struct waitq *waitq);

extern int waitq_set_is_valid(struct waitq_set *wqset);

extern int waitq_is_global(struct waitq *waitq);

extern int waitq_irq_safe(struct waitq *waitq);

#if CONFIG_WAITQ_STATS
/*
 * waitq statistics
 */
#define WAITQ_STATS_VERSION 1
struct wq_table_stats {
	uint32_t version;
	uint32_t table_elements;
	uint32_t table_used_elems;
	uint32_t table_elem_sz;
	uint32_t table_slabs;
	uint32_t table_slab_sz;

	uint64_t table_num_allocs;
	uint64_t table_num_preposts;
	uint64_t table_num_reservations;

	uint64_t table_max_used;
	uint64_t table_avg_used;
	uint64_t table_max_reservations;
	uint64_t table_avg_reservations;
};

extern void waitq_link_stats(struct wq_table_stats *stats);
extern void waitq_prepost_stats(struct wq_table_stats *stats);
#endif /* CONFIG_WAITQ_STATS */

/*
 * higher-level waiting APIs
 */

/* assert intent to wait on <waitq,event64> pair */
extern wait_result_t waitq_assert_wait64(struct waitq *waitq,
    event64_t wait_event,
    wait_interrupt_t interruptible,
    uint64_t deadline);

extern wait_result_t waitq_assert_wait64_leeway(struct waitq *waitq,
    event64_t wait_event,
    wait_interrupt_t interruptible,
    wait_timeout_urgency_t urgency,
    uint64_t deadline,
    uint64_t leeway);

/* wakeup the most appropriate thread waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_one(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    int priority);

/* wakeup all the threads waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_all(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    int priority);

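/*
 * Typical use of the higher-level APIs (sketch only; thread_block() comes
 * from the scheduler primitives and CAST_EVENT64_T() from the Mach type
 * headers - neither is declared in this file):
 *
 *	// waiting side: a deadline of 0 means wait with no timeout
 *	wait_result_t wr = waitq_assert_wait64(wq, CAST_EVENT64_T(&object),
 *	                                       THREAD_UNINT, 0);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *	// waking side: wake one waiter, no priority promotion
 *	waitq_wakeup64_one(wq, CAST_EVENT64_T(&object),
 *	                   THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
 */
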
#ifdef XNU_KERNEL_PRIVATE

/* wakeup a specified thread iff it's waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_thread(struct waitq *waitq,
    event64_t wake_event,
    thread_t thread,
    wait_result_t result);

/* return a reference to the thread that was woken up */
extern thread_t
waitq_wakeup64_identify(struct waitq *waitq,
    event64_t wake_event,
    wait_result_t result,
    int priority);

/* release the waitq lock */
extern void waitq_unlock(struct waitq *wq);

#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif  /* KERNEL_PRIVATE */
#endif  /* _WAITQ_H_ */