#ifndef _WAITQ_H_
#define _WAITQ_H_
/*
 * Copyright (c) 2014-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/kern_return.h>		/* for kern_return_t */

#include <kern/kern_types.h>		/* for wait_queue_t */
#include <kern/queue.h>
#include <kern/assert.h>

#include <sys/cdefs.h>

/*
 * Constants and types used in the waitq APIs
 */
#define WAITQ_ALL_PRIORITIES   (-1)
#define WAITQ_PROMOTE_PRIORITY (-2)
#define WAITQ_SELECT_MAX_PRI   (-3)

typedef enum e_waitq_lock_state {
	WAITQ_KEEP_LOCKED    = 0x01,
	WAITQ_UNLOCK         = 0x02,
	WAITQ_SHOULD_LOCK    = 0x04,
	WAITQ_ALREADY_LOCKED = 0x08,
	WAITQ_DONT_LOCK      = 0x10,
} waitq_lock_state_t;
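
/*
 * Example (illustrative, not part of the original API contract): the
 * lock-state constants tell the *_locked wakeup functions declared
 * further below how to treat the waitq interlock. A hypothetical
 * caller that already holds the lock and wants it dropped on return
 * might write:
 *
 *	kern_return_t kr;
 *	waitq_lock(wq);
 *	kr = waitq_wakeup64_all_locked(wq, event, THREAD_AWAKENED,
 *	                               NULL, WAITQ_ALL_PRIORITIES,
 *	                               WAITQ_UNLOCK);
 *	(wq is unlocked when the call returns)
 */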

/*
 * The Jenkins "one at a time" hash.
 * TBD: There may be some value to unrolling here,
 * depending on the architecture.
 */
static __inline__ uint32_t
jenkins_hash(char *key, size_t length)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < length; i++) {
		hash += (uint32_t)key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash;
}
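
/*
 * Example (hypothetical): hashing the raw bytes of a key into a
 * bucket index; NUM_BUCKETS is illustrative and not defined here.
 * The global_eventq() machinery below presumably applies this hash
 * to an event's bytes in the same way.
 *
 *	uint32_t bucket = jenkins_hash((char *)&ev, sizeof(ev)) % NUM_BUCKETS;
 */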

/* Opaque sizes and alignment used for struct verification */
#if __arm__ || __arm64__
#define WQ_OPAQUE_ALIGN   __BIGGEST_ALIGNMENT__
#define WQS_OPAQUE_ALIGN  __BIGGEST_ALIGNMENT__
#if __arm__
#define WQ_OPAQUE_SIZE   32
#define WQS_OPAQUE_SIZE  48
#else
#define WQ_OPAQUE_SIZE   40
#define WQS_OPAQUE_SIZE  56
#endif
#elif __x86_64__
#define WQ_OPAQUE_ALIGN   8
#define WQS_OPAQUE_ALIGN  8
#define WQ_OPAQUE_SIZE   48
#define WQS_OPAQUE_SIZE  64
#else
#error Unknown size requirement
#endif

#ifdef MACH_KERNEL_PRIVATE

#include <kern/spl.h>
#include <kern/simple_lock.h>
#include <mach/branch_predicates.h>

#include <machine/cpu_number.h>
#include <machine/machine_routines.h>	/* machine_timeout_suspended() */

/*
 * The event mask is 59 bits on 64-bit architectures and 27 bits on
 * 32-bit architectures, so its size was originally derived from
 * sizeof(long). If the bitfields for wq_type and wq_fifo change,
 * then the value of _EVENT_MASK_BITS must change with them.
 *
 * New plan: this is an optimization anyway, so I'm stealing 32 bits
 * from the mask to shrink the waitq object even further.
 */
#define _EVENT_MASK_BITS   ((sizeof(uint32_t) * 8) - 6)

#define WAITQ_BOOST_PRIORITY 31

enum waitq_type {
	WQT_INVALID = 0,
	WQT_QUEUE   = 0x2,
	WQT_SET     = 0x3,
};

#if CONFIG_WAITQ_STATS
#define NWAITQ_BTFRAMES 5
struct wq_stats {
	uint64_t waits;
	uint64_t wakeups;
	uint64_t clears;
	uint64_t failed_wakeups;

	uintptr_t last_wait[NWAITQ_BTFRAMES];
	uintptr_t last_wakeup[NWAITQ_BTFRAMES];
	uintptr_t last_failed_wakeup[NWAITQ_BTFRAMES];
};
#endif

/*
 * struct waitq
 *
 * This is the definition of the common event wait queue
 * that the scheduler APIs understand. It is used
 * internally by the generalized event waiting mechanism
 * (assert_wait), and also for items that maintain their
 * own wait queues (such as ports and semaphores).
 *
 * It is not published to other kernel components.
 *
 * NOTE: Hardware locks are used to protect event wait
 * queues since interrupt code is free to post events to
 * them.
 */
struct waitq {
	uint32_t /* flags */
	    waitq_type:2,    /* only public field */
	    waitq_fifo:1,    /* fifo wakeup policy? */
	    waitq_prepost:1, /* waitq supports prepost? */
	    waitq_irq:1,     /* waitq requires interrupts disabled */
	    waitq_isvalid:1, /* waitq structure is valid */
	    waitq_eventmask:_EVENT_MASK_BITS;
#if __arm64__
	hw_lock_bit_t waitq_interlock;  /* interlock */
#else
	hw_lock_data_t waitq_interlock; /* interlock */
#endif /* __arm64__ */

	/* ID of the wait queue set (set-of-sets) to which this queue belongs */
	uint64_t waitq_set_id;
	uint64_t waitq_prepost_id;
	queue_head_t waitq_queue;       /* queue of elements */
};

static_assert(sizeof(struct waitq) == WQ_OPAQUE_SIZE, "waitq structure size mismatch");
static_assert(__alignof(struct waitq) == WQ_OPAQUE_ALIGN, "waitq structure alignment mismatch");

/*
 * struct waitq_set
 *
 * This is the common definition for a set wait queue.
 */
struct waitq_set {
	struct waitq wqset_q;
	uint64_t     wqset_id;
	union {
		uint64_t wqset_prepost_id;
		void    *wqset_prepost_hook;
	};
};

static_assert(sizeof(struct waitq_set) == WQS_OPAQUE_SIZE, "waitq_set structure size mismatch");
static_assert(__alignof(struct waitq_set) == WQS_OPAQUE_ALIGN, "waitq_set structure alignment mismatch");

extern void waitq_bootstrap(void);

#define waitq_is_queue(wq) \
	((wq)->waitq_type == WQT_QUEUE)

#define waitq_is_set(wq) \
	((wq)->waitq_type == WQT_SET && ((struct waitq_set *)(wq))->wqset_id != 0)

#define waitqs_is_set(wqs) \
	(((wqs)->wqset_q.waitq_type == WQT_SET) && ((wqs)->wqset_id != 0))

#define waitq_valid(wq) \
	((wq) != NULL && (wq)->waitq_isvalid && ((wq)->waitq_type & ~1) == WQT_QUEUE)

/*
 * Invalidate a waitq. The only valid waitq functions to call after this are:
 *	waitq_deinit()
 *	waitq_set_deinit()
 */
extern void waitq_invalidate_locked(struct waitq *wq);

#define waitq_empty(wq) \
	(queue_empty(&(wq)->waitq_queue))

#if __arm64__

#define waitq_held(wq) \
	(hw_lock_bit_held(&(wq)->waitq_interlock, LCK_ILOCK))

#define waitq_lock_try(wq) \
	(hw_lock_bit_try(&(wq)->waitq_interlock, LCK_ILOCK))

#else

#define waitq_held(wq) \
	(hw_lock_held(&(wq)->waitq_interlock))

#define waitq_lock_try(wq) \
	(hw_lock_try(&(wq)->waitq_interlock))

#endif /* __arm64__ */

#define waitq_wait_possible(thread) \
	((thread)->waitq == NULL)

extern void waitq_lock(struct waitq *wq);

#define waitq_set_lock(wqs)		waitq_lock(&(wqs)->wqset_q)
#define waitq_set_unlock(wqs)		waitq_unlock(&(wqs)->wqset_q)
#define waitq_set_lock_try(wqs)		waitq_lock_try(&(wqs)->wqset_q)
#define waitq_set_can_prepost(wqs)	(waitqs_is_set(wqs) && \
					 (wqs)->wqset_q.waitq_prepost)
#define waitq_set_maybe_preposted(wqs)	((wqs)->wqset_q.waitq_prepost && \
					 (wqs)->wqset_prepost_id > 0)
#define waitq_set_has_prepost_hook(wqs)	(waitqs_is_set(wqs) && \
					 !((wqs)->wqset_q.waitq_prepost) && \
					 (wqs)->wqset_prepost_hook)

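/*
 * Example (illustrative): the try-lock variant lets a caller back off
 * instead of spinning on the interlock:
 *
 *	if (waitq_lock_try(wq)) {
 *		... inspect or modify queue state ...
 *		waitq_unlock(wq);
 *	}
 */
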
/* assert intent to wait on a locked wait queue */
extern wait_result_t waitq_assert_wait64_locked(struct waitq *waitq,
						event64_t wait_event,
						wait_interrupt_t interruptible,
						wait_timeout_urgency_t urgency,
						uint64_t deadline,
						uint64_t leeway,
						thread_t thread);

/* pull a thread from its wait queue */
extern int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread);

/* wakeup all threads waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq,
					       event64_t wake_event,
					       wait_result_t result,
					       uint64_t *reserved_preposts,
					       int priority,
					       waitq_lock_state_t lock_state);

/* wakeup one thread waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq,
					       event64_t wake_event,
					       wait_result_t result,
					       uint64_t *reserved_preposts,
					       int priority,
					       waitq_lock_state_t lock_state);

/* return identity of a thread awakened for a particular <wait_queue,event> */
extern thread_t
waitq_wakeup64_identify_locked(struct waitq *waitq,
			       event64_t wake_event,
			       wait_result_t result,
			       spl_t *spl,
			       uint64_t *reserved_preposts,
			       int priority,
			       waitq_lock_state_t lock_state);

/* wakeup a thread iff it's still waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq,
						  event64_t wake_event,
						  thread_t thread,
						  wait_result_t result,
						  waitq_lock_state_t lock_state);

/* clear all preposts generated by the given waitq */
extern int waitq_clear_prepost_locked(struct waitq *waitq);

/* clear all preposts from the given wait queue set */
extern void waitq_set_clear_preposts_locked(struct waitq_set *wqset);

/* unlink the given waitq from all sets - returns unlocked */
extern kern_return_t waitq_unlink_all_unlock(struct waitq *waitq);

/* unlink the given waitq set from all waitqs and waitq sets - returns unlocked */
extern kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset);

/*
 * clear a thread's boosted priority
 * (given via WAITQ_PROMOTE_PRIORITY in the wakeup function)
 */
extern void waitq_clear_promotion_locked(struct waitq *waitq,
					 thread_t thread);

/*
 * waitq iteration
 */

enum waitq_iteration_constant {
	WQ_ITERATE_DROPPED             = -4,
	WQ_ITERATE_INVALID             = -3,
	WQ_ITERATE_ABORTED             = -2,
	WQ_ITERATE_FAILURE             = -1,
	WQ_ITERATE_SUCCESS             =  0,
	WQ_ITERATE_CONTINUE            =  1,
	WQ_ITERATE_BREAK               =  2,
	WQ_ITERATE_BREAK_KEEP_LOCKED   =  3,
	WQ_ITERATE_INVALIDATE_CONTINUE =  4,
	WQ_ITERATE_RESTART             =  5,
	WQ_ITERATE_FOUND               =  6,
	WQ_ITERATE_UNLINKED            =  7,
};

/* callback invoked with both 'waitq' and 'wqset' locked */
typedef int (*waitq_iterator_t)(void *ctx, struct waitq *waitq,
				struct waitq_set *wqset);

/* iterate over all sets to which waitq belongs */
extern int waitq_iterate_sets(struct waitq *waitq, void *ctx,
			      waitq_iterator_t it);

/* iterate over all waitqs that have preposted to wqset */
extern int waitq_set_iterate_preposts(struct waitq_set *wqset,
				      void *ctx, waitq_iterator_t it);
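
/*
 * Example (hypothetical iterator): a callback that counts the sets a
 * waitq belongs to; both queues are locked when the callback runs.
 *
 *	static int
 *	count_sets(void *ctx, struct waitq *waitq, struct waitq_set *wqset)
 *	{
 *		(*(int *)ctx)++;
 *		return WQ_ITERATE_CONTINUE;
 *	}
 *
 *	int nsets = 0;
 *	waitq_iterate_sets(waitq, &nsets, count_sets);
 */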

/*
 * prepost reservation
 */
extern uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra,
				      waitq_lock_state_t lock_state);

extern void waitq_prepost_release_reserve(uint64_t id);

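/*
 * Example (illustrative sketch of the reserve/release pattern; the
 * exact lock-state handling is an assumption): a caller reserves
 * prepost objects up front, hands the reservation to a wakeup call,
 * then releases whatever went unused:
 *
 *	uint64_t reserved = waitq_prepost_reserve(waitq, 0, WAITQ_KEEP_LOCKED);
 *	kr = waitq_wakeup64_one_locked(waitq, event, THREAD_AWAKENED,
 *	                               &reserved, WAITQ_ALL_PRIORITIES,
 *	                               WAITQ_UNLOCK);
 *	if (reserved)
 *		waitq_prepost_release_reserve(reserved);
 */
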
#else /* !MACH_KERNEL_PRIVATE */

/*
 * The opaque waitq structure is here mostly for AIO and selinfo,
 * but could potentially be used by other BSD subsystems.
 */
struct waitq { char opaque[WQ_OPAQUE_SIZE]; } __attribute__((aligned(WQ_OPAQUE_ALIGN)));
struct waitq_set { char opaque[WQS_OPAQUE_SIZE]; } __attribute__((aligned(WQS_OPAQUE_ALIGN)));

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/*
 * waitq init
 */
extern kern_return_t waitq_init(struct waitq *waitq, int policy);
extern void waitq_deinit(struct waitq *waitq);

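/*
 * Example (illustrative): a waitq is typically embedded in a larger
 * object and initialized with a policy from <mach/sync_policy.h>;
 * 'my_object' is hypothetical:
 *
 *	struct my_object {
 *		struct waitq mo_waitq;
 *		...
 *	};
 *
 *	waitq_init(&obj->mo_waitq, SYNC_POLICY_FIFO);
 *	...
 *	waitq_deinit(&obj->mo_waitq);
 */
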
/*
 * global waitqs
 */
extern struct waitq *_global_eventq(char *event, size_t event_length);
#define global_eventq(event) _global_eventq((char *)&(event), sizeof(event))

extern struct waitq *global_waitq(int index);

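/*
 * Example (illustrative): global_eventq() hashes the bytes of the
 * event variable itself to pick one of the global wait queues:
 *
 *	event_t ev = (event_t)&some_kernel_object;	(hypothetical)
 *	struct waitq *wq = global_eventq(ev);
 */
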
/*
 * set alloc/init/free
 */
extern struct waitq_set *waitq_set_alloc(int policy, void *prepost_hook);

extern kern_return_t waitq_set_init(struct waitq_set *wqset,
				    int policy, uint64_t *reserved_link,
				    void *prepost_hook);

extern void waitq_set_deinit(struct waitq_set *wqset);

extern kern_return_t waitq_set_free(struct waitq_set *wqset);

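/*
 * Example (illustrative): allocate a set, then free it when done.
 * SYNC_POLICY_PREPOST is assumed to be the policy bit that enables
 * preposts; a NULL prepost_hook requests none.
 *
 *	struct waitq_set *wqset = waitq_set_alloc(SYNC_POLICY_PREPOST, NULL);
 *	...
 *	waitq_set_free(wqset);
 */
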
#if DEVELOPMENT || DEBUG
#if CONFIG_WAITQ_DEBUG
extern uint64_t wqset_id(struct waitq_set *wqset);

struct waitq *wqset_waitq(struct waitq_set *wqset);
#endif /* CONFIG_WAITQ_DEBUG */
#endif /* DEVELOPMENT || DEBUG */

/*
 * set membership
 */
extern uint64_t waitq_link_reserve(struct waitq *waitq);

extern void waitq_link_release(uint64_t id);

extern boolean_t waitq_member(struct waitq *waitq, struct waitq_set *wqset);

/* returns true if the waitq is in at least 1 set */
extern boolean_t waitq_in_set(struct waitq *waitq);

/* on success, consumes a reserved_link reference */
extern kern_return_t waitq_link(struct waitq *waitq,
				struct waitq_set *wqset,
				waitq_lock_state_t lock_state,
				uint64_t *reserved_link);

extern kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset);

extern kern_return_t waitq_unlink_all(struct waitq *waitq);

extern kern_return_t waitq_set_unlink_all(struct waitq_set *wqset);

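/*
 * Example (illustrative): linking with a pre-reserved link object.
 * On success waitq_link() consumes the reservation, so only an
 * unconsumed reservation is released afterwards:
 *
 *	uint64_t link = waitq_link_reserve(waitq);
 *	kr = waitq_link(waitq, wqset, WAITQ_SHOULD_LOCK, &link);
 *	if (link)
 *		waitq_link_release(link);
 *	...
 *	waitq_unlink(waitq, wqset);
 */
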
/*
 * preposts
 */
extern void waitq_clear_prepost(struct waitq *waitq);

extern void waitq_set_clear_preposts(struct waitq_set *wqset);

/*
 * interfaces used primarily by the select/kqueue subsystems
 */
extern uint64_t waitq_get_prepost_id(struct waitq *waitq);
extern void waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset);
extern struct waitq *waitq_lock_by_prepost_id(uint64_t wqp_id);

/*
 * waitq attributes
 */
extern int waitq_is_valid(struct waitq *waitq);

extern int waitq_set_is_valid(struct waitq_set *wqset);

extern int waitq_is_global(struct waitq *waitq);

extern int waitq_irq_safe(struct waitq *waitq);

#if CONFIG_WAITQ_STATS
/*
 * waitq statistics
 */
#define WAITQ_STATS_VERSION 1
struct wq_table_stats {
	uint32_t version;
	uint32_t table_elements;
	uint32_t table_used_elems;
	uint32_t table_elem_sz;
	uint32_t table_slabs;
	uint32_t table_slab_sz;

	uint64_t table_num_allocs;
	uint64_t table_num_preposts;
	uint64_t table_num_reservations;

	uint64_t table_max_used;
	uint64_t table_avg_used;
	uint64_t table_max_reservations;
	uint64_t table_avg_reservations;
};

extern void waitq_link_stats(struct wq_table_stats *stats);
extern void waitq_prepost_stats(struct wq_table_stats *stats);
#endif /* CONFIG_WAITQ_STATS */

/*
 * higher-level waiting APIs
 */

/* assert intent to wait on <waitq,event64> pair */
extern wait_result_t waitq_assert_wait64(struct waitq *waitq,
					 event64_t wait_event,
					 wait_interrupt_t interruptible,
					 uint64_t deadline);

extern wait_result_t waitq_assert_wait64_leeway(struct waitq *waitq,
						event64_t wait_event,
						wait_interrupt_t interruptible,
						wait_timeout_urgency_t urgency,
						uint64_t deadline,
						uint64_t leeway);

/* wakeup the most appropriate thread waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_one(struct waitq *waitq,
					event64_t wake_event,
					wait_result_t result,
					int priority);

/* wakeup all the threads waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_all(struct waitq *waitq,
					event64_t wake_event,
					wait_result_t result,
					int priority);

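/*
 * Example (illustrative): the classic wait/wake cycle with the
 * high-level APIs. 'my_waitq' and 'obj' are hypothetical; a deadline
 * of 0 is assumed to mean "no timeout", and CAST_EVENT64_T is the
 * kernel's pointer-to-event64_t conversion.
 *
 *	The waiter:
 *	wait_result_t wr;
 *	wr = waitq_assert_wait64(&my_waitq, CAST_EVENT64_T(obj),
 *	                         THREAD_UNINT, 0);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *	The waker:
 *	waitq_wakeup64_one(&my_waitq, CAST_EVENT64_T(obj),
 *	                   THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
 */
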
#ifdef XNU_KERNEL_PRIVATE

/* wakeup a specified thread iff it's waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_thread(struct waitq *waitq,
					   event64_t wake_event,
					   thread_t thread,
					   wait_result_t result);

/* return a reference to the thread that was woken up */
extern thread_t
waitq_wakeup64_identify(struct waitq *waitq,
			event64_t wake_event,
			wait_result_t result,
			int priority);

/* release the waitq lock */
extern void waitq_unlock(struct waitq *wq);

#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif /* KERNEL_PRIVATE */
#endif /* _WAITQ_H_ */