#ifndef _WAITQ_H_
#define _WAITQ_H_
/*
 * Copyright (c) 2014-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/kern_return.h>	/* for kern_return_t */

#include <kern/kern_types.h>	/* for wait_queue_t */
#include <kern/queue.h>
#include <kern/assert.h>

#include <sys/cdefs.h>

/*
 * Constants and types used in the waitq APIs
 */
#define WAITQ_ALL_PRIORITIES   (-1)
#define WAITQ_PROMOTE_PRIORITY (-2)
#define WAITQ_SELECT_MAX_PRI   (-3)

typedef enum e_waitq_lock_state {
	WAITQ_KEEP_LOCKED    = 0x01,
	WAITQ_UNLOCK         = 0x02,
	WAITQ_SHOULD_LOCK    = 0x04,
	WAITQ_ALREADY_LOCKED = 0x08,
	WAITQ_DONT_LOCK      = 0x10,
} waitq_lock_state_t;

/*
 * The Jenkins "one at a time" hash.
 * TBD: There may be some value to unrolling here,
 * depending on the architecture.
 */
static __inline__ uint32_t
jenkins_hash(char *key, size_t length)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < length; i++) {
		hash += (uint32_t)key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash;
}
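
/*
 * Example (sketch, non-normative): the global waitq code can use this
 * hash to map an event's bytes to a queue index. The table size and
 * names below are hypothetical:
 *
 *	event64_t event = ...;
 *	uint32_t idx = jenkins_hash((char *)&event, sizeof(event))
 *	               % num_global_waitqs;
 *	struct waitq *wq = global_waitq((int)idx);
 */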

/* Opaque sizes and alignment used for struct verification */
#if __x86_64__
#define WQ_OPAQUE_ALIGN   8
#define WQS_OPAQUE_ALIGN  8
#define WQ_OPAQUE_SIZE   48
#define WQS_OPAQUE_SIZE  64
#else
#error Unknown size requirement
#endif

#ifndef MACH_KERNEL_PRIVATE

/*
 * The opaque waitq structure is here mostly for AIO and selinfo,
 * but could potentially be used by other BSD subsystems.
 */
struct waitq { char opaque[WQ_OPAQUE_SIZE]; } __attribute__((aligned(WQ_OPAQUE_ALIGN)));
struct waitq_set { char opaque[WQS_OPAQUE_SIZE]; } __attribute__((aligned(WQS_OPAQUE_ALIGN)));
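
/*
 * Example (sketch, non-normative): a BSD object can embed a waitq
 * without seeing its internals; only size and alignment are exposed
 * here. The structure below is hypothetical:
 *
 *	struct my_bsd_object {
 *		int          state;
 *		struct waitq wait_queue;   // opaque to BSD code
 *	};
 *
 * Such an embedded queue must still be set up with waitq_init() and
 * torn down with waitq_deinit(), declared below.
 */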

#else /* MACH_KERNEL_PRIVATE */

#include <kern/spl.h>
#include <kern/simple_lock.h>
#include <mach/branch_predicates.h>

#include <machine/cpu_number.h>
#include <machine/machine_routines.h>	/* machine_timeout_suspended() */

/*
 * The event mask was 59 bits on 64-bit architectures and 27 bits on
 * 32-bit architectures, so its size was calculated using sizeof(long).
 * If the bitfield for wq_type and wq_fifo is changed, then the value
 * of _EVENT_MASK_BITS must change with it.
 *
 * New plan: this is an optimization anyway, so I'm stealing 32 bits
 * from the mask to shrink the waitq object even further: with the six
 * flag bits carved out of a uint32_t, the mask is now 26 bits wide.
 */
#define _EVENT_MASK_BITS ((sizeof(uint32_t) * 8) - 6)

#define WAITQ_BOOST_PRIORITY 31

enum waitq_type {
	WQT_INVALID = 0,
	WQT_QUEUE   = 0x2,
	WQT_SET     = 0x3,
};

#if CONFIG_WAITQ_STATS
#define NWAITQ_BTFRAMES 5
struct wq_stats {
	uint64_t waits;
	uint64_t wakeups;
	uint64_t clears;
	uint64_t failed_wakeups;

	uintptr_t last_wait[NWAITQ_BTFRAMES];
	uintptr_t last_wakeup[NWAITQ_BTFRAMES];
	uintptr_t last_failed_wakeup[NWAITQ_BTFRAMES];
};
#endif /* CONFIG_WAITQ_STATS */

/*
 * struct waitq
 *
 * This is the definition of the common event wait queue
 * that the scheduler APIs understand. It is used
 * internally by the generalized event waiting mechanism
 * (assert_wait), and also for items that maintain their
 * own wait queues (such as ports and semaphores).
 *
 * It is not published to other kernel components.
 *
 * NOTE: Hardware locks are used to protect event wait
 * queues since interrupt code is free to post events to
 * them.
 */
struct waitq {
	uint32_t /* flags */
		waitq_type:2,    /* only public field */
		waitq_fifo:1,    /* fifo wakeup policy? */
		waitq_prepost:1, /* waitq supports prepost? */
		waitq_irq:1,     /* waitq requires interrupts disabled */
		waitq_isvalid:1, /* waitq structure is valid */
		waitq_eventmask:_EVENT_MASK_BITS;
	hw_lock_data_t waitq_interlock; /* interlock */

	/* the wait queue set (set-of-sets) to which this queue belongs */
	uint64_t waitq_set_id;
	uint64_t waitq_prepost_id;
	queue_head_t waitq_queue; /* queue of elements */
};

static_assert(sizeof(struct waitq) == WQ_OPAQUE_SIZE, "waitq structure size mismatch");
static_assert(__alignof(struct waitq) == WQ_OPAQUE_ALIGN, "waitq structure alignment mismatch");

/*
 * struct waitq_set
 *
 * This is the common definition for a set wait queue.
 */
struct waitq_set {
	struct waitq wqset_q;
	uint64_t     wqset_id;
	union {
		uint64_t wqset_prepost_id;
		void    *wqset_prepost_hook;
	};
};

static_assert(sizeof(struct waitq_set) == WQS_OPAQUE_SIZE, "waitq_set structure size mismatch");
static_assert(__alignof(struct waitq_set) == WQS_OPAQUE_ALIGN, "waitq_set structure alignment mismatch");

extern void waitq_bootstrap(void);

#define waitq_is_queue(wq) \
	((wq)->waitq_type == WQT_QUEUE)

#define waitq_is_set(wq) \
	((wq)->waitq_type == WQT_SET && ((struct waitq_set *)(wq))->wqset_id != 0)

#define waitqs_is_set(wqs) \
	(((wqs)->wqset_q.waitq_type == WQT_SET) && ((wqs)->wqset_id != 0))

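/*
 * Note: WQT_QUEUE (0x2) and WQT_SET (0x3) differ only in bit 0, so the
 * '& ~1' below lets waitq_valid() accept both queues and sets.
 */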
#define waitq_valid(wq) \
	((wq) != NULL && (wq)->waitq_isvalid && ((wq)->waitq_type & ~1) == WQT_QUEUE)

/*
 * Invalidate a waitq. The only valid waitq functions to call after this are:
 *	waitq_deinit()
 *	waitq_set_deinit()
 */
extern void waitq_invalidate_locked(struct waitq *wq);

#define waitq_empty(wq) \
	(queue_empty(&(wq)->waitq_queue))

#define waitq_held(wq) \
	(hw_lock_held(&(wq)->waitq_interlock))

#define waitq_lock_try(wq) \
	(hw_lock_try(&(wq)->waitq_interlock))

#define waitq_wait_possible(thread) \
	((thread)->waitq == NULL)

extern void waitq_lock(struct waitq *wq);
extern void waitq_unlock(struct waitq *wq);

#define waitq_set_lock(wqs)		waitq_lock(&(wqs)->wqset_q)
#define waitq_set_unlock(wqs)		waitq_unlock(&(wqs)->wqset_q)
#define waitq_set_lock_try(wqs)		waitq_lock_try(&(wqs)->wqset_q)
#define waitq_set_can_prepost(wqs)	(waitqs_is_set(wqs) && \
					 (wqs)->wqset_q.waitq_prepost)
#define waitq_set_maybe_preposted(wqs)	((wqs)->wqset_q.waitq_prepost && \
					 (wqs)->wqset_prepost_id > 0)
#define waitq_set_has_prepost_hook(wqs)	(waitqs_is_set(wqs) && \
					 !((wqs)->wqset_q.waitq_prepost) && \
					 (wqs)->wqset_prepost_hook)

/* assert intent to wait on a locked wait queue */
extern wait_result_t waitq_assert_wait64_locked(struct waitq *waitq,
						event64_t wait_event,
						wait_interrupt_t interruptible,
						wait_timeout_urgency_t urgency,
						uint64_t deadline,
						uint64_t leeway,
						thread_t thread);

/* pull a thread from its wait queue */
extern int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread);

/* wakeup all threads waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq,
					       event64_t wake_event,
					       wait_result_t result,
					       uint64_t *reserved_preposts,
					       int priority,
					       waitq_lock_state_t lock_state);

/* wakeup one thread waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq,
					       event64_t wake_event,
					       wait_result_t result,
					       uint64_t *reserved_preposts,
					       int priority,
					       waitq_lock_state_t lock_state);

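/*
 * Example (sketch, non-normative): waking one thread while holding the
 * waitq lock and letting the wakeup path drop it via WAITQ_UNLOCK. The
 * event value and the empty prepost reservation are assumptions for
 * illustration:
 *
 *	uint64_t reserved = 0;   // no pre-allocated prepost objects
 *	waitq_lock(wq);
 *	kr = waitq_wakeup64_one_locked(wq, wake_event, THREAD_AWAKENED,
 *	                               &reserved, WAITQ_ALL_PRIORITIES,
 *	                               WAITQ_UNLOCK);
 *	// 'wq' is unlocked on return because of WAITQ_UNLOCK
 */
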
/* return identity of a thread awakened for a particular <wait_queue,event> */
extern thread_t
waitq_wakeup64_identify_locked(struct waitq *waitq,
			       event64_t wake_event,
			       wait_result_t result,
			       spl_t *spl,
			       uint64_t *reserved_preposts,
			       int priority,
			       waitq_lock_state_t lock_state);

/* wakeup thread iff it's still waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq,
						  event64_t wake_event,
						  thread_t thread,
						  wait_result_t result,
						  waitq_lock_state_t lock_state);

/* clear all preposts generated by the given waitq */
extern int waitq_clear_prepost_locked(struct waitq *waitq);

/* clear all preposts from the given wait queue set */
extern void waitq_set_clear_preposts_locked(struct waitq_set *wqset);

/* unlink the given waitq from all sets - returns unlocked */
extern kern_return_t waitq_unlink_all_unlock(struct waitq *waitq);

/* unlink the given waitq set from all waitqs and waitq sets - returns unlocked */
extern kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset);

/*
 * clear a thread's boosted priority
 * (given via WAITQ_PROMOTE_PRIORITY in the wakeup function)
 */
extern void waitq_clear_promotion_locked(struct waitq *waitq,
					 thread_t thread);

/*
 * waitq iteration
 */

enum waitq_iteration_constant {
	WQ_ITERATE_DROPPED             = -4,
	WQ_ITERATE_INVALID             = -3,
	WQ_ITERATE_ABORTED             = -2,
	WQ_ITERATE_FAILURE             = -1,
	WQ_ITERATE_SUCCESS             =  0,
	WQ_ITERATE_CONTINUE            =  1,
	WQ_ITERATE_BREAK               =  2,
	WQ_ITERATE_BREAK_KEEP_LOCKED   =  3,
	WQ_ITERATE_INVALIDATE_CONTINUE =  4,
	WQ_ITERATE_RESTART             =  5,
	WQ_ITERATE_FOUND               =  6,
	WQ_ITERATE_UNLINKED            =  7,
};

/* callback invoked with both 'waitq' and 'wqset' locked */
typedef int (*waitq_iterator_t)(void *ctx, struct waitq *waitq,
				struct waitq_set *wqset);

/* iterate over all sets to which waitq belongs */
extern int waitq_iterate_sets(struct waitq *waitq, void *ctx,
			      waitq_iterator_t it);

/* iterate over all waitqs that have preposted to wqset */
extern int waitq_set_iterate_preposts(struct waitq_set *wqset,
				      void *ctx, waitq_iterator_t it);
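
/*
 * Example (sketch, non-normative): a counting iterator callback. Both
 * locks are held by the framework when the callback runs, and the
 * WQ_ITERATE_* constants above steer the iteration:
 *
 *	static int
 *	count_sets(void *ctx, struct waitq *waitq, struct waitq_set *wqset)
 *	{
 *		(*(int *)ctx)++;
 *		return WQ_ITERATE_CONTINUE;
 *	}
 *
 *	int nsets = 0;
 *	waitq_iterate_sets(waitq, &nsets, count_sets);
 */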

/*
 * prepost reservation
 */
extern uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra,
				      waitq_lock_state_t lock_state);

extern void waitq_prepost_release_reserve(uint64_t id);
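
/*
 * Example (sketch, non-normative): reserve prepost objects up front so
 * none need to be allocated while IRQ-disabled waitq locks are held,
 * then hand the reservation to a *_locked wakeup and release whatever
 * is left over. The 'extra' count of 0 and the WAITQ_DONT_LOCK choice
 * are assumptions for illustration:
 *
 *	uint64_t reserved = waitq_prepost_reserve(wq, 0, WAITQ_DONT_LOCK);
 *	waitq_lock(wq);
 *	waitq_wakeup64_all_locked(wq, wake_event, THREAD_AWAKENED,
 *	                          &reserved, WAITQ_ALL_PRIORITIES,
 *	                          WAITQ_UNLOCK);
 *	waitq_prepost_release_reserve(reserved);
 */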

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/*
 * waitq init
 */
extern kern_return_t waitq_init(struct waitq *waitq, int policy);
extern void waitq_deinit(struct waitq *waitq);

/*
 * global waitqs
 */
extern struct waitq *_global_eventq(char *event, size_t event_length);
#define global_eventq(event) _global_eventq((char *)&(event), sizeof(event))

extern struct waitq *global_waitq(int index);
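
/*
 * Example (sketch, non-normative): hashing a kernel object's address
 * into one of the global wait queues. CAST_EVENT64_T is defined
 * elsewhere in xnu; the object pointer is an assumption:
 *
 *	event64_t event = CAST_EVENT64_T(object_ptr);
 *	struct waitq *wq = global_eventq(event);
 */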

/*
 * set alloc/init/free
 */
extern struct waitq_set *waitq_set_alloc(int policy, void *prepost_hook);

extern kern_return_t waitq_set_init(struct waitq_set *wqset,
				    int policy, uint64_t *reserved_link,
				    void *prepost_hook);

extern void waitq_set_deinit(struct waitq_set *wqset);

extern kern_return_t waitq_set_free(struct waitq_set *wqset);
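
/*
 * Example (sketch, non-normative): a typical set lifecycle, assuming
 * FIFO policy and no prepost hook:
 *
 *	struct waitq_set *wqset = waitq_set_alloc(SYNC_POLICY_FIFO, NULL);
 *	... link member waitqs with waitq_link(), wait/wakeup ...
 *	waitq_set_free(wqset);
 */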

#if defined(DEVELOPMENT) || defined(DEBUG)
#if CONFIG_WAITQ_DEBUG
extern uint64_t wqset_id(struct waitq_set *wqset);

struct waitq *wqset_waitq(struct waitq_set *wqset);
#endif /* CONFIG_WAITQ_DEBUG */
#endif /* DEVELOPMENT || DEBUG */

/*
 * set membership
 */
extern uint64_t waitq_link_reserve(struct waitq *waitq);

extern void waitq_link_release(uint64_t id);

extern boolean_t waitq_member(struct waitq *waitq, struct waitq_set *wqset);

/* returns true if the waitq is in at least 1 set */
extern boolean_t waitq_in_set(struct waitq *waitq);

/* on success, consumes a reserved_link reference */
extern kern_return_t waitq_link(struct waitq *waitq,
				struct waitq_set *wqset,
				waitq_lock_state_t lock_state,
				uint64_t *reserved_link);

extern kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset);

extern kern_return_t waitq_unlink_all(struct waitq *waitq);

extern kern_return_t waitq_set_unlink_all(struct waitq_set *wqset);
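
/*
 * Example (sketch, non-normative): pre-allocate a link object, then
 * link a waitq into a set. On success waitq_link() consumes the
 * reservation, so only the failure path releases it; WAITQ_SHOULD_LOCK
 * is an assumption for illustration:
 *
 *	uint64_t link = waitq_link_reserve(waitq);
 *	kr = waitq_link(waitq, wqset, WAITQ_SHOULD_LOCK, &link);
 *	if (kr != KERN_SUCCESS)
 *		waitq_link_release(link);
 */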

/*
 * preposts
 */
extern void waitq_clear_prepost(struct waitq *waitq);

extern void waitq_set_clear_preposts(struct waitq_set *wqset);

/*
 * interfaces used primarily by the select/kqueue subsystems
 */
extern uint64_t waitq_get_prepost_id(struct waitq *waitq);
extern void waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset);
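
/*
 * Example (sketch, non-normative): select-style usage, where a caller
 * remembers a waitq's prepost ID while linked and later unlinks by ID
 * without keeping a pointer to the waitq itself:
 *
 *	uint64_t wqp_id = waitq_get_prepost_id(waitq);
 *	...
 *	waitq_unlink_by_prepost_id(wqp_id, wqset);
 */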

/*
 * waitq attributes
 */
extern int waitq_is_valid(struct waitq *waitq);

extern int waitq_set_is_valid(struct waitq_set *wqset);

extern int waitq_is_global(struct waitq *waitq);

extern int waitq_irq_safe(struct waitq *waitq);

#if CONFIG_WAITQ_STATS
/*
 * waitq statistics
 */
#define WAITQ_STATS_VERSION 1
struct wq_table_stats {
	uint32_t version;
	uint32_t table_elements;
	uint32_t table_used_elems;
	uint32_t table_elem_sz;
	uint32_t table_slabs;
	uint32_t table_slab_sz;

	uint64_t table_num_allocs;
	uint64_t table_num_preposts;
	uint64_t table_num_reservations;

	uint64_t table_max_used;
	uint64_t table_avg_used;
	uint64_t table_max_reservations;
	uint64_t table_avg_reservations;
};

extern void waitq_link_stats(struct wq_table_stats *stats);
extern void waitq_prepost_stats(struct wq_table_stats *stats);
#endif /* CONFIG_WAITQ_STATS */

/*
 * higher-level waiting APIs
 */

/* assert intent to wait on <waitq,event64> pair */
extern wait_result_t waitq_assert_wait64(struct waitq *waitq,
					 event64_t wait_event,
					 wait_interrupt_t interruptible,
					 uint64_t deadline);

extern wait_result_t waitq_assert_wait64_leeway(struct waitq *waitq,
						event64_t wait_event,
						wait_interrupt_t interruptible,
						wait_timeout_urgency_t urgency,
						uint64_t deadline,
						uint64_t leeway);

/* wakeup the most appropriate thread waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_one(struct waitq *waitq,
					event64_t wake_event,
					wait_result_t result,
					int priority);

/* wakeup all the threads waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_all(struct waitq *waitq,
					event64_t wake_event,
					wait_result_t result,
					int priority);
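
/*
 * Example (sketch, non-normative): the classic wait/wakeup pairing.
 * A deadline of 0 means no timeout, and CAST_EVENT64_T is defined
 * elsewhere in xnu; the event value is an assumption for illustration.
 *
 * Waiter:
 *	wait_result_t wr;
 *	wr = waitq_assert_wait64(wq, CAST_EVENT64_T(obj), THREAD_UNINT, 0);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 * Waker:
 *	waitq_wakeup64_one(wq, CAST_EVENT64_T(obj),
 *	                   THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
 */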

#ifdef XNU_KERNEL_PRIVATE

/* wakeup a specified thread iff it's waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_thread(struct waitq *waitq,
					   event64_t wake_event,
					   thread_t thread,
					   wait_result_t result);

/* return a reference to the thread that was woken up */
extern thread_t
waitq_wakeup64_identify(struct waitq *waitq,
			event64_t wake_event,
			wait_result_t result,
			int priority);
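
/*
 * Note (assumption): waitq_wakeup64_identify() returns the woken
 * thread with a reference that the caller is expected to drop, e.g.
 * with thread_deallocate(), once it is done with the thread.
 */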

#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif /* KERNEL_PRIVATE */
#endif /* _WAITQ_H_ */