/*
 * osfmk/kern/waitq.h  (from xnu-3248.50.21)
 */
1 #ifndef _WAITQ_H_
2 #define _WAITQ_H_
3 /*
4 * Copyright (c) 2014-2015 Apple Computer, Inc. All rights reserved.
5 *
6 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. The rights granted to you under the License
12 * may not be used to create, or enable the creation or redistribution of,
13 * unlawful or unlicensed copies of an Apple operating system, or to
14 * circumvent, violate, or enable the circumvention or violation of, any
15 * terms of an Apple operating system software license agreement.
16 *
17 * Please obtain a copy of the License at
18 * http://www.opensource.apple.com/apsl/ and read it before using this file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 */
30 #ifdef KERNEL_PRIVATE
31
32 #include <mach/mach_types.h>
33 #include <mach/sync_policy.h>
34 #include <mach/kern_return.h> /* for kern_return_t */
35
36 #include <kern/kern_types.h> /* for wait_queue_t */
37 #include <kern/queue.h>
38 #include <kern/assert.h>
39
40 #include <sys/cdefs.h>
41
42 /*
43 * Constants and types used in the waitq APIs
44 */
45 #define WAITQ_ALL_PRIORITIES (-1)
46 #define WAITQ_PROMOTE_PRIORITY (-2)
47
/*
 * Lock-state directives passed to waitq functions: they tell the callee
 * what to do (or assume) about the waitq interlock around the operation.
 */
typedef enum e_waitq_lock_state {
	WAITQ_KEEP_LOCKED    = 0x01,	/* leave the waitq locked on return */
	WAITQ_UNLOCK         = 0x02,	/* unlock the waitq before returning */
	WAITQ_SHOULD_LOCK    = 0x04,	/* callee should take the waitq lock */
	WAITQ_ALREADY_LOCKED = 0x08,	/* caller already holds the waitq lock */
	WAITQ_DONT_LOCK      = 0x10,	/* perform the operation without locking */
} waitq_lock_state_t;
55
56 #ifndef MACH_KERNEL_PRIVATE
57
58 /*
59 * The opaque waitq structure is here mostly for AIO and selinfo,
60 * but could potentially be used by other BSD subsystems.
61 */
/*
 * Opaque stand-ins sized to hold the real structures defined in the
 * MACH_KERNEL_PRIVATE section below.
 * NOTE(review): these byte counts must track sizeof(struct waitq) /
 * sizeof(struct waitq_set) on each architecture — re-verify whenever
 * the private definitions change.
 */
#ifndef __LP64__
struct waitq { char opaque[32]; };
struct waitq_set { char opaque[48]; };
#else
#if defined(__x86_64__)
struct waitq { char opaque[48]; };
struct waitq_set { char opaque[64]; };
#else
struct waitq { char opaque[40]; };
struct waitq_set { char opaque[56]; };
#endif /* !x86_64 */
#endif /* __LP64__ */
74
75 #else /* MACH_KERNEL_PRIVATE */
76
77 #include <kern/spl.h>
78 #include <kern/simple_lock.h>
79 #include <mach/branch_predicates.h>
80
81 #include <machine/cpu_number.h>
82 #include <machine/machine_routines.h> /* machine_timeout_suspended() */
83
84 /*
 * The event mask is of 59 bits on 64 bit architecture and 27 bits on
 * 32 bit architecture and so we calculate its size using sizeof(long).
87 * If the bitfield for wq_type and wq_fifo is changed, then value of
88 * EVENT_MASK_BITS will also change.
89 *
90 * New plan: this is an optimization anyway, so I'm stealing 32bits
91 * from the mask to shrink the waitq object even further.
92 */
/* bits left in a uint32_t after the 5 flag bits of struct waitq below */
#define _EVENT_MASK_BITS ((sizeof(uint32_t) * 8) - 5)

/* priority applied to a thread woken with WAITQ_PROMOTE_PRIORITY
 * (presumably the maximum promotion priority — confirm against sched) */
#define WAITQ_BOOST_PRIORITY 31
96
/* discriminator stored in waitq->waitq_type */
enum waitq_type {
	WQT_INVALID = 0,	/* uninitialized / torn-down queue */
	WQT_QUEUE   = 0x2,	/* plain wait queue (struct waitq) */
	WQT_SET     = 0x3,	/* wait queue set (struct waitq_set) */
};
102
#if CONFIG_WAITQ_STATS
/* number of backtrace frames captured per recorded event */
#define NWAITQ_BTFRAMES 5

/*
 * Per-waitq event counters plus the backtrace of the most recent
 * wait / wakeup / failed wakeup (only when CONFIG_WAITQ_STATS).
 */
struct wq_stats {
	uint64_t waits;
	uint64_t wakeups;
	uint64_t clears;
	uint64_t failed_wakeups;

	uintptr_t last_wait[NWAITQ_BTFRAMES];
	uintptr_t last_wakeup[NWAITQ_BTFRAMES];
	uintptr_t last_failed_wakeup[NWAITQ_BTFRAMES];
};
#endif
116
117 /*
118 * struct waitq
119 *
120 * This is the definition of the common event wait queue
121 * that the scheduler APIs understand. It is used
 * internally by the generalized event waiting mechanism
123 * (assert_wait), and also for items that maintain their
124 * own wait queues (such as ports and semaphores).
125 *
126 * It is not published to other kernel components.
127 *
128 * NOTE: Hardware locks are used to protect event wait
129 * queues since interrupt code is free to post events to
130 * them.
131 */
struct waitq {
	uint32_t /* flags */
		waitq_type:2,		/* only public field (see enum waitq_type) */
		waitq_fifo:1,		/* fifo wakeup policy? */
		waitq_prepost:1,	/* waitq supports prepost? */
		waitq_irq:1,		/* waitq requires interrupts disabled */
		waitq_eventmask:_EVENT_MASK_BITS; /* hint mask of posted events */
	hw_lock_data_t	waitq_interlock;	/* interlock */

	/* the wait queue set (set-of-sets) to which this queue belongs */
	uint64_t	waitq_set_id;
	uint64_t	waitq_prepost_id;	/* ID of prepost object, 0 if none */
	queue_head_t	waitq_queue;		/* queue of elements */
};
146
147 /*
148 * struct waitq_set
149 *
150 * This is the common definition for a set wait queue.
151 */
struct waitq_set {
	struct waitq wqset_q;		/* embedded waitq; must be first */
	uint64_t     wqset_id;		/* set ID (0 means invalid set) */
	uint64_t     wqset_prepost_id;	/* head of this set's prepost list */
};
157
/* one-time initialization of the global waitq subsystem */
extern void waitq_bootstrap(void);

/* true if wq is a plain (non-set) wait queue */
#define waitq_is_queue(wq) \
	((wq)->waitq_type == WQT_QUEUE)

/* true if wq is a valid (non-zero-ID) wait queue set */
#define waitq_is_set(wq) \
	((wq)->waitq_type == WQT_SET && ((struct waitq_set *)(wq))->wqset_id != 0)

/* same check, starting from a struct waitq_set pointer */
#define waitqs_is_set(wqs) \
	(((wqs)->wqset_q.waitq_type == WQT_SET) && ((wqs)->wqset_id != 0))

/* non-NULL and typed either WQT_QUEUE (0x2) or WQT_SET (0x3) */
#define waitq_valid(wq) \
	((wq) != NULL && ((wq)->waitq_type & ~1) == WQT_QUEUE)

/* no threads currently enqueued */
#define waitq_empty(wq) \
	(queue_empty(&(wq)->waitq_queue))

/* is the waitq interlock held (by anyone)? */
#define waitq_held(wq) \
	(hw_lock_held(&(wq)->waitq_interlock))

/* non-blocking attempt to take the waitq interlock */
#define waitq_lock_try(wq) \
	(hw_lock_try(&(wq)->waitq_interlock))

/* a thread can wait only if it is not already on a waitq */
#define waitq_wait_possible(thread) \
	((thread)->waitq == NULL)

extern void waitq_lock(struct waitq *wq);
extern void waitq_unlock(struct waitq *wq);

/* lock helpers that operate on a set's embedded waitq */
#define waitq_set_lock(wqs)		waitq_lock(&(wqs)->wqset_q)
#define waitq_set_unlock(wqs)		waitq_unlock(&(wqs)->wqset_q)
#define waitq_set_lock_try(wqs)		waitq_lock_try(&(wqs)->wqset_q)
#define waitq_set_can_prepost(wqs)	(waitqs_is_set(wqs) && \
					 (wqs)->wqset_q.waitq_prepost)
#define waitq_set_maybe_preposted(wqs)	((wqs)->wqset_q.waitq_prepost && \
					 (wqs)->wqset_prepost_id > 0)
194
/* assert intent to wait on a locked wait queue */
extern wait_result_t waitq_assert_wait64_locked(struct waitq *waitq,
						event64_t wait_event,
						wait_interrupt_t interruptible,
						wait_timeout_urgency_t urgency,
						uint64_t deadline,
						uint64_t leeway,
						thread_t thread);

/* pull a thread from its wait queue */
extern void waitq_pull_thread_locked(struct waitq *waitq, thread_t thread);

/* wakeup all threads waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq,
					       event64_t wake_event,
					       wait_result_t result,
					       uint64_t *reserved_preposts,
					       int priority,
					       waitq_lock_state_t lock_state);

/* wakeup one thread waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq,
					       event64_t wake_event,
					       wait_result_t result,
					       uint64_t *reserved_preposts,
					       int priority,
					       waitq_lock_state_t lock_state);

/* return identity of a thread awakened for a particular <wait_queue,event> */
extern thread_t waitq_wakeup64_identity_locked(struct waitq *waitq,
					       event64_t wake_event,
					       wait_result_t result,
					       spl_t *spl,
					       uint64_t *reserved_preposts,
					       waitq_lock_state_t lock_state);

/* wakeup thread iff it's still waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq,
						  event64_t wake_event,
						  thread_t thread,
						  wait_result_t result,
						  waitq_lock_state_t lock_state);

/* clear all preposts generated by the given waitq */
extern int waitq_clear_prepost_locked(struct waitq *waitq, spl_t *s);

/* clear all preposts from the given wait queue set */
extern void waitq_set_clear_preposts_locked(struct waitq_set *wqset);

/* unlink the given waitq from all sets */
extern kern_return_t waitq_unlink_all_locked(struct waitq *waitq,
					     uint64_t *old_set_id,
					     spl_t *s,
					     int *dropped_lock);

/*
 * clear a thread's boosted priority
 * (given via WAITQ_PROMOTE_PRIORITY in the wakeup function)
 */
extern void waitq_clear_promotion_locked(struct waitq *waitq,
					 thread_t thread);
256
257 /*
258 * waitq iteration
259 */
260
/* return / control codes used by waitq iterators and their callbacks */
enum waitq_iteration_constant {
	WQ_ITERATE_DROPPED		= -4,	/* a lock was dropped mid-iteration */
	WQ_ITERATE_INVALID		= -3,	/* encountered an invalid object */
	WQ_ITERATE_ABORTED		= -2,	/* iteration aborted by callback */
	WQ_ITERATE_FAILURE		= -1,	/* generic failure */
	WQ_ITERATE_SUCCESS		=  0,	/* completed successfully */
	WQ_ITERATE_CONTINUE		=  1,	/* callback: keep iterating */
	WQ_ITERATE_BREAK		=  2,	/* callback: stop iterating */
	WQ_ITERATE_BREAK_KEEP_LOCKED	=  3,	/* stop, but keep the object locked */
	WQ_ITERATE_INVALIDATE_CONTINUE	=  4,	/* invalidate current entry, continue */
	WQ_ITERATE_RESTART		=  5,	/* restart the iteration */
	WQ_ITERATE_FOUND		=  6,	/* target located */
	WQ_ITERATE_UNLINKED		=  7,	/* entry was unlinked */
};
275
/* callback invoked with both 'waitq' and 'wqset' locked */
typedef int (*waitq_iterator_t)(void *ctx, struct waitq *waitq,
				struct waitq_set *wqset);

/* iterate over all sets to which waitq belongs */
extern int waitq_iterate_sets(struct waitq *waitq, void *ctx,
			      waitq_iterator_t it);

/* iterate over all waitqs that have preposted to wqset */
extern int waitq_set_iterate_preposts(struct waitq_set *wqset,
				      void *ctx, waitq_iterator_t it, spl_t *s);

/*
 * prepost reservation
 */
/* reserve prepost objects ahead of time; returns an opaque reservation id */
extern uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra,
				      waitq_lock_state_t lock_state, spl_t *s);

/* release an unused reservation obtained from waitq_prepost_reserve */
extern void waitq_prepost_release_reserve(uint64_t id);
295
296 #endif /* MACH_KERNEL_PRIVATE */
297
298
__BEGIN_DECLS

/*
 * waitq init
 */
extern kern_return_t waitq_init(struct waitq *waitq, int policy);
extern void waitq_deinit(struct waitq *waitq);

/*
 * global waitqs
 */
/* hash an arbitrary event buffer to one of the global wait queues */
extern struct waitq *_global_eventq(char *event, size_t event_length);
#define global_eventq(event) _global_eventq((char *)&(event), sizeof(event))

/* direct lookup of a global waitq by index */
extern struct waitq *global_waitq(int index);

/*
 * set alloc/init/free
 */
extern struct waitq_set *waitq_set_alloc(int policy);

extern kern_return_t waitq_set_init(struct waitq_set *wqset,
				    int policy, uint64_t *reserved_link);

extern void waitq_set_deinit(struct waitq_set *wqset);

extern kern_return_t waitq_set_free(struct waitq_set *wqset);

#if defined(DEVELOPMENT) || defined(DEBUG)
#if CONFIG_WAITQ_DEBUG
/* debug-only accessors for a set's ID and embedded waitq */
extern uint64_t wqset_id(struct waitq_set *wqset);

struct waitq *wqset_waitq(struct waitq_set *wqset);
#endif /* CONFIG_WAITQ_DEBUG */
#endif /* DEVELOPMENT || DEBUG */
334
335
/*
 * set membership
 */
/* reserve a link object for a later waitq_link call (may block) */
extern uint64_t waitq_link_reserve(struct waitq *waitq);

/* release an unused link reservation */
extern void waitq_link_release(uint64_t id);

/* is waitq a member of the given set? */
extern boolean_t waitq_member(struct waitq *waitq, struct waitq_set *wqset);

/* returns true if the waitq is in at least 1 set */
extern boolean_t waitq_in_set(struct waitq *waitq);


/* on success, consumes a reserved_link reference */
extern kern_return_t waitq_link(struct waitq *waitq,
				struct waitq_set *wqset,
				waitq_lock_state_t lock_state,
				uint64_t *reserved_link);

/* remove waitq from the given set */
extern kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset);

/* remove waitq from all sets to which it belongs */
extern kern_return_t waitq_unlink_all(struct waitq *waitq);

/* remove all member waitqs from the given set */
extern kern_return_t waitq_set_unlink_all(struct waitq_set *wqset);


/*
 * preposts
 */
extern void waitq_clear_prepost(struct waitq *waitq);

extern void waitq_set_clear_preposts(struct waitq_set *wqset);

/*
 * interfaces used primarily by the select/kqueue subsystems
 */
extern uint64_t waitq_get_prepost_id(struct waitq *waitq);
extern void waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset);
374
/*
 * waitq attributes (predicates; non-zero means true)
 */
extern int waitq_is_valid(struct waitq *waitq);

extern int waitq_set_is_valid(struct waitq_set *wqset);

extern int waitq_is_global(struct waitq *waitq);

/* does this waitq require interrupts disabled while locked? */
extern int waitq_irq_safe(struct waitq *waitq);
#if CONFIG_WAITQ_STATS
/*
 * waitq statistics
 */
#define WAITQ_STATS_VERSION 1
/* snapshot of a waitq link/prepost table's size and usage counters */
struct wq_table_stats {
	uint32_t version;		/* WAITQ_STATS_VERSION of this layout */
	uint32_t table_elements;
	uint32_t table_used_elems;
	uint32_t table_elem_sz;
	uint32_t table_slabs;
	uint32_t table_slab_sz;

	uint64_t table_num_allocs;
	uint64_t table_num_preposts;
	uint64_t table_num_reservations;

	uint64_t table_max_used;
	uint64_t table_avg_used;
	uint64_t table_max_reservations;
	uint64_t table_avg_reservations;
};

/* fill 'stats' from the link table / prepost table respectively */
extern void waitq_link_stats(struct wq_table_stats *stats);
extern void waitq_prepost_stats(struct wq_table_stats *stats);
#endif /* CONFIG_WAITQ_STATS */
412
/*
 *
 * higher-level waiting APIs
 *
 */

/* assert intent to wait on <waitq,event64> pair */
extern wait_result_t waitq_assert_wait64(struct waitq *waitq,
					 event64_t wait_event,
					 wait_interrupt_t interruptible,
					 uint64_t deadline);

/* as above, with timeout urgency and leeway for timer coalescing */
extern wait_result_t waitq_assert_wait64_leeway(struct waitq *waitq,
						event64_t wait_event,
						wait_interrupt_t interruptible,
						wait_timeout_urgency_t urgency,
						uint64_t deadline,
						uint64_t leeway);

/* wakeup the most appropriate thread waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_one(struct waitq *waitq,
					event64_t wake_event,
					wait_result_t result,
					int priority);

/* wakeup all the threads waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_all(struct waitq *waitq,
					event64_t wake_event,
					wait_result_t result,
					int priority);

/* wakeup a specified thread iff it's waiting on <waitq,event64> pair */
extern kern_return_t waitq_wakeup64_thread(struct waitq *waitq,
					   event64_t wake_event,
					   thread_t thread,
					   wait_result_t result);
__END_DECLS
450
451 #endif /* KERNEL_PRIVATE */
452 #endif /* _WAITQ_H_ */