/* osfmk/kern/wait_queue.h — apple/xnu, release xnu-2782.40.9 */
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifdef KERNEL_PRIVATE
30
31 #ifndef _KERN_WAIT_QUEUE_H_
32 #define _KERN_WAIT_QUEUE_H_
33
34 #include <mach/mach_types.h>
35 #include <mach/sync_policy.h>
36 #include <mach/kern_return.h> /* for kern_return_t */
37
38 #include <kern/kern_types.h> /* for wait_queue_t */
39 #include <kern/queue.h>
40 #include <kern/assert.h>
41
42 #include <sys/cdefs.h>
43
44 #ifdef MACH_KERNEL_PRIVATE
45
46 #include <kern/simple_lock.h>
47 #include <mach/branch_predicates.h>
48
49 #include <machine/cpu_number.h>
50 #include <machine/machine_routines.h> /* machine_timeout_suspended() */
51
/*
 * The event mask is 60 bits on a 64-bit architecture and 28 bits on
 * a 32-bit architecture, so we calculate its size using sizeof(long):
 * 4 bits are reserved for the wq_type/wq_fifo/wq_prepost flags and the
 * rest of the word holds the mask.  If the bitfield for wq_type and
 * wq_fifo is changed, then the value of EVENT_MASK_BITS will also change.
 */
#define EVENT_MASK_BITS  ((sizeof(long) * 8) - 4)

/*
 * Zero out the 4 msb of the event so the value fits in the
 * wq_eventmask bitfield.
 */
#define CAST_TO_EVENT_MASK(event) (((CAST_DOWN(unsigned long, event)) << 4) >> 4)
/*
 *	wait_queue_t
 *	This is the definition of the common event wait queue
 *	that the scheduler APIs understand.  It is used
 *	internally by the generalized event waiting mechanism
 *	(assert_wait), and also for items that maintain their
 *	own wait queues (such as ports and semaphores).
 *
 *	It is not published to other kernel components.  They
 *	can create wait queues by calling wait_queue_alloc.
 *
 *	NOTE:  Hardware locks are used to protect event wait
 *	queues since interrupt code is free to post events to
 *	them.
 */
typedef struct wait_queue {
	unsigned long int                     /* flags */
	/* boolean_t */	wq_type:2,            /* only public field; see _WAIT_QUEUE[_SET]_inited */
			wq_fifo:1,            /* fifo wakeup policy? */
			wq_prepost:1,         /* waitq supports prepost? set only */
			wq_eventmask:EVENT_MASK_BITS; /* mask of events hashed onto this queue */
	hw_lock_data_t	wq_interlock;         /* interlock */
	queue_head_t	wq_queue;             /* queue of elements */
} WaitQueue;
88
/*
 *	wait_queue_set_t
 *	This is the common definition for a set wait queue.
 *	These can be linked as members/elements of multiple regular
 *	wait queues.  They have an additional set of linkages to
 *	identify the linkage structures that point to them.
 */
typedef struct wait_queue_set {
	WaitQueue	wqs_wait_queue;		/* our wait queue */
	queue_head_t	wqs_setlinks;		/* links from set perspective */
	queue_head_t	wqs_preposts;		/* preposted links */
} WaitQueueSet;

/* Convenience accessors for the fields of the embedded wait queue. */
#define wqs_type	wqs_wait_queue.wq_type
#define wqs_fifo	wqs_wait_queue.wq_fifo
#define wqs_prepost	wqs_wait_queue.wq_prepost
#define wqs_queue	wqs_wait_queue.wq_queue
106
/*
 *	wait_queue_element_t
 *	This structure describes the elements on an event wait
 *	queue.  It is the common first fields in a thread shuttle
 *	and wait_queue_link_t.  In that way, a wait queue can
 *	consist of both thread shuttle elements and links off of
 *	to other (set) wait queues.
 *
 *	WARNING: These fields correspond to fields in the thread
 *	shuttle (run queue links and run queue pointer). Any change in
 *	the layout here will have to be matched with a change there.
 */
typedef struct wait_queue_element {
	queue_chain_t	wqe_links;	/* link of elements on this queue */
	void *		wqe_type;	/* Identifies link vs. thread */
	wait_queue_t	wqe_queue;	/* queue this element is on */
} WaitQueueElement;

typedef WaitQueueElement *wait_queue_element_t;
126
/*
 *	wait_queue_link_t
 *	Specialized wait queue element type for linking set
 *	event wait queues onto a wait queue.  In this way, an event
 *	can be constructed so that any thread waiting on any number
 *	of associated wait queues can handle the event, while letting
 *	the thread only be linked on the single wait queue it blocked on.
 *
 *	One use: ports in multiple portsets.  Each thread is queued up
 *	on the portset that it specifically blocked on during a receive
 *	operation.  Each port's event queue links in all the portset
 *	event queues of which it is a member.  An IPC event post associated
 *	with that port may wake up any thread from any of those portsets,
 *	or one that was waiting locally on the port itself.
 */
typedef struct _wait_queue_link {
	WaitQueueElement	wql_element;	/* element on master */
	queue_chain_t		wql_setlinks;	/* element on set */
	queue_chain_t		wql_preposts;	/* element on set prepost list */
	wait_queue_set_t	wql_setqueue;	/* set queue */
} WaitQueueLink;

/* Convenience accessors for the fields of the embedded element. */
#define wql_links	wql_element.wqe_links
#define wql_type	wql_element.wqe_type
#define wql_queue	wql_element.wqe_queue

/* Magic wq_type values marking an initialized queue (0x2) or set (0x3). */
#define _WAIT_QUEUE_inited		0x2
#define _WAIT_QUEUE_SET_inited		0x3
155
/* True iff the queue was initialized as a plain wait queue. */
#define wait_queue_is_queue(wq)	\
	((wq)->wq_type == _WAIT_QUEUE_inited)

/* True iff the queue was initialized as a set wait queue. */
#define wait_queue_is_set(wqs)	\
	((wqs)->wqs_type == _WAIT_QUEUE_SET_inited)

/* Valid if initialized as either kind (low bit distinguishes queue/set). */
#define wait_queue_is_valid(wq)	\
	(((wq)->wq_type & ~1) == _WAIT_QUEUE_inited)

#define wait_queue_empty(wq)	(queue_empty(&(wq)->wq_queue))

/* Interlock state queries/acquire-attempt; see wait_queue_lock() below. */
#define wait_queue_held(wq)	(hw_lock_held(&(wq)->wq_interlock))
#define wait_queue_lock_try(wq)	(hw_lock_try(&(wq)->wq_interlock))

/* For x86, the hardware timeout is in TSC units. */
#if defined(i386) || defined(x86_64)
#define	hwLockTimeOut LockTimeOutTSC
#else
#define	hwLockTimeOut LockTimeOut
#endif
/*
 * Double the standard lock timeout, because wait queues tend
 * to iterate over a number of threads - locking each.  If there is
 * a problem with a thread lock, it normally times out at the wait
 * queue level first, hiding the real problem.
 */

static inline void wait_queue_lock(wait_queue_t wq) {
	/* Acquire the interlock with a (doubled) hardware timeout. */
	if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
		boolean_t wql_acquired = FALSE;

		/*
		 * Timed out.  Keep retrying as long as machine timeouts are
		 * suspended (e.g. machine_timeout_suspended() reports the
		 * timeout should not be trusted); otherwise fall through and
		 * treat the failure as a deadlock.
		 */
		while (machine_timeout_suspended()) {
#if defined(__i386__) || defined(__x86_64__)
			/*
			 * i386/x86_64 return with preemption disabled on a timeout for
			 * diagnostic purposes.
			 */
			mp_enable_preemption();
#endif
			if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
				break;
		}
		if (wql_acquired == FALSE)
			panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
	}
	assert(wait_queue_held(wq));
}
203
204 static inline void wait_queue_unlock(wait_queue_t wq) {
205 assert(wait_queue_held(wq));
206 hw_lock_unlock(&(wq)->wq_interlock);
207 }
208
/* Lock/unlock a set queue via its embedded wait queue's interlock. */
#define wqs_lock(wqs)		wait_queue_lock(&(wqs)->wqs_wait_queue)
#define wqs_unlock(wqs)		wait_queue_unlock(&(wqs)->wqs_wait_queue)
/*
 * Fixed: this previously expanded to wait_queue__try_lock(), an
 * identifier defined nowhere; the try-lock macro defined above is
 * wait_queue_lock_try(), so any expansion of wqs_lock_try() would
 * have failed to compile.
 */
#define wqs_lock_try(wqs)	wait_queue_lock_try(&(wqs)->wqs_wait_queue)
/* A set has pending preposts iff it supports prepost and its list is non-empty. */
#define wqs_is_preposted(wqs)	((wqs)->wqs_prepost && !queue_empty(&(wqs)->wqs_preposts))
213
/* A link is preposted iff it is chained onto its set's prepost list;
 * clearing resets both chain pointers to NULL (the "not preposted" state). */
#define wql_is_preposted(wql)	((wql)->wql_preposts.next != NULL)
#define wql_clear_prepost(wql)	((wql)->wql_preposts.next = (wql)->wql_preposts.prev = NULL)

/* A thread may assert a new wait only when not already on a wait queue. */
#define wait_queue_assert_possible(thread) \
	((thread)->wait_queue == WAIT_QUEUE_NULL)
219
/* bootstrap interface - can allocate/link wait_queues and sets after calling this */
__private_extern__ void wait_queue_bootstrap(void);

/******** Decomposed interfaces (to build higher level constructs) ***********/

/* assert intent to wait on a locked wait queue */
__private_extern__ wait_result_t wait_queue_assert_wait64_locked(
			wait_queue_t wait_queue,
			event64_t wait_event,
			wait_interrupt_t interruptible,
			wait_timeout_urgency_t urgency,
			uint64_t deadline,
			uint64_t leeway,
			thread_t thread);

/* pull a thread from its wait queue */
__private_extern__ void wait_queue_pull_thread_locked(
			wait_queue_t wait_queue,
			thread_t thread,
			boolean_t unlock);

/* wakeup all threads waiting for a particular event on locked queue */
__private_extern__ kern_return_t wait_queue_wakeup64_all_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result,
			boolean_t unlock);

/* wakeup one thread waiting for a particular event on locked queue */
__private_extern__ kern_return_t wait_queue_wakeup64_one_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result,
			boolean_t unlock);

/* return identity of a thread awakened for a particular <wait_queue,event> */
__private_extern__ thread_t wait_queue_wakeup64_identity_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result,
			boolean_t unlock);

/* wakeup thread iff it's still waiting for a particular event on locked queue */
__private_extern__ kern_return_t wait_queue_wakeup64_thread_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			thread_t thread,
			wait_result_t result,
			boolean_t unlock);
269
extern uint32_t num_wait_queues;
extern struct wait_queue *wait_queues;

/*
 * The Jenkins "one at a time" hash, applied to the bytes of a
 * pointer-sized key.
 * TBD: There may be some value to unrolling here,
 * depending on the architecture.
 */
static inline uint32_t wq_hash(char *key)
{
	const size_t	nbytes = sizeof(char *);
	uint32_t	h = 0;
	size_t		idx;

	/* Mixing round: fold each key byte into the accumulator. */
	for (idx = 0; idx < nbytes; idx++) {
		h += key[idx];
		h += (h << 10);
		h ^= (h >> 6);
	}

	/* Final avalanche. */
	h += (h << 3);
	h ^= (h >> 11);
	h += (h << 15);

	/*
	 * Reduce to a bucket index.  NOTE(review): the mask only behaves
	 * as a modulo if num_wait_queues is a power of two — presumably
	 * guaranteed by wait_queue_bootstrap(); confirm there.
	 */
	return h & (num_wait_queues - 1);
}

/* Hash the address of the event variable (a pointer-sized key). */
#define wait_hash(event) wq_hash((char *)&event)
296
297 #endif /* MACH_KERNEL_PRIVATE */
298
299 __BEGIN_DECLS
300
/******** Semi-Public interfaces (not a part of a higher construct) ************/

/* sizes of the opaque set and link structures, for external allocation */
extern unsigned int wait_queue_set_size(void);
extern unsigned int wait_queue_link_size(void);

/* initialize a caller-allocated wait queue with the given sync policy */
extern kern_return_t wait_queue_init(
			wait_queue_t wait_queue,
			int policy);

/* allocate and initialize a set wait queue */
extern wait_queue_set_t wait_queue_set_alloc(
			int policy);

/* initialize a caller-allocated set wait queue */
extern kern_return_t wait_queue_set_init(
			wait_queue_set_t set_queue,
			int policy);

extern kern_return_t wait_queue_set_free(
			wait_queue_set_t set_queue);

/* allocate/free a link structure used to join a queue to a set */
extern wait_queue_link_t wait_queue_link_alloc(
			int policy);

extern kern_return_t wait_queue_link_free(
			wait_queue_link_t link_element);

/* make wait_queue a member of set_queue (allocates the link internally) */
extern kern_return_t wait_queue_link(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue);

/* as wait_queue_link(), but using a caller-supplied link structure */
extern kern_return_t wait_queue_link_noalloc(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue,
			wait_queue_link_t link);

/* is wait_queue currently a member of set_queue? */
extern boolean_t wait_queue_member(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue);

/* remove wait_queue from set_queue */
extern kern_return_t wait_queue_unlink(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue);

/* remove wait_queue from all sets it belongs to */
extern kern_return_t wait_queue_unlink_all(
			wait_queue_t wait_queue);

/* remove all member queues from set_queue */
extern kern_return_t wait_queue_set_unlink_all(
			wait_queue_set_t set_queue);
348
#ifdef XNU_KERNEL_PRIVATE
/* unlink one specific link from a set queue */
extern kern_return_t wait_queue_set_unlink_one(
			wait_queue_set_t set_queue,
			wait_queue_link_t link);

/* unlink without freeing the link structure; presumably handed back
 * via wqlp for the caller to free — confirm against implementation */
extern kern_return_t wait_queue_unlink_nofree(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue,
			wait_queue_link_t *wqlp);

/* unlink all set memberships without freeing; unlinked link structures
 * are collected on the caller-supplied 'links' queue */
extern kern_return_t wait_queue_unlink_all_nofree(
			wait_queue_t wait_queue,
			queue_t links);

extern kern_return_t wait_queue_set_unlink_all_nofree(
			wait_queue_set_t set_queue,
			queue_t links);

/* allocate a link structure (no policy argument, unlike wait_queue_link_alloc) */
extern wait_queue_link_t wait_queue_link_allocate(void);

#endif /* XNU_KERNEL_PRIVATE */
370
/* legacy API */
kern_return_t wait_queue_sub_init(
			wait_queue_set_t set_queue,
			int policy);

kern_return_t wait_queue_sub_clearrefs(
			wait_queue_set_t wq_set);

extern kern_return_t wait_subqueue_unlink_all(
			wait_queue_set_t set_queue);

/* allocate and initialize a plain wait queue */
extern wait_queue_t wait_queue_alloc(
			int policy);

extern kern_return_t wait_queue_free(
			wait_queue_t wait_queue);

/* assert intent to wait on <wait_queue,event64> pair */
extern wait_result_t wait_queue_assert_wait64(
			wait_queue_t wait_queue,
			event64_t wait_event,
			wait_interrupt_t interruptible,
			uint64_t deadline);

/* as above, with timeout urgency and leeway arguments */
extern wait_result_t wait_queue_assert_wait64_with_leeway(
			wait_queue_t wait_queue,
			event64_t wait_event,
			wait_interrupt_t interruptible,
			wait_timeout_urgency_t urgency,
			uint64_t deadline,
			uint64_t leeway);

/* wakeup the most appropriate thread waiting on <wait_queue,event64> pair */
extern kern_return_t wait_queue_wakeup64_one(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result);

/* wakeup all the threads waiting on <wait_queue,event64> pair */
extern kern_return_t wait_queue_wakeup64_all(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result);

/* wakeup a specified thread waiting iff waiting on <wait_queue,event64> pair */
extern kern_return_t wait_queue_wakeup64_thread(
			wait_queue_t wait_queue,
			event64_t wake_event,
			thread_t thread,
			wait_result_t result);

/*
 * Compatibility Wait Queue APIs based on pointer events instead of 64bit
 * integer events.
 */

/* assert intent to wait on <wait_queue,event> pair */
extern wait_result_t wait_queue_assert_wait(
			wait_queue_t wait_queue,
			event_t wait_event,
			wait_interrupt_t interruptible,
			uint64_t deadline);

/* as above, with timeout urgency and leeway arguments */
extern wait_result_t wait_queue_assert_wait_with_leeway(
			wait_queue_t wait_queue,
			event_t wait_event,
			wait_interrupt_t interruptible,
			wait_timeout_urgency_t urgency,
			uint64_t deadline,
			uint64_t leeway);

/* wakeup the most appropriate thread waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_one(
			wait_queue_t wait_queue,
			event_t wake_event,
			wait_result_t result,
			int priority);

/* wakeup all the threads waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_all(
			wait_queue_t wait_queue,
			event_t wake_event,
			wait_result_t result);

/* wakeup a specified thread waiting iff waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_thread(
			wait_queue_t wait_queue,
			event_t wake_event,
			thread_t thread,
			wait_result_t result);
462
463 __END_DECLS
464
465 #endif /* _KERN_WAIT_QUEUE_H_ */
466
467 #endif /* KERNEL_PRIVATE */