xnu-2422.100.13.tar.gz
[apple/xnu.git] / osfmk / kern / wait_queue.h
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifdef KERNEL_PRIVATE
30
31 #ifndef _KERN_WAIT_QUEUE_H_
32 #define _KERN_WAIT_QUEUE_H_
33
34 #include <mach/mach_types.h>
35 #include <mach/sync_policy.h>
36 #include <mach/kern_return.h> /* for kern_return_t */
37
38 #include <kern/kern_types.h> /* for wait_queue_t */
39 #include <kern/queue.h>
40
41 #include <sys/cdefs.h>
42
43 #ifdef MACH_KERNEL_PRIVATE
44
45 #include <kern/lock.h>
46 #include <mach/branch_predicates.h>
47
48 #include <machine/cpu_number.h>
49 #include <machine/machine_routines.h> /* machine_timeout_suspended() */
50
51 /*
52  * The event mask is 60 bits wide on 64 bit architectures and 28 bits wide on
53  * 32 bit architectures, so we calculate its size using sizeof(long).
54  * If the bitfield for wq_type and wq_fifo is changed, then the value of
55  * EVENT_MASK_BITS will also change (4 bits are reserved for the flag fields).
56  */
57 #define EVENT_MASK_BITS ((sizeof(long) * 8) - 4)
58
59 /*
60  * Zero out the 4 msb of the event so the result fits in wq_eventmask.
61  */
62 #define CAST_TO_EVENT_MASK(event) (((CAST_DOWN(unsigned long, event)) << 4) >> 4)
63 /*
64  * wait_queue_t
65  * This is the definition of the common event wait queue
66  * that the scheduler APIs understand. It is used
67  * internally by the generalized event waiting mechanism
68  * (assert_wait), and also for items that maintain their
69  * own wait queues (such as ports and semaphores).
70  *
71  * It is not published to other kernel components. They
72  * can create wait queues by calling wait_queue_alloc.
73  *
74  * NOTE: Hardware locks are used to protect event wait
75  * queues since interrupt code is free to post events to
76  * them.
77  */
78 typedef struct wait_queue {
79 unsigned long int /* flags */
80 /* boolean_t */ wq_type:2, /* only public field; see _WAIT_QUEUE_inited / _WAIT_QUEUE_SET_inited */
81 wq_fifo:1, /* fifo wakeup policy? */
82 wq_prepost:1, /* waitq supports prepost? set only */
83 wq_eventmask:EVENT_MASK_BITS; /* NOTE(review): event summary bits, presumably built with CAST_TO_EVENT_MASK — confirm in wait_queue.c */
84 hw_lock_data_t wq_interlock; /* hw interlock (usable from interrupt context; see NOTE above) */
85 queue_head_t wq_queue; /* queue of elements */
86 } WaitQueue;
87
88 /*
89  * wait_queue_set_t
90  * This is the common definition for a set wait queue.
91  * These can be linked as members/elements of multiple regular
92  * wait queues. They have an additional set of linkages to
93  * identify the linkage structures that point to them.
94  */
95 typedef struct wait_queue_set {
96 WaitQueue wqs_wait_queue; /* our wait queue (embedded; its wq_type is _WAIT_QUEUE_SET_inited) */
97 queue_head_t wqs_setlinks; /* links from set perspective */
98 queue_head_t wqs_preposts; /* preposted links not yet consumed */
99 } WaitQueueSet;
100
101 #define wqs_type wqs_wait_queue.wq_type
102 #define wqs_fifo wqs_wait_queue.wq_fifo
103 #define wqs_prepost wqs_wait_queue.wq_prepost
104 #define wqs_queue wqs_wait_queue.wq_queue
105
106 /*
107  * wait_queue_element_t
108  * This structure describes the elements on an event wait
109  * queue. It is the common first fields in a thread shuttle
110  * and wait_queue_link_t. In that way, a wait queue can
111  * consist of both thread shuttle elements and links off of
112  * to other (set) wait queues.
113  *
114  * WARNING: These fields correspond to fields in the thread
115  * shuttle (run queue links and run queue pointer). Any change in
116  * the layout here will have to be matched with a change there.
117  */
118 typedef struct wait_queue_element {
119 queue_chain_t wqe_links; /* link of elements on this queue */
120 void * wqe_type; /* identifies link vs. thread (accessed as wql_type for links) */
121 wait_queue_t wqe_queue; /* queue this element is on */
122 } WaitQueueElement;
123
124 typedef WaitQueueElement *wait_queue_element_t;
125
126 /*
127  * wait_queue_link_t
128  * Specialized wait queue element type for linking set
129  * event waits queues onto a wait queue. In this way, an event
130  * can be constructed so that any thread waiting on any number
131  * of associated wait queues can handle the event, while letting
132  * the thread only be linked on the single wait queue it blocked on.
133  *
134  * One use: ports in multiple portsets. Each thread is queued up
135  * on the portset that it specifically blocked on during a receive
136  * operation. Each port's event queue links in all the portset
137  * event queues of which it is a member. An IPC event post associated
138  * with that port may wake up any thread from any of those portsets,
139  * or one that was waiting locally on the port itself.
140  */
141 typedef struct _wait_queue_link {
142 WaitQueueElement wql_element; /* element on master (common header; see wait_queue_element layout WARNING) */
143 queue_chain_t wql_setlinks; /* element on set */
144 queue_chain_t wql_preposts; /* element on set prepost list; next == NULL means not preposted (see wql_is_preposted) */
145 wait_queue_set_t wql_setqueue; /* set queue this link points at */
146 } WaitQueueLink;
147
148 #define wql_links wql_element.wqe_links
149 #define wql_type wql_element.wqe_type
150 #define wql_queue wql_element.wqe_queue
151
152 #define _WAIT_QUEUE_inited 0x2 /* wq_type value of an initialized plain queue */
153 #define _WAIT_QUEUE_SET_inited 0x3 /* wq_type value of an initialized queue set */
154
155 #define wait_queue_is_queue(wq) \
156 ((wq)->wq_type == _WAIT_QUEUE_inited)
157
158 #define wait_queue_is_set(wqs) \
159 ((wqs)->wqs_type == _WAIT_QUEUE_SET_inited)
160
161 #define wait_queue_is_valid(wq) \
162 (((wq)->wq_type & ~1) == _WAIT_QUEUE_inited) /* true for either a plain queue (0x2) or a set (0x3) */
163
164 #define wait_queue_empty(wq) (queue_empty(&(wq)->wq_queue))
165
166 #define wait_queue_held(wq) (hw_lock_held(&(wq)->wq_interlock))
167 #define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
168
169 /* For x86, the hardware timeout is in TSC units. */
170 #if defined(i386) || defined(x86_64) /* NOTE(review): the lock code below tests __i386__/__x86_64__ — confirm these unadorned spellings are defined by the build */
171 #define hwLockTimeOut LockTimeOutTSC
172 #else
173 #define hwLockTimeOut LockTimeOut
174 #endif
175 /*
176  * wait_queue_lock: spin until wq's interlock is acquired, using double the
177  * standard lock timeout (wait queues tend to iterate over a number of
178  * threads - locking each; a stuck thread lock would otherwise time out at
179  * the wait queue level first, hiding the real problem).
180  */
181
182 static inline void wait_queue_lock(wait_queue_t wq) {
183 if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
184 boolean_t wql_acquired = FALSE;
185
186 while (machine_timeout_suspended()) { /* keep retrying while machine-level timeouts are suspended */
187 #if defined(__i386__) || defined(__x86_64__)
188 /*
189  * i386/x86_64 return with preemption disabled on a timeout for
190  * diagnostic purposes.
191  */
192 mp_enable_preemption();
193 #endif
194 if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
195 break;
196 }
197 if (wql_acquired == FALSE)
198 panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
199 }
200 assert(wait_queue_held(wq));
201 }
202
203 static inline void wait_queue_unlock(wait_queue_t wq) {
204 assert(wait_queue_held(wq));
205 hw_lock_unlock(&(wq)->wq_interlock);
206 }
207
208 #define wqs_lock(wqs) wait_queue_lock(&(wqs)->wqs_wait_queue)
209 #define wqs_unlock(wqs) wait_queue_unlock(&(wqs)->wqs_wait_queue)
210 #define wqs_lock_try(wqs) wait_queue_lock_try(&(wqs)->wqs_wait_queue) /* fixed: was wait_queue__try_lock(), which is not defined anywhere */
211 #define wqs_is_preposted(wqs) ((wqs)->wqs_prepost && !queue_empty(&(wqs)->wqs_preposts))
212
213 #define wql_is_preposted(wql) ((wql)->wql_preposts.next != NULL) /* NULL next is the "not preposted" sentinel */
214 #define wql_clear_prepost(wql) ((wql)->wql_preposts.next = (wql)->wql_preposts.prev = NULL)
215
216 #define wait_queue_assert_possible(thread) \
217 ((thread)->wait_queue == WAIT_QUEUE_NULL) /* thread is not currently queued on any wait queue */
218
219 /* bootstrap interface - can allocate/link wait_queues and sets after calling this */
220 __private_extern__ void wait_queue_bootstrap(void);
221
222 /******** Decomposed interfaces (to build higher level constructs) ***********/
223
224 /* assert intent to wait on a locked wait queue */
225 __private_extern__ wait_result_t wait_queue_assert_wait64_locked(
226 wait_queue_t wait_queue,
227 event64_t wait_event,
228 wait_interrupt_t interruptible,
229 wait_timeout_urgency_t urgency,
230 uint64_t deadline,
231 uint64_t leeway,
232 thread_t thread);
233
234 /* pull a thread from its wait queue */
235 __private_extern__ void wait_queue_pull_thread_locked(
236 wait_queue_t wait_queue,
237 thread_t thread,
238 boolean_t unlock);
239
240 /* wakeup all threads waiting for a particular event on locked queue */
241 __private_extern__ kern_return_t wait_queue_wakeup64_all_locked(
242 wait_queue_t wait_queue,
243 event64_t wake_event,
244 wait_result_t result,
245 boolean_t unlock);
246
247 /* wakeup one thread waiting for a particular event on locked queue */
248 __private_extern__ kern_return_t wait_queue_wakeup64_one_locked(
249 wait_queue_t wait_queue,
250 event64_t wake_event,
251 wait_result_t result,
252 boolean_t unlock);
253
254 /* return identity of a thread awakened for a particular <wait_queue,event> */
255 __private_extern__ thread_t wait_queue_wakeup64_identity_locked(
256 wait_queue_t wait_queue,
257 event64_t wake_event,
258 wait_result_t result,
259 boolean_t unlock);
260
261 /* wakeup thread iff its still waiting for a particular event on locked queue */
262 __private_extern__ kern_return_t wait_queue_wakeup64_thread_locked(
263 wait_queue_t wait_queue,
264 event64_t wake_event,
265 thread_t thread,
266 wait_result_t result,
267 boolean_t unlock);
268
269 extern uint32_t num_wait_queues;
270 extern struct wait_queue *wait_queues;
271 /* The Jenkins "one at a time" hash.
272 * TBD: There may be some value to unrolling here,
273 * depending on the architecture.
274 */
275 static inline uint32_t wq_hash(char *key)
276 {
277 uint32_t hash = 0;
278 size_t i, length = sizeof(char *);
279
280 for (i = 0; i < length; i++) {
281 hash += key[i];
282 hash += (hash << 10);
283 hash ^= (hash >> 6);
284 }
285
286 hash += (hash << 3);
287 hash ^= (hash >> 11);
288 hash += (hash << 15);
289
290 hash &= (num_wait_queues - 1);
291 return hash;
292 }
293
294 #define wait_hash(event) wq_hash((char *)&event)
295
296 #endif /* MACH_KERNEL_PRIVATE */
297
298 __BEGIN_DECLS
299
300 /******** Semi-Public interfaces (not a part of a higher construct) ************/
301
302 extern unsigned int wait_queue_set_size(void);
303 extern unsigned int wait_queue_link_size(void);
304
305 extern kern_return_t wait_queue_init(
306 wait_queue_t wait_queue,
307 int policy);
308
309 extern wait_queue_set_t wait_queue_set_alloc(
310 int policy);
311
312 extern kern_return_t wait_queue_set_init(
313 wait_queue_set_t set_queue,
314 int policy);
315
316 extern kern_return_t wait_queue_set_free(
317 wait_queue_set_t set_queue);
318
319 extern wait_queue_link_t wait_queue_link_alloc(
320 int policy);
321
322 extern kern_return_t wait_queue_link_free(
323 wait_queue_link_t link_element);
324
325 extern kern_return_t wait_queue_link(
326 wait_queue_t wait_queue,
327 wait_queue_set_t set_queue);
328
329 extern kern_return_t wait_queue_link_noalloc(
330 wait_queue_t wait_queue,
331 wait_queue_set_t set_queue,
332 wait_queue_link_t link);
333
334 extern boolean_t wait_queue_member(
335 wait_queue_t wait_queue,
336 wait_queue_set_t set_queue);
337
338 extern kern_return_t wait_queue_unlink(
339 wait_queue_t wait_queue,
340 wait_queue_set_t set_queue);
341
342 extern kern_return_t wait_queue_unlink_all(
343 wait_queue_t wait_queue);
344
345 extern kern_return_t wait_queue_set_unlink_all(
346 wait_queue_set_t set_queue);
347
348 #ifdef XNU_KERNEL_PRIVATE
349 extern kern_return_t wait_queue_set_unlink_one(
350 wait_queue_set_t set_queue,
351 wait_queue_link_t link);
352
353 extern kern_return_t wait_queue_unlink_nofree(
354 wait_queue_t wait_queue,
355 wait_queue_set_t set_queue,
356 wait_queue_link_t *wqlp);
357
358 extern kern_return_t wait_queue_unlink_all_nofree(
359 wait_queue_t wait_queue,
360 queue_t links);
361
362 extern kern_return_t wait_queue_set_unlink_all_nofree(
363 wait_queue_set_t set_queue,
364 queue_t links);
365
366 extern wait_queue_link_t wait_queue_link_allocate(void);
367
368 #endif /* XNU_KERNEL_PRIVATE */
369
370 /* legacy API */
371 kern_return_t wait_queue_sub_init(
372 wait_queue_set_t set_queue,
373 int policy);
374
375 kern_return_t wait_queue_sub_clearrefs(
376 wait_queue_set_t wq_set);
377
378 extern kern_return_t wait_subqueue_unlink_all(
379 wait_queue_set_t set_queue);
380
381 extern wait_queue_t wait_queue_alloc(
382 int policy);
383
384 extern kern_return_t wait_queue_free(
385 wait_queue_t wait_queue);
386
387 /* assert intent to wait on <wait_queue,event64> pair */
388 extern wait_result_t wait_queue_assert_wait64(
389 wait_queue_t wait_queue,
390 event64_t wait_event,
391 wait_interrupt_t interruptible,
392 uint64_t deadline);
393
394 extern wait_result_t wait_queue_assert_wait64_with_leeway(
395 wait_queue_t wait_queue,
396 event64_t wait_event,
397 wait_interrupt_t interruptible,
398 wait_timeout_urgency_t urgency,
399 uint64_t deadline,
400 uint64_t leeway);
401
402 /* wakeup the most appropriate thread waiting on <wait_queue,event64> pair */
403 extern kern_return_t wait_queue_wakeup64_one(
404 wait_queue_t wait_queue,
405 event64_t wake_event,
406 wait_result_t result);
407
408 /* wakeup all the threads waiting on <wait_queue,event64> pair */
409 extern kern_return_t wait_queue_wakeup64_all(
410 wait_queue_t wait_queue,
411 event64_t wake_event,
412 wait_result_t result);
413
414 /* wakeup a specified thread waiting iff waiting on <wait_queue,event64> pair */
415 extern kern_return_t wait_queue_wakeup64_thread(
416 wait_queue_t wait_queue,
417 event64_t wake_event,
418 thread_t thread,
419 wait_result_t result);
420
421 /*
422 * Compatibility Wait Queue APIs based on pointer events instead of 64bit
423 * integer events.
424 */
425
426 /* assert intent to wait on <wait_queue,event> pair */
427 extern wait_result_t wait_queue_assert_wait(
428 wait_queue_t wait_queue,
429 event_t wait_event,
430 wait_interrupt_t interruptible,
431 uint64_t deadline);
432
433 /* assert intent to wait on <wait_queue,event> pair */
434 extern wait_result_t wait_queue_assert_wait_with_leeway(
435 wait_queue_t wait_queue,
436 event_t wait_event,
437 wait_interrupt_t interruptible,
438 wait_timeout_urgency_t urgency,
439 uint64_t deadline,
440 uint64_t leeway);
441
442 /* wakeup the most appropriate thread waiting on <wait_queue,event> pair */
443 extern kern_return_t wait_queue_wakeup_one(
444 wait_queue_t wait_queue,
445 event_t wake_event,
446 wait_result_t result,
447 int priority);
448
449 /* wakeup all the threads waiting on <wait_queue,event> pair */
450 extern kern_return_t wait_queue_wakeup_all(
451 wait_queue_t wait_queue,
452 event_t wake_event,
453 wait_result_t result);
454
455 /* wakeup a specified thread waiting iff waiting on <wait_queue,event> pair */
456 extern kern_return_t wait_queue_wakeup_thread(
457 wait_queue_t wait_queue,
458 event_t wake_event,
459 thread_t thread,
460 wait_result_t result);
461
462 __END_DECLS
463
464 #endif /* _KERN_WAIT_QUEUE_H_ */
465
466 #endif /* KERNEL_PRIVATE */