/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef KERNEL_PRIVATE

#ifndef _KERN_WAIT_QUEUE_H_
#define _KERN_WAIT_QUEUE_H_

#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/kern_return.h>           /* for kern_return_t */

#include <kern/kern_types.h>            /* for wait_queue_t */

#include <sys/cdefs.h>

#ifdef MACH_KERNEL_PRIVATE

#include <kern/lock.h>
#include <kern/queue.h>
#include <machine/cpu_number.h>

/*
 *	wait_queue_t
 *	This is the definition of the common event wait queue
 *	that the scheduler APIs understand.  It is used
 *	internally by the generalized event waiting mechanism
 *	(assert_wait), and also for items that maintain their
 *	own wait queues (such as ports and semaphores).
 *
 *	It is not published to other kernel components.  They
 *	can create wait queues by calling wait_queue_alloc.
 *
 *	NOTE:  Hardware locks are used to protect event wait
 *	queues since interrupt code is free to post events to
 *	them.
 */
typedef struct wait_queue {
	unsigned int                            /* flags */
	/* boolean_t */ wq_type:16,             /* only public field */
			wq_fifo:1,              /* fifo wakeup policy? */
			wq_prepost:1,           /* waitq supports prepost? set only */
			:0;                     /* force to long boundary */
	hw_lock_data_t  wq_interlock;           /* interlock */
	queue_head_t    wq_queue;               /* queue of elements */
} WaitQueue;

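/*
 * Illustrative sketch (not part of this header): components that keep
 * their own wait queues, such as ports and semaphores, embed a
 * WaitQueue in their own structure and initialize it with
 * wait_queue_init().  The struct and field names below are
 * hypothetical.
 *
 *	struct my_object {
 *		struct wait_queue  mo_wait_queue;  -- threads blocked on this object
 *		int                mo_state;
 *	};
 *
 *	kern_return_t kr = wait_queue_init(&obj->mo_wait_queue, SYNC_POLICY_FIFO);
 *	assert(kr == KERN_SUCCESS);
 */
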
/*
 *	wait_queue_set_t
 *	This is the common definition for a set wait queue.
 *	These can be linked as members/elements of multiple regular
 *	wait queues.  They have an additional set of linkages to
 *	identify the linkage structures that point to them.
 */
typedef struct wait_queue_set {
	WaitQueue       wqs_wait_queue;         /* our wait queue */
	queue_head_t    wqs_setlinks;           /* links from set perspective */
	queue_head_t    wqs_preposts;           /* preposted links */
} WaitQueueSet;

#define wqs_type        wqs_wait_queue.wq_type
#define wqs_fifo        wqs_wait_queue.wq_fifo
#define wqs_prepost     wqs_wait_queue.wq_prepost
#define wqs_queue       wqs_wait_queue.wq_queue

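/*
 * Illustrative sketch (not part of this header): a set wait queue is
 * itself a wait queue, so threads block on the set while member wait
 * queues are linked into it.  Allocating a prepost-capable set might
 * look like this (variable name hypothetical):
 *
 *	wait_queue_set_t wq_set;
 *
 *	wq_set = wait_queue_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST);
 *	if (wq_set == WAIT_QUEUE_SET_NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 */
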
/*
 *	wait_queue_element_t
 *	This structure describes the elements on an event wait
 *	queue.  These are the common first fields of a thread shuttle
 *	and a wait_queue_link_t.  In that way, a wait queue can
 *	consist of both thread shuttle elements and links off to
 *	other (set) wait queues.
 *
 *	WARNING: These fields correspond to fields in the thread
 *	shuttle (run queue links and run queue pointer).  Any change in
 *	the layout here will have to be matched with a change there.
 */
typedef struct wait_queue_element {
	queue_chain_t   wqe_links;      /* link of elements on this queue */
	void *          wqe_type;       /* Identifies link vs. thread */
	wait_queue_t    wqe_queue;      /* queue this element is on */
} WaitQueueElement;

typedef WaitQueueElement *wait_queue_element_t;

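/*
 * Illustrative sketch (not part of this header): because a thread and a
 * wait queue link share these first fields, wakeup code can walk a wait
 * queue and use wqe_type to tell the two apart.  The wqe_is_link() test
 * below is hypothetical; the real discriminator values live in the
 * implementation.
 *
 *	wait_queue_element_t wqe;
 *
 *	queue_iterate(&wq->wq_queue, wqe, wait_queue_element_t, wqe_links) {
 *		if (wqe_is_link(wqe)) {
 *			wait_queue_link_t wql = (wait_queue_link_t)wqe;
 *			-- follow wql->wql_setqueue to the set wait queue
 *		} else {
 *			thread_t thread = (thread_t)wqe;
 *			-- candidate thread to wake
 *		}
 *	}
 */
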
/*
 *	wait_queue_link_t
 *	Specialized wait queue element type for linking set
 *	event wait queues onto a wait queue.  In this way, an event
 *	can be constructed so that any thread waiting on any number
 *	of associated wait queues can handle the event, while letting
 *	the thread only be linked on the single wait queue it blocked on.
 *
 *	One use: ports in multiple portsets.  Each thread is queued up
 *	on the portset that it specifically blocked on during a receive
 *	operation.  Each port's event queue links in all the portset
 *	event queues of which it is a member.  An IPC event post associated
 *	with that port may wake up any thread from any of those portsets,
 *	or one that was waiting locally on the port itself.
 */
typedef struct _wait_queue_link {
	WaitQueueElement        wql_element;    /* element on master */
	queue_chain_t           wql_setlinks;   /* element on set */
	queue_chain_t           wql_preposts;   /* element on set prepost list */
	wait_queue_set_t        wql_setqueue;   /* set queue */
} WaitQueueLink;

#define wql_links       wql_element.wqe_links
#define wql_type        wql_element.wqe_type
#define wql_queue       wql_element.wqe_queue

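/*
 * Illustrative sketch (not part of this header): linking one wait queue
 * (e.g. a port's) into two sets (e.g. two portsets).  A wakeup posted
 * on port_wq can then reach threads blocked on either set.  Variable
 * names are hypothetical.
 *
 *	kern_return_t kr;
 *
 *	kr = wait_queue_link(port_wq, pset_a);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	kr = wait_queue_link(port_wq, pset_b);
 *	if (kr != KERN_SUCCESS) {
 *		(void) wait_queue_unlink(port_wq, pset_a);
 *		return kr;
 *	}
 */
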
#define _WAIT_QUEUE_inited              0xf1d0
#define _WAIT_QUEUE_SET_inited          0xf1d1

#define wait_queue_is_queue(wq) \
	((wq)->wq_type == _WAIT_QUEUE_inited)

#define wait_queue_is_set(wqs) \
	((wqs)->wqs_type == _WAIT_QUEUE_SET_inited)

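/*
 * The two initialization cookies differ only in the low bit
 * (0xf1d0 vs. 0xf1d1), so masking that bit off below matches either a
 * plain wait queue or a set.
 */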
#define wait_queue_is_valid(wq) \
	(((wq)->wq_type & ~1) == _WAIT_QUEUE_inited)

#define wait_queue_empty(wq)            (queue_empty(&(wq)->wq_queue))

#define wait_queue_held(wq)             (hw_lock_held(&(wq)->wq_interlock))
#define wait_queue_lock_try(wq)         (hw_lock_try(&(wq)->wq_interlock))

/* For x86, the hardware timeout is in TSC units. */
#if defined(i386)
#define hwLockTimeOut LockTimeOutTSC
#else
#define hwLockTimeOut LockTimeOut
#endif

/*
 * Double the standard lock timeout, because wait queues tend
 * to iterate over a number of threads - locking each.  If there is
 * a problem with a thread lock, it normally times out at the wait
 * queue level first, hiding the real problem.
 */
static inline void wait_queue_lock(wait_queue_t wq) {
	if (!hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2))
		panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
}

static inline void wait_queue_unlock(wait_queue_t wq) {
	assert(wait_queue_held(wq));
	hw_lock_unlock(&(wq)->wq_interlock);
}

#define wqs_lock(wqs)           wait_queue_lock(&(wqs)->wqs_wait_queue)
#define wqs_unlock(wqs)         wait_queue_unlock(&(wqs)->wqs_wait_queue)
#define wqs_lock_try(wqs)       wait_queue_lock_try(&(wqs)->wqs_wait_queue)
#define wqs_is_preposted(wqs)   ((wqs)->wqs_prepost && !queue_empty(&(wqs)->wqs_preposts))

#define wql_is_preposted(wql)   ((wql)->wql_preposts.next != NULL)
#define wql_clear_prepost(wql)  ((wql)->wql_preposts.next = (wql)->wql_preposts.prev = NULL)

#define wait_queue_assert_possible(thread) \
	((thread)->wait_queue == WAIT_QUEUE_NULL)

/* bootstrap interface - can allocate/link wait_queues and sets after calling this */
__private_extern__ void wait_queue_bootstrap(void);

/******** Decomposed interfaces (to build higher level constructs) ***********/

/* assert intent to wait on a locked wait queue */
__private_extern__ wait_result_t wait_queue_assert_wait64_locked(
			wait_queue_t wait_queue,
			event64_t wait_event,
			wait_interrupt_t interruptible,
			uint64_t deadline,
			thread_t thread);

/* pull a thread from its wait queue */
__private_extern__ void wait_queue_pull_thread_locked(
			wait_queue_t wait_queue,
			thread_t thread,
			boolean_t unlock);

/* wakeup all threads waiting for a particular event on locked queue */
__private_extern__ kern_return_t wait_queue_wakeup64_all_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result,
			boolean_t unlock);

/* wakeup one thread waiting for a particular event on locked queue */
__private_extern__ kern_return_t wait_queue_wakeup64_one_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result,
			boolean_t unlock);

/* return identity of a thread awakened for a particular <wait_queue,event> */
__private_extern__ thread_t wait_queue_wakeup64_identity_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result,
			boolean_t unlock);

/* wakeup thread iff it's still waiting for a particular event on locked queue */
__private_extern__ kern_return_t wait_queue_wakeup64_thread_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			thread_t thread,
			wait_result_t result,
			boolean_t unlock);

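/*
 * Illustrative sketch (not part of this header): a higher-level
 * construct composes the locked primitives under its own locking.
 * Because interrupt code may post events, the interlock is taken at
 * splsched().  The deadline of 0 (no timeout) and the variable names
 * are assumptions for the sketch.
 *
 *	spl_t s;
 *	wait_result_t wres;
 *
 *	s = splsched();
 *	wait_queue_lock(wq);
 *	wres = wait_queue_assert_wait64_locked(wq, event, THREAD_UNINT,
 *	    0, current_thread());
 *	wait_queue_unlock(wq);
 *	splx(s);
 *
 *	if (wres == THREAD_WAITING)
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 */
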
__private_extern__ uint32_t num_wait_queues;
__private_extern__ struct wait_queue *wait_queues;

/* The Jenkins "one at a time" hash.
 * TBD: There may be some value to unrolling here,
 * depending on the architecture.
 */
static inline uint32_t wq_hash(char *key)
{
	uint32_t hash = 0;
	size_t i, length = sizeof(char *);

	for (i = 0; i < length; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash;
}

/* TBD: It should be possible to eliminate the divide here */
#define wait_hash(event) \
	(wq_hash((char *)&event) % (num_wait_queues))

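/*
 * Illustrative sketch (not part of this header): wait_hash() maps an
 * event to one of the global hashed wait queues, roughly what the
 * scheduler's assert_wait() path does for an event that has no wait
 * queue of its own.  Variable names are hypothetical.
 *
 *	wait_queue_t wq = &wait_queues[wait_hash(event)];
 */
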
#endif	/* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/******** Semi-Public interfaces (not a part of a higher construct) ************/

extern unsigned int wait_queue_set_size(void);
extern unsigned int wait_queue_link_size(void);

extern kern_return_t wait_queue_init(
			wait_queue_t wait_queue,
			int policy);

extern wait_queue_set_t wait_queue_set_alloc(
			int policy);

extern kern_return_t wait_queue_set_init(
			wait_queue_set_t set_queue,
			int policy);

extern kern_return_t wait_queue_set_free(
			wait_queue_set_t set_queue);

extern wait_queue_link_t wait_queue_link_alloc(
			int policy);

extern kern_return_t wait_queue_link_free(
			wait_queue_link_t link_element);

extern kern_return_t wait_queue_link(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue);

extern kern_return_t wait_queue_link_noalloc(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue,
			wait_queue_link_t link);

extern boolean_t wait_queue_member(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue);

extern kern_return_t wait_queue_unlink(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue);

extern kern_return_t wait_queue_unlink_all(
			wait_queue_t wait_queue);

extern kern_return_t wait_queue_set_unlink_all(
			wait_queue_set_t set_queue);

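/*
 * Illustrative sketch (not part of this header): wait_queue_link_noalloc()
 * lets a caller that cannot block for memory hand in a preallocated
 * link instead of having wait_queue_link() allocate one.  Variable
 * names are hypothetical.
 *
 *	wait_queue_link_t wql;
 *	kern_return_t kr;
 *
 *	wql = wait_queue_link_alloc(SYNC_POLICY_FIFO);
 *	if (wql == NULL)
 *		return KERN_RESOURCE_SHORTAGE;
 *	kr = wait_queue_link_noalloc(wq, wq_set, wql);
 *	if (kr != KERN_SUCCESS)
 *		(void) wait_queue_link_free(wql);
 */
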
/* legacy API */
kern_return_t wait_queue_sub_init(
			wait_queue_set_t set_queue,
			int policy);

kern_return_t wait_queue_sub_clearrefs(
			wait_queue_set_t wq_set);

extern kern_return_t wait_subqueue_unlink_all(
			wait_queue_set_t set_queue);

extern wait_queue_t wait_queue_alloc(
			int policy);

extern kern_return_t wait_queue_free(
			wait_queue_t wait_queue);

/* assert intent to wait on <wait_queue,event64> pair */
extern wait_result_t wait_queue_assert_wait64(
			wait_queue_t wait_queue,
			event64_t wait_event,
			wait_interrupt_t interruptible,
			uint64_t deadline);

/* wakeup the most appropriate thread waiting on <wait_queue,event64> pair */
extern kern_return_t wait_queue_wakeup64_one(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result);

/* wakeup all the threads waiting on <wait_queue,event64> pair */
extern kern_return_t wait_queue_wakeup64_all(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result);

/* wakeup a specified thread iff it is waiting on <wait_queue,event64> pair */
extern kern_return_t wait_queue_wakeup64_thread(
			wait_queue_t wait_queue,
			event64_t wake_event,
			thread_t thread,
			wait_result_t result);

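/*
 * Illustrative sketch (not part of this header): the canonical
 * wait/wake protocol.  The waiter asserts intent, then blocks; the
 * waker posts the same <wait_queue,event64> pair.  Variable names and
 * the use of CAST_EVENT64_T to widen a pointer event are assumptions.
 *
 * Waiter:
 *	wait_result_t wres;
 *
 *	wres = wait_queue_assert_wait64(wq, CAST_EVENT64_T(&my_object),
 *	    THREAD_INTERRUPTIBLE, 0);
 *	if (wres == THREAD_WAITING)
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *
 * Waker:
 *	(void) wait_queue_wakeup64_one(wq, CAST_EVENT64_T(&my_object),
 *	    THREAD_AWAKENED);
 */
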
/*
 * Compatibility Wait Queue APIs based on pointer events instead of 64bit
 * integer events.
 */

/* assert intent to wait on <wait_queue,event> pair */
extern wait_result_t wait_queue_assert_wait(
			wait_queue_t wait_queue,
			event_t wait_event,
			wait_interrupt_t interruptible,
			uint64_t deadline);

/* wakeup the most appropriate thread waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_one(
			wait_queue_t wait_queue,
			event_t wake_event,
			wait_result_t result);

/* wakeup all the threads waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_all(
			wait_queue_t wait_queue,
			event_t wake_event,
			wait_result_t result);

/* wakeup a specified thread iff it is waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_thread(
			wait_queue_t wait_queue,
			event_t wake_event,
			thread_t thread,
			wait_result_t result);

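/*
 * Illustrative note (not part of this header): each pointer-event call
 * is presumed equivalent to widening the event and using the 64-bit
 * API, e.g.:
 *
 *	wait_queue_wakeup_one(wq, event, result);
 * behaves like
 *	wait_queue_wakeup64_one(wq, CAST_EVENT64_T(event), result);
 */
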
__END_DECLS

#endif	/* _KERN_WAIT_QUEUE_H_ */

#endif	/* KERNEL_PRIVATE */