/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_LOCKS_H_
#define _KERN_LOCKS_H_

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>
#include <kern/kern_types.h>
#include <kern/lock_group.h>
#include <machine/locks.h>

#ifdef MACH_KERNEL_PRIVATE
#include <kern/queue.h>

extern void lck_mod_init(
	void);

#endif

typedef unsigned int lck_sleep_action_t;

#define LCK_SLEEP_DEFAULT       0x00    /* Release the lock while waiting for the event, then reclaim */
                                        /* RW locks are returned in the same mode */
#define LCK_SLEEP_UNLOCK        0x01    /* Release the lock and return unheld */
#define LCK_SLEEP_SHARED        0x02    /* Reclaim the lock in shared mode (RW only) */
#define LCK_SLEEP_EXCLUSIVE     0x04    /* Reclaim the lock in exclusive mode (RW only) */
#define LCK_SLEEP_SPIN          0x08    /* Reclaim the lock in spin mode (mutex only) */
#define LCK_SLEEP_PROMOTED_PRI  0x10    /* Sleep at a promoted priority */
#define LCK_SLEEP_SPIN_ALWAYS   0x20    /* Reclaim the lock in spin-always mode (mutex only) */

#define LCK_SLEEP_MASK          0x3f    /* Valid actions */

typedef unsigned int lck_wake_action_t;

#define LCK_WAKE_DEFAULT                0x00    /* If waiters are present, transfer their push to the woken-up thread */
#define LCK_WAKE_DO_NOT_TRANSFER_PUSH   0x01    /* Do not transfer waiters' push when waking up */

#ifdef MACH_KERNEL_PRIVATE
typedef struct _lck_attr_ {
	unsigned int    lck_attr_val;
} lck_attr_t;

extern lck_attr_t       LockDefaultLckAttr;

#define LCK_ATTR_NONE           0

#define LCK_ATTR_DEBUG          0x00000001
#define LCK_ATTR_RW_SHARED_PRIORITY     0x00010000

#else
typedef struct __lck_attr__ lck_attr_t;
#endif

#define LCK_ATTR_NULL           (lck_attr_t *)NULL

__BEGIN_DECLS

extern lck_attr_t      *lck_attr_alloc_init(
	void);

extern void             lck_attr_setdefault(
	lck_attr_t              *attr);

extern void             lck_attr_setdebug(
	lck_attr_t              *attr);

extern void             lck_attr_cleardebug(
	lck_attr_t              *attr);

#ifdef XNU_KERNEL_PRIVATE

typedef union {
	uint16_t tcurnext;
	struct {
		uint8_t cticket;
		uint8_t nticket;
	};
} lck_ticket_internal;

typedef struct {
	lck_ticket_internal tu;
	uintptr_t lck_owner;
} lck_ticket_t;

void lck_ticket_init(lck_ticket_t *tlock);
void lck_ticket_lock(lck_ticket_t *tlock);
void lck_ticket_unlock(lck_ticket_t *tlock);
void lck_ticket_assert_owned(lck_ticket_t *tlock);

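/*
 * Illustrative sketch (not part of the original header): a minimal
 * ticket-lock usage pattern, assuming a caller-provided variable named
 * my_tlock whose memory is visible to all participating threads.
 *
 *	static lck_ticket_t my_tlock;
 *
 *	lck_ticket_init(&my_tlock);
 *	...
 *	lck_ticket_lock(&my_tlock);
 *	lck_ticket_assert_owned(&my_tlock);	// holder-side sanity check
 *	// short critical section
 *	lck_ticket_unlock(&my_tlock);
 */
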
extern void             lck_attr_rw_shared_priority(
	lck_attr_t              *attr);
#endif

extern void             lck_attr_free(
	lck_attr_t              *attr);

#define decl_lck_spin_data(class, name)  class lck_spin_t name

extern lck_spin_t      *lck_spin_alloc_init(
	lck_grp_t               *grp,
	lck_attr_t              *attr);

extern void             lck_spin_init(
	lck_spin_t              *lck,
	lck_grp_t               *grp,
	lck_attr_t              *attr);

extern void             lck_spin_lock(
	lck_spin_t              *lck);

extern void             lck_spin_lock_grp(
	lck_spin_t              *lck,
	lck_grp_t               *grp);

extern void             lck_spin_unlock(
	lck_spin_t              *lck);

extern void             lck_spin_destroy(
	lck_spin_t              *lck,
	lck_grp_t               *grp);

extern void             lck_spin_free(
	lck_spin_t              *lck,
	lck_grp_t               *grp);

extern wait_result_t    lck_spin_sleep(
	lck_spin_t              *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible);

extern wait_result_t    lck_spin_sleep_grp(
	lck_spin_t              *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible,
	lck_grp_t               *grp);

extern wait_result_t    lck_spin_sleep_deadline(
	lck_spin_t              *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible,
	uint64_t                deadline);

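/*
 * Illustrative sketch (not part of the original header): allocating and
 * using a spin lock. The group, attribute and lock names (my_grp, my_attr,
 * my_slock) are hypothetical.
 *
 *	lck_grp_t  *my_grp   = lck_grp_alloc_init("my-group", LCK_GRP_ATTR_NULL);
 *	lck_attr_t *my_attr  = lck_attr_alloc_init();
 *	lck_spin_t *my_slock = lck_spin_alloc_init(my_grp, my_attr);
 *
 *	lck_spin_lock(my_slock);
 *	// short, non-blocking critical section
 *	lck_spin_unlock(my_slock);
 *
 *	lck_spin_free(my_slock, my_grp);
 */
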
#ifdef KERNEL_PRIVATE

extern void             lck_spin_lock_nopreempt(        lck_spin_t *lck);
extern void             lck_spin_lock_nopreempt_grp(    lck_spin_t *lck, lck_grp_t *grp);

extern void             lck_spin_unlock_nopreempt(      lck_spin_t *lck);

extern boolean_t        lck_spin_try_lock_grp(          lck_spin_t *lck, lck_grp_t *grp);

extern boolean_t        lck_spin_try_lock(              lck_spin_t *lck);

extern boolean_t        lck_spin_try_lock_nopreempt(    lck_spin_t *lck);
extern boolean_t        lck_spin_try_lock_nopreempt_grp(lck_spin_t *lck, lck_grp_t *grp);

/* NOT SAFE: To be used only by kernel debugger to avoid deadlock. */
extern boolean_t        kdp_lck_spin_is_acquired(       lck_spin_t *lck);

struct _lck_mtx_ext_;
extern void lck_mtx_init_ext(lck_mtx_t *lck, struct _lck_mtx_ext_ *lck_ext,
    lck_grp_t *grp, lck_attr_t *attr);

#endif


#define decl_lck_mtx_data(class, name)   class lck_mtx_t name

extern lck_mtx_t       *lck_mtx_alloc_init(
	lck_grp_t               *grp,
	lck_attr_t              *attr);

extern void             lck_mtx_init(
	lck_mtx_t               *lck,
	lck_grp_t               *grp,
	lck_attr_t              *attr);
extern void             lck_mtx_lock(
	lck_mtx_t               *lck);

extern void             lck_mtx_unlock(
	lck_mtx_t               *lck);

extern void             lck_mtx_destroy(
	lck_mtx_t               *lck,
	lck_grp_t               *grp);

extern void             lck_mtx_free(
	lck_mtx_t               *lck,
	lck_grp_t               *grp);

extern wait_result_t    lck_mtx_sleep(
	lck_mtx_t               *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible);

extern wait_result_t    lck_mtx_sleep_deadline(
	lck_mtx_t               *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible,
	uint64_t                deadline);

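/*
 * Illustrative sketch (not part of the original header): the classic
 * mutex + event condition wait built on lck_mtx_sleep(). The mutex,
 * condition flag and event names are hypothetical; a waker would call
 * thread_wakeup() on the same event while holding the mutex.
 *
 *	lck_mtx_lock(&my_mtx);
 *	while (!my_condition) {
 *		// drops my_mtx while blocked, reacquires it before returning
 *		(void) lck_mtx_sleep(&my_mtx, LCK_SLEEP_DEFAULT,
 *		    (event_t)&my_condition, THREAD_UNINT);
 *	}
 *	// my_condition is true and my_mtx is held here
 *	lck_mtx_unlock(&my_mtx);
 */
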
#ifdef KERNEL_PRIVATE
/*
 * Name: lck_spin_sleep_with_inheritor
 *
 * Description: deschedule the current thread and wait on the waitq associated with event to be woken up.
 *              While waiting, the sched priority of the waiting thread will contribute to the push of the event that will
 *              be directed to the inheritor specified.
 *              An interruptible mode and deadline can be specified to return earlier from the wait.
 *
 * Args:
 *   Arg1: lck_spin_t lock used to protect the sleep. The lock will be dropped while sleeping and reacquired before returning according to the sleep action specified.
 *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
 *   Arg3: event to wait on.
 *   Arg4: thread to propagate the event push to.
 *   Arg5: interruptible flag for wait.
 *   Arg6: deadline for wait.
 *
 * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
 *             Lock will be dropped while waiting.
 *             The inheritor specified cannot run in user space until another inheritor is specified for the event or a
 *             wakeup for the event is called.
 *
 * Returns: result of the wait.
 */
extern wait_result_t lck_spin_sleep_with_inheritor(lck_spin_t *lock, lck_sleep_action_t lck_sleep_action, event_t event, thread_t inheritor, wait_interrupt_t interruptible, uint64_t deadline);

/*
 * Name: lck_mtx_sleep_with_inheritor
 *
 * Description: deschedule the current thread and wait on the waitq associated with event to be woken up.
 *              While waiting, the sched priority of the waiting thread will contribute to the push of the event that will
 *              be directed to the inheritor specified.
 *              An interruptible mode and deadline can be specified to return earlier from the wait.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the sleep. The lock will be dropped while sleeping and reacquired before returning according to the sleep action specified.
 *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
 *   Arg3: event to wait on.
 *   Arg4: thread to propagate the event push to.
 *   Arg5: interruptible flag for wait.
 *   Arg6: deadline for wait.
 *
 * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
 *             Lock will be dropped while waiting.
 *             The inheritor specified cannot run in user space until another inheritor is specified for the event or a
 *             wakeup for the event is called.
 *
 * Returns: result of the wait.
 */
extern wait_result_t lck_mtx_sleep_with_inheritor(lck_mtx_t *lock, lck_sleep_action_t lck_sleep_action, event_t event, thread_t inheritor, wait_interrupt_t interruptible, uint64_t deadline);

/*
 * Name: lck_rw_sleep_with_inheritor
 *
 * Description: deschedule the current thread and wait on the waitq associated with event to be woken up.
 *              While waiting, the sched priority of the waiting thread will contribute to the push of the event that will
 *              be directed to the inheritor specified.
 *              An interruptible mode and deadline can be specified to return earlier from the wait.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the sleep. The lock will be dropped while sleeping and reacquired before returning according to the sleep action specified.
 *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE.
 *   Arg3: event to wait on.
 *   Arg4: thread to propagate the event push to.
 *   Arg5: interruptible flag for wait.
 *   Arg6: deadline for wait.
 *
 * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
 *             Lock will be dropped while waiting.
 *             The inheritor specified cannot run in user space until another inheritor is specified for the event or a
 *             wakeup for the event is called.
 *
 * Returns: result of the wait.
 */
extern wait_result_t lck_rw_sleep_with_inheritor(lck_rw_t *lock, lck_sleep_action_t lck_sleep_action, event_t event, thread_t inheritor, wait_interrupt_t interruptible, uint64_t deadline);

/*
 * Name: wakeup_one_with_inheritor
 *
 * Description: wake up one waiter for event if any. The thread woken up will be the one with the highest sched priority waiting on event.
 *              The push for the event will be transferred from the last inheritor to the woken-up thread.
 *
 * Args:
 *   Arg1: event to wake from.
 *   Arg2: wait result to pass to the woken-up thread.
 *   Arg3: wake action. LCK_WAKE_DEFAULT or LCK_WAKE_DO_NOT_TRANSFER_PUSH.
 *   Arg4: pointer for storing the woken-up thread.
 *
 * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
 *
 * Conditions: The new inheritor woken up cannot run in user space until another inheritor is specified for the event or a
 *             wakeup for the event is called.
 *             A reference for the woken-up thread is acquired.
 *             NOTE: this cannot be called from interrupt context.
 */
extern kern_return_t wakeup_one_with_inheritor(event_t event, wait_result_t result, lck_wake_action_t action, thread_t *thread_wokenup);

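/*
 * Illustrative sketch (not part of the original header): a waiter/waker
 * pairing using the inheritor-aware primitives above. The mutex, flag and
 * owner-thread names (my_mtx, my_done, current_owner) are hypothetical.
 *
 *	// waiter: current_owner receives the waiter's push while we sleep
 *	lck_mtx_lock(&my_mtx);
 *	while (!my_done) {
 *		(void) lck_mtx_sleep_with_inheritor(&my_mtx, LCK_SLEEP_DEFAULT,
 *		    (event_t)&my_done, current_owner, THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
 *	}
 *	lck_mtx_unlock(&my_mtx);
 *
 *	// waker: transfer the push to one woken-up waiter (a thread reference is returned)
 *	thread_t woken = THREAD_NULL;
 *	my_done = TRUE;
 *	wakeup_one_with_inheritor((event_t)&my_done, THREAD_AWAKENED,
 *	    LCK_WAKE_DEFAULT, &woken);
 *	if (woken != THREAD_NULL) {
 *		thread_deallocate(woken);
 *	}
 */
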
/*
 * Name: wakeup_all_with_inheritor
 *
 * Description: wake up all waiters waiting for event. The old inheritor will lose the push.
 *
 * Args:
 *   Arg1: event to wake from.
 *   Arg2: wait result to pass to the woken-up threads.
 *
 * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
 *
 * Conditions: NOTE: this cannot be called from interrupt context.
 */
extern kern_return_t wakeup_all_with_inheritor(event_t event, wait_result_t result);

/*
 * Name: change_sleep_inheritor
 *
 * Description: Redirect the push of the waiting threads of event to the new inheritor specified.
 *
 * Args:
 *   Arg1: event to redirect the push.
 *   Arg2: new inheritor for event.
 *
 * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
 *
 * Conditions: In case of success, the new inheritor cannot run in user space until another inheritor is specified for the event or a
 *             wakeup for the event is called.
 *             NOTE: this cannot be called from interrupt context.
 */
extern kern_return_t change_sleep_inheritor(event_t event, thread_t inheritor);

/*
 * gate structure
 */
typedef struct gate {
	uintptr_t gate_data;           // thread holder, interlock bit and waiter bit
	struct turnstile *turnstile;   // protected by the interlock bit
} gate_t;

#define GATE_ILOCK_BIT   0
#define GATE_WAITERS_BIT 1

#define GATE_ILOCK (1 << GATE_ILOCK_BIT)
#define GATE_WAITERS (1 << GATE_WAITERS_BIT)

#define gate_ilock(gate) hw_lock_bit((hw_lock_bit_t*)(&(gate)->gate_data), GATE_ILOCK_BIT, LCK_GRP_NULL)
#define gate_iunlock(gate) hw_unlock_bit((hw_lock_bit_t*)(&(gate)->gate_data), GATE_ILOCK_BIT)
#define gate_has_waiters(state) ((state & GATE_WAITERS) != 0)
#define ordered_load_gate(gate) os_atomic_load(&(gate)->gate_data, compiler_acq_rel)
#define ordered_store_gate(gate, value)  os_atomic_store(&(gate)->gate_data, value, compiler_acq_rel)

#define GATE_THREAD_MASK (~(uintptr_t)(GATE_ILOCK | GATE_WAITERS))
#define GATE_STATE_TO_THREAD(state) (thread_t)(state & GATE_THREAD_MASK)
#define GATE_THREAD_TO_STATE(thread) ((uintptr_t)thread)

/*
 * Possible gate_wait_result_t values.
 */
typedef int gate_wait_result_t;
#define GATE_HANDOFF     0
#define GATE_OPENED      1
#define GATE_TIMED_OUT   2
#define GATE_INTERRUPTED 3

/*
 * Gate flags used by gate_assert
 */
#define GATE_ASSERT_CLOSED 0
#define GATE_ASSERT_OPEN   1
#define GATE_ASSERT_HELD   2

/*
 * Gate flags used by gate_handoff
 */
#define GATE_HANDOFF_DEFAULT            0
#define GATE_HANDOFF_OPEN_IF_NO_WAITERS 1

#define GATE_EVENT(gate)     ((event_t) gate)
#define EVENT_TO_GATE(event) ((gate_t *) event)

/*
 * Name: decl_lck_rw_gate_data
 *
 * Description: declares a gate variable with specified storage class.
 *              The gate itself will be stored in this variable and it is the caller's responsibility
 *              to ensure that this variable's memory is going to be accessible by all threads that will use
 *              the gate.
 *              Every gate function will require a pointer to this variable as parameter. The same pointer should
 *              be used in every thread.
 *
 *              The variable needs to be initialized once with lck_rw_gate_init() and destroyed once with
 *              lck_rw_gate_destroy() when not needed anymore.
 *
 *              The gate will be used in conjunction with a lck_rw_t.
 *
 * Args:
 *   Arg1: storage class.
 *   Arg2: variable name.
 */
#define decl_lck_rw_gate_data(class, name)      class gate_t name

/*
 * Name: lck_rw_gate_init
 *
 * Description: initializes a variable declared with decl_lck_rw_gate_data.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 */
extern void lck_rw_gate_init(lck_rw_t *lock, gate_t *gate);

/*
 * Name: lck_rw_gate_destroy
 *
 * Description: destroys a variable previously initialized.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 */
extern void lck_rw_gate_destroy(lck_rw_t *lock, gate_t *gate);

/*
 * Name: lck_rw_gate_try_close
 *
 * Description: Tries to close the gate.
 *              In case of success the current thread will be set as
 *              the holder of the gate.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *
 * Returns:
 *          KERN_SUCCESS in case the gate was successfully closed. The current thread is the new holder
 *          of the gate.
 *          A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
 *          to wake up possible waiters on the gate before returning to userspace.
 *          If the intent is to conditionally probe the gate before waiting, the lock must not be dropped
 *          between the calls to lck_rw_gate_try_close() and lck_rw_gate_wait().
 *
 *          KERN_FAILURE in case the gate was already closed. Will panic if the current thread was already the holder of the gate.
 *          lck_rw_gate_wait() should be called instead if the intent is to unconditionally wait on this gate.
 *          The calls to lck_rw_gate_try_close() and lck_rw_gate_wait() should
 *          be done without dropping the lock that is protecting the gate in between.
 */
extern kern_return_t lck_rw_gate_try_close(lck_rw_t *lock, gate_t *gate);

/*
 * Name: lck_rw_gate_close
 *
 * Description: Closes the gate. The current thread will be set as
 *              the holder of the gate. Will panic if the gate is already closed.
 *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
 *              to wake up possible waiters on the gate before returning to userspace.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The gate must be open.
 *
 */
extern void lck_rw_gate_close(lck_rw_t *lock, gate_t *gate);


/*
 * Name: lck_rw_gate_open
 *
 * Description: Opens the gate and wakes up possible waiters.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The current thread must be the holder of the gate.
 *
 */
extern void lck_rw_gate_open(lck_rw_t *lock, gate_t *gate);

/*
 * Name: lck_rw_gate_handoff
 *
 * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
 *              priority will be selected as the new holder of the gate, and woken up,
 *              with the gate remaining in the closed state throughout.
 *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
 *              will be returned.
 *              GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in
 *              case no waiters were found.
 *
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The current thread must be the holder of the gate.
 *
 * Returns:
 *          KERN_SUCCESS in case one of the waiters became the new holder.
 *          KERN_NOT_WAITING in case there were no waiters.
 *
 */
extern kern_return_t lck_rw_gate_handoff(lck_rw_t *lock, gate_t *gate, int flags);

/*
 * Name: lck_rw_gate_steal
 *
 * Description: Steals the ownership of the gate. It sets the current thread as the
 *              new holder of the gate.
 *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
 *              to wake up possible waiters on the gate before returning to userspace.
 *              NOTE: the previous holder should not call lck_rw_gate_open() or lck_rw_gate_handoff()
 *              anymore.
 *
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The gate must be closed and the current thread must not already be the holder.
 *
 */
extern void lck_rw_gate_steal(lck_rw_t *lock, gate_t *gate);

/*
 * Name: lck_rw_gate_wait
 *
 * Description: Waits for the current thread to become the holder of the gate or for the
 *              gate to become open. An interruptible mode and deadline can be specified
 *              to return earlier from the wait.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE.
 *   Arg4: interruptible flag for wait.
 *   Arg5: deadline.
 *
 * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
 *             Lock will be dropped while waiting.
 *             The gate must be closed.
 *
 * Returns: Reason why the thread was woken up.
 *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
 *                         A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
 *                         to wake up possible waiters on the gate before returning to userspace.
 *          GATE_OPENED - the gate was opened by the holder.
 *          GATE_TIMED_OUT - the thread was woken up by a timeout.
 *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
 *
 */
extern gate_wait_result_t lck_rw_gate_wait(lck_rw_t *lock, gate_t *gate, lck_sleep_action_t lck_sleep_action, wait_interrupt_t interruptible, uint64_t deadline);

/*
 * Name: lck_rw_gate_assert
 *
 * Description: asserts that the gate is in the specified state.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *   Arg3: flags to specify the assert type.
 *         GATE_ASSERT_CLOSED - the gate is currently closed
 *         GATE_ASSERT_OPEN - the gate is currently opened
 *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
 */
extern void lck_rw_gate_assert(lck_rw_t *lock, gate_t *gate, int flags);

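/*
 * Illustrative sketch (not part of the original header): a single-holder
 * "gate" guarding work done on behalf of many readers of an lck_rw_t.
 * The lock, gate and work names (my_rwlock, my_gate, do_the_work) are
 * hypothetical.
 *
 *	decl_lck_rw_gate_data(static, my_gate);
 *	// once, at setup time: lck_rw_gate_init(&my_rwlock, &my_gate);
 *
 *	lck_rw_lock_shared(&my_rwlock);
 *	if (lck_rw_gate_try_close(&my_rwlock, &my_gate) == KERN_SUCCESS) {
 *		// we are the holder: do the work, then let waiters through
 *		do_the_work();
 *		lck_rw_gate_open(&my_rwlock, &my_gate);
 *	} else {
 *		// someone else holds the gate: wait without dropping the lock
 *		// between try_close and wait
 *		gate_wait_result_t res = lck_rw_gate_wait(&my_rwlock, &my_gate,
 *		    LCK_SLEEP_DEFAULT, THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
 *		if (res == GATE_HANDOFF) {
 *			// ownership was handed to us: we must open (or hand off) later
 *			lck_rw_gate_open(&my_rwlock, &my_gate);
 *		}
 *	}
 *	lck_rw_unlock_shared(&my_rwlock);
 */
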
/*
 * Name: decl_lck_mtx_gate_data
 *
 * Description: declares a gate variable with specified storage class.
 *              The gate itself will be stored in this variable and it is the caller's responsibility
 *              to ensure that this variable's memory is going to be accessible by all threads that will use
 *              the gate.
 *              Every gate function will require a pointer to this variable as parameter. The same pointer should
 *              be used in every thread.
 *
 *              The variable needs to be initialized once with lck_mtx_gate_init() and destroyed once with
 *              lck_mtx_gate_destroy() when not needed anymore.
 *
 *              The gate will be used in conjunction with a lck_mtx_t.
 *
 * Args:
 *   Arg1: storage class.
 *   Arg2: variable name.
 */
#define decl_lck_mtx_gate_data(class, name)     class gate_t name

/*
 * Name: lck_mtx_gate_init
 *
 * Description: initializes a variable declared with decl_lck_mtx_gate_data.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 */
extern void lck_mtx_gate_init(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_destroy
 *
 * Description: destroys a variable previously initialized.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 */
extern void lck_mtx_gate_destroy(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_try_close
 *
 * Description: Tries to close the gate.
 *              In case of success the current thread will be set as
 *              the holder of the gate.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *
 * Returns:
 *          KERN_SUCCESS in case the gate was successfully closed. The current thread is the new holder
 *          of the gate.
 *          A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
 *          to wake up possible waiters on the gate before returning to userspace.
 *          If the intent is to conditionally probe the gate before waiting, the lock must not be dropped
 *          between the calls to lck_mtx_gate_try_close() and lck_mtx_gate_wait().
 *
 *          KERN_FAILURE in case the gate was already closed. Will panic if the current thread was already the holder of the gate.
 *          lck_mtx_gate_wait() should be called instead if the intent is to unconditionally wait on this gate.
 *          The calls to lck_mtx_gate_try_close() and lck_mtx_gate_wait() should
 *          be done without dropping the lock that is protecting the gate in between.
 */
extern kern_return_t lck_mtx_gate_try_close(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_close
 *
 * Description: Closes the gate. The current thread will be set as
 *              the holder of the gate. Will panic if the gate is already closed.
 *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
 *              to wake up possible waiters on the gate before returning to userspace.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The gate must be open.
 *
 */
extern void lck_mtx_gate_close(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_open
 *
 * Description: Opens the gate and wakes up possible waiters.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The current thread must be the holder of the gate.
 *
 */
extern void lck_mtx_gate_open(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_handoff
 *
 * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
 *              priority will be selected as the new holder of the gate, and woken up,
 *              with the gate remaining in the closed state throughout.
 *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
 *              will be returned.
 *              GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in
 *              case no waiters were found.
 *
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The current thread must be the holder of the gate.
 *
 * Returns:
 *          KERN_SUCCESS in case one of the waiters became the new holder.
 *          KERN_NOT_WAITING in case there were no waiters.
 *
 */
extern kern_return_t lck_mtx_gate_handoff(lck_mtx_t *lock, gate_t *gate, int flags);

/*
 * Name: lck_mtx_gate_steal
 *
 * Description: Steals the ownership of the gate. It sets the current thread as the
 *              new holder of the gate.
 *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
 *              to wake up possible waiters on the gate before returning to userspace.
 *              NOTE: the previous holder should not call lck_mtx_gate_open() or lck_mtx_gate_handoff()
 *              anymore.
 *
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The gate must be closed and the current thread must not already be the holder.
 *
 */
extern void lck_mtx_gate_steal(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_wait
 *
 * Description: Waits for the current thread to become the holder of the gate or for the
 *              gate to become open. An interruptible mode and deadline can be specified
 *              to return earlier from the wait.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
 *   Arg4: interruptible flag for wait.
 *   Arg5: deadline.
 *
 * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
 *             Lock will be dropped while waiting.
 *             The gate must be closed.
 *
 * Returns: Reason why the thread was woken up.
 *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
 *                         A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
 *                         to wake up possible waiters on the gate before returning to userspace.
 *          GATE_OPENED - the gate was opened by the holder.
 *          GATE_TIMED_OUT - the thread was woken up by a timeout.
 *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
 *
 */
extern gate_wait_result_t lck_mtx_gate_wait(lck_mtx_t *lock, gate_t *gate, lck_sleep_action_t lck_sleep_action, wait_interrupt_t interruptible, uint64_t deadline);

/*
 * Name: lck_mtx_gate_assert
 *
 * Description: asserts that the gate is in the specified state.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: flags to specify the assert type.
 *         GATE_ASSERT_CLOSED - the gate is currently closed
 *         GATE_ASSERT_OPEN - the gate is currently opened
 *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
 */
extern void lck_mtx_gate_assert(lck_mtx_t *lock, gate_t *gate, int flags);

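/*
 * Illustrative sketch (not part of the original header): handing a
 * lck_mtx_t-protected gate to the highest-priority waiter, opening it
 * when nobody is waiting. The mutex and gate names are hypothetical.
 *
 *	lck_mtx_lock(&my_mtx);
 *	lck_mtx_gate_assert(&my_mtx, &my_gate, GATE_ASSERT_HELD);
 *	kern_return_t kr = lck_mtx_gate_handoff(&my_mtx, &my_gate,
 *	    GATE_HANDOFF_OPEN_IF_NO_WAITERS);
 *	// kr == KERN_SUCCESS:     a waiter is the new holder
 *	// kr == KERN_NOT_WAITING: no waiters; the gate was opened instead
 *	lck_mtx_unlock(&my_mtx);
 */
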

#endif //KERNEL_PRIVATE

#if DEVELOPMENT || DEBUG
#define FULL_CONTENDED 0
#define HALF_CONTENDED 1
#define MAX_CONDENDED  2

extern void             erase_all_test_mtx_stats(void);
extern int              get_test_mtx_stats_string(char* buffer, int buffer_size);
extern void             lck_mtx_test_init(void);
extern void             lck_mtx_test_lock(void);
extern void             lck_mtx_test_unlock(void);
extern int              lck_mtx_test_mtx_uncontended(int iter, char* buffer, int buffer_size);
extern int              lck_mtx_test_mtx_contended(int iter, char* buffer, int buffer_size, int type);
extern int              lck_mtx_test_mtx_uncontended_loop_time(int iter, char* buffer, int buffer_size);
extern int              lck_mtx_test_mtx_contended_loop_time(int iter, char* buffer, int buffer_size, int type);
#endif
#ifdef KERNEL_PRIVATE

extern boolean_t        lck_mtx_try_lock(
	lck_mtx_t               *lck);

extern void             mutex_pause(uint32_t);

extern void             lck_mtx_yield(
	lck_mtx_t               *lck);

extern boolean_t        lck_mtx_try_lock_spin(
	lck_mtx_t               *lck);

extern void             lck_mtx_lock_spin(
	lck_mtx_t               *lck);

extern boolean_t        kdp_lck_mtx_lock_spin_is_acquired(
	lck_mtx_t               *lck);

extern void             lck_mtx_convert_spin(
	lck_mtx_t               *lck);

extern void             lck_mtx_lock_spin_always(
	lck_mtx_t               *lck);

extern boolean_t        lck_mtx_try_lock_spin_always(
	lck_mtx_t               *lck);

#define lck_mtx_unlock_always(l)        lck_mtx_unlock(l)

extern void             lck_spin_assert(
	lck_spin_t              *lck,
	unsigned int            type);

extern boolean_t        kdp_lck_rw_lock_is_acquired_exclusive(
	lck_rw_t                *lck);

#endif  /* KERNEL_PRIVATE */

extern void             lck_mtx_assert(
	lck_mtx_t               *lck,
	unsigned int            type);

#if MACH_ASSERT
#define LCK_MTX_ASSERT(lck, type)  lck_mtx_assert((lck),(type))
#define LCK_SPIN_ASSERT(lck, type) lck_spin_assert((lck),(type))
#define LCK_RW_ASSERT(lck, type)   lck_rw_assert((lck),(type))
#else /* MACH_ASSERT */
#define LCK_MTX_ASSERT(lck, type)
#define LCK_SPIN_ASSERT(lck, type)
#define LCK_RW_ASSERT(lck, type)
#endif /* MACH_ASSERT */

#if DEBUG
#define LCK_MTX_ASSERT_DEBUG(lck, type)  lck_mtx_assert((lck),(type))
#define LCK_SPIN_ASSERT_DEBUG(lck, type) lck_spin_assert((lck),(type))
#define LCK_RW_ASSERT_DEBUG(lck, type)   lck_rw_assert((lck),(type))
#else /* DEBUG */
#define LCK_MTX_ASSERT_DEBUG(lck, type)
#define LCK_SPIN_ASSERT_DEBUG(lck, type)
#define LCK_RW_ASSERT_DEBUG(lck, type)
#endif /* DEBUG */

__END_DECLS

#define LCK_ASSERT_OWNED        1
#define LCK_ASSERT_NOTOWNED     2

#define LCK_MTX_ASSERT_OWNED    LCK_ASSERT_OWNED
#define LCK_MTX_ASSERT_NOTOWNED LCK_ASSERT_NOTOWNED

#ifdef MACH_KERNEL_PRIVATE
struct turnstile;
extern void             lck_mtx_lock_wait(
	lck_mtx_t               *lck,
	thread_t                holder,
	struct turnstile        **ts);

extern int              lck_mtx_lock_acquire(
	lck_mtx_t               *lck,
	struct turnstile        *ts);

extern boolean_t        lck_mtx_unlock_wakeup(
	lck_mtx_t               *lck,
	thread_t                holder);

extern boolean_t        lck_mtx_ilk_unlock(
	lck_mtx_t               *lck);

extern boolean_t        lck_mtx_ilk_try_lock(
	lck_mtx_t               *lck);

extern void lck_mtx_wakeup_adjust_pri(thread_t thread, integer_t priority);

#endif

#define decl_lck_rw_data(class, name)   class lck_rw_t name

typedef unsigned int    lck_rw_type_t;

#define LCK_RW_TYPE_SHARED      0x01
#define LCK_RW_TYPE_EXCLUSIVE   0x02

#ifdef XNU_KERNEL_PRIVATE
#define LCK_RW_ASSERT_SHARED    0x01
#define LCK_RW_ASSERT_EXCLUSIVE 0x02
#define LCK_RW_ASSERT_HELD      0x03
#define LCK_RW_ASSERT_NOTHELD   0x04
#endif

__BEGIN_DECLS

935 | extern lck_rw_t *lck_rw_alloc_init( |
936 | lck_grp_t *grp, | |
937 | lck_attr_t *attr); | |
91447636 | 938 | |
0a7de745 A |
939 | extern void lck_rw_init( |
940 | lck_rw_t *lck, | |
941 | lck_grp_t *grp, | |
942 | lck_attr_t *attr); | |
91447636 | 943 | |
0a7de745 A |
944 | extern void lck_rw_lock( |
945 | lck_rw_t *lck, | |
946 | lck_rw_type_t lck_rw_type); | |
91447636 | 947 | |
0a7de745 A |
948 | extern void lck_rw_unlock( |
949 | lck_rw_t *lck, | |
950 | lck_rw_type_t lck_rw_type); | |
91447636 | 951 | |
0a7de745 A |
952 | extern void lck_rw_lock_shared( |
953 | lck_rw_t *lck); | |
91447636 | 954 | |
0a7de745 A |
955 | extern void lck_rw_unlock_shared( |
956 | lck_rw_t *lck); | |
91447636 | 957 | |
0a7de745 A |
958 | extern boolean_t lck_rw_lock_yield_shared( |
959 | lck_rw_t *lck, | |
960 | boolean_t force_yield); | |
5ba3f43e | 961 | |
0a7de745 A |
962 | extern void lck_rw_lock_exclusive( |
963 | lck_rw_t *lck); | |
91447636 | 964 | |
0a7de745 A |
965 | extern void lck_rw_unlock_exclusive( |
966 | lck_rw_t *lck); | |
91447636 | 967 | |
#ifdef XNU_KERNEL_PRIVATE
/*
 * CAUTION
 * read-write locks do not have a concept of ownership, so lck_rw_assert()
 * merely asserts that someone is holding the lock, not necessarily the caller.
 */
extern void             lck_rw_assert(
	lck_rw_t                *lck,
	unsigned int            type);

extern void lck_rw_clear_promotion(thread_t thread, uintptr_t trace_obj);
extern void lck_rw_set_promotion_locked(thread_t thread);

uintptr_t unslide_for_kdebug(void* object);
#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE

extern lck_rw_type_t    lck_rw_done(
	lck_rw_t                *lck);
#endif

extern void             lck_rw_destroy(
	lck_rw_t                *lck,
	lck_grp_t               *grp);

extern void             lck_rw_free(
	lck_rw_t                *lck,
	lck_grp_t               *grp);

extern wait_result_t    lck_rw_sleep(
	lck_rw_t                *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible);

extern wait_result_t    lck_rw_sleep_deadline(
	lck_rw_t                *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible,
	uint64_t                deadline);

extern boolean_t        lck_rw_lock_shared_to_exclusive(
	lck_rw_t                *lck);

extern void             lck_rw_lock_exclusive_to_shared(
	lck_rw_t                *lck);

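/*
 * Illustrative sketch (not part of the original header): shared read with
 * an opportunistic upgrade to exclusive. The lock and data names are
 * hypothetical. Note that lck_rw_lock_shared_to_exclusive() drops the lock
 * entirely when the upgrade fails and it returns FALSE.
 *
 *	lck_rw_lock_shared(&my_rwlock);
 *	if (needs_update(&my_data)) {
 *		if (!lck_rw_lock_shared_to_exclusive(&my_rwlock)) {
 *			// upgrade failed: the lock was dropped, take it exclusive
 *			lck_rw_lock_exclusive(&my_rwlock);
 *		}
 *		update(&my_data);
 *		lck_rw_unlock_exclusive(&my_rwlock);
 *	} else {
 *		lck_rw_unlock_shared(&my_rwlock);
 *	}
 */
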
extern boolean_t        lck_rw_try_lock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);

#ifdef KERNEL_PRIVATE

extern boolean_t        lck_rw_try_lock_shared(
	lck_rw_t                *lck);

extern boolean_t        lck_rw_try_lock_exclusive(
	lck_rw_t                *lck);
#endif

__END_DECLS

#endif /* _KERN_LOCKS_H_ */