/*
 * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_LOCKS_H_
#define _KERN_LOCKS_H_

#include <sys/cdefs.h>
#include <sys/appleapiopts.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>
#include <kern/kern_types.h>
#include <kern/lock_group.h>
#include <machine/locks.h>

__BEGIN_DECLS

typedef unsigned int            lck_sleep_action_t;

#define LCK_SLEEP_DEFAULT       0x00    /* Release the lock while waiting for the event, then reclaim */
                                        /* RW locks are returned in the same mode */
#define LCK_SLEEP_UNLOCK        0x01    /* Release the lock and return unheld */
#define LCK_SLEEP_SHARED        0x02    /* Reclaim the lock in shared mode (RW only) */
#define LCK_SLEEP_EXCLUSIVE     0x04    /* Reclaim the lock in exclusive mode (RW only) */
#define LCK_SLEEP_SPIN          0x08    /* Reclaim the lock in spin mode (mutex only) */
#define LCK_SLEEP_PROMOTED_PRI  0x10    /* Sleep at a promoted priority */
#define LCK_SLEEP_SPIN_ALWAYS   0x20    /* Reclaim the lock in spin-always mode (mutex only) */

#define LCK_SLEEP_MASK          0x3f    /* Valid actions */

typedef unsigned int            lck_wake_action_t;

#define LCK_WAKE_DEFAULT                0x00    /* If waiters are present, transfer their push to the woken-up thread */
#define LCK_WAKE_DO_NOT_TRANSFER_PUSH   0x01    /* Do not transfer the waiters' push when waking up */

#ifdef XNU_KERNEL_PRIVATE
#include <kern/startup.h>

typedef struct _lck_attr_ {
    unsigned int    lck_attr_val;
} lck_attr_t;

extern lck_attr_t       LockDefaultLckAttr;

#define LCK_ATTR_NONE                   0
#define LCK_ATTR_DEBUG                  0x00000001
#define LCK_ATTR_RW_SHARED_PRIORITY     0x00010000
#else /* !XNU_KERNEL_PRIVATE */
typedef struct __lck_attr__ lck_attr_t;
#endif /* !XNU_KERNEL_PRIVATE */

#define LCK_ATTR_NULL   (lck_attr_t *)NULL

extern lck_attr_t *lck_attr_alloc_init(void);

extern void lck_attr_setdefault(
    lck_attr_t      *attr);

extern void lck_attr_setdebug(
    lck_attr_t      *attr);

extern void lck_attr_cleardebug(
    lck_attr_t      *attr);

#ifdef XNU_KERNEL_PRIVATE

#if __x86_64__
/*
 * Extended mutexes are only implemented on x86_64
 */
#define HAS_EXT_MUTEXES 1
#endif /* __x86_64__ */

typedef union {
    uint16_t tcurnext;
    struct {
        uint8_t cticket;
        uint8_t nticket;
    };
} lck_ticket_internal;

typedef struct {
    lck_ticket_internal tu;
    uintptr_t lck_owner;
} lck_ticket_t;

void lck_ticket_init(lck_ticket_t *tlock, lck_grp_t *grp);

#if LOCK_STATS
void lck_ticket_lock(lck_ticket_t *tlock, lck_grp_t *grp);
#else
void lck_ticket_lock(lck_ticket_t *tlock);
#define lck_ticket_lock(tlock, grp) lck_ticket_lock(tlock)
#endif /* LOCK_STATS */

void lck_ticket_unlock(lck_ticket_t *tlock);
void lck_ticket_assert_owned(lck_ticket_t *tlock);
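
/*
 * Usage sketch (illustrative only, not part of the interface): a minimal
 * ticket-lock lifecycle. `my_grp`, `my_tlock` and `my_data` are hypothetical
 * names; the lock group is assumed to have been created elsewhere, e.g. with
 * lck_grp_alloc_init().
 *
 *	static lck_ticket_t my_tlock;
 *
 *	void
 *	my_subsystem_init(lck_grp_t *my_grp)
 *	{
 *		lck_ticket_init(&my_tlock, my_grp);
 *	}
 *
 *	void
 *	my_subsystem_update(lck_grp_t *my_grp, int value)
 *	{
 *		lck_ticket_lock(&my_tlock, my_grp);
 *		my_data = value;
 *		lck_ticket_unlock(&my_tlock);
 *	}
 */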

extern void lck_attr_rw_shared_priority(
    lck_attr_t      *attr);
#endif

extern void lck_attr_free(
    lck_attr_t      *attr);

#define decl_lck_spin_data(class, name) class lck_spin_t name

extern lck_spin_t *lck_spin_alloc_init(
    lck_grp_t       *grp,
    lck_attr_t      *attr);

extern void lck_spin_init(
    lck_spin_t      *lck,
    lck_grp_t       *grp,
    lck_attr_t      *attr);

extern void lck_spin_lock(
    lck_spin_t      *lck);

extern void lck_spin_lock_grp(
    lck_spin_t      *lck,
    lck_grp_t       *grp);

extern void lck_spin_unlock(
    lck_spin_t      *lck);

extern void lck_spin_destroy(
    lck_spin_t      *lck,
    lck_grp_t       *grp);

extern void lck_spin_free(
    lck_spin_t      *lck,
    lck_grp_t       *grp);
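
/*
 * Usage sketch (illustrative only): allocating and using a spin lock.
 * `my_grp` and `counter` are hypothetical; spin locks should only protect
 * short, non-blocking critical sections.
 *
 *	lck_spin_t *slock = lck_spin_alloc_init(my_grp, LCK_ATTR_NULL);
 *
 *	lck_spin_lock(slock);
 *	counter++;
 *	lck_spin_unlock(slock);
 *
 *	lck_spin_free(slock, my_grp);
 */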

extern wait_result_t lck_spin_sleep(
    lck_spin_t              *lck,
    lck_sleep_action_t      lck_sleep_action,
    event_t                 event,
    wait_interrupt_t        interruptible);

extern wait_result_t lck_spin_sleep_grp(
    lck_spin_t              *lck,
    lck_sleep_action_t      lck_sleep_action,
    event_t                 event,
    wait_interrupt_t        interruptible,
    lck_grp_t               *grp);

extern wait_result_t lck_spin_sleep_deadline(
    lck_spin_t              *lck,
    lck_sleep_action_t      lck_sleep_action,
    event_t                 event,
    wait_interrupt_t        interruptible,
    uint64_t                deadline);

#ifdef KERNEL_PRIVATE

extern void lck_spin_lock_nopreempt(
    lck_spin_t              *lck);

extern void lck_spin_lock_nopreempt_grp(
    lck_spin_t              *lck, lck_grp_t *grp);

extern void lck_spin_unlock_nopreempt(
    lck_spin_t              *lck);

extern boolean_t lck_spin_try_lock_grp(
    lck_spin_t              *lck,
    lck_grp_t               *grp);

extern boolean_t lck_spin_try_lock(
    lck_spin_t              *lck);

extern boolean_t lck_spin_try_lock_nopreempt(
    lck_spin_t              *lck);

extern boolean_t lck_spin_try_lock_nopreempt_grp(
    lck_spin_t              *lck,
    lck_grp_t               *grp);

/* NOT SAFE: To be used only by kernel debugger to avoid deadlock. */
extern boolean_t kdp_lck_spin_is_acquired(
    lck_spin_t              *lck);

struct _lck_mtx_ext_;
extern void lck_mtx_init_ext(
    lck_mtx_t               *lck,
    struct _lck_mtx_ext_    *lck_ext,
    lck_grp_t               *grp,
    lck_attr_t              *attr);

#endif

#define decl_lck_mtx_data(class, name) class lck_mtx_t name

extern lck_mtx_t *lck_mtx_alloc_init(
    lck_grp_t               *grp,
    lck_attr_t              *attr);

extern void lck_mtx_init(
    lck_mtx_t               *lck,
    lck_grp_t               *grp,
    lck_attr_t              *attr);
extern void lck_mtx_lock(
    lck_mtx_t               *lck);

extern void lck_mtx_unlock(
    lck_mtx_t               *lck);

extern void lck_mtx_destroy(
    lck_mtx_t               *lck,
    lck_grp_t               *grp);

extern void lck_mtx_free(
    lck_mtx_t               *lck,
    lck_grp_t               *grp);

extern wait_result_t lck_mtx_sleep(
    lck_mtx_t               *lck,
    lck_sleep_action_t      lck_sleep_action,
    event_t                 event,
    wait_interrupt_t        interruptible);

extern wait_result_t lck_mtx_sleep_deadline(
    lck_mtx_t               *lck,
    lck_sleep_action_t      lck_sleep_action,
    event_t                 event,
    wait_interrupt_t        interruptible,
    uint64_t                deadline);
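
/*
 * Usage sketch (illustrative only): the classic "check a condition, then
 * lck_mtx_sleep()" pattern. `my_mtx` and `my_flag` are hypothetical names;
 * thread_wakeup() is declared in <kern/sched_prim.h>.
 *
 *	Waiter:
 *		lck_mtx_lock(&my_mtx);
 *		while (!my_flag) {
 *			(void) lck_mtx_sleep(&my_mtx, LCK_SLEEP_DEFAULT,
 *			    (event_t)&my_flag, THREAD_UNINT);
 *		}
 *		lck_mtx_unlock(&my_mtx);
 *
 *	Waker:
 *		lck_mtx_lock(&my_mtx);
 *		my_flag = TRUE;
 *		thread_wakeup((event_t)&my_flag);
 *		lck_mtx_unlock(&my_mtx);
 */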

#ifdef KERNEL_PRIVATE
/*
 * Name: lck_spin_sleep_with_inheritor
 *
 * Description: deschedule the current thread and wait on the waitq associated with event to be woken up.
 *              While waiting, the sched priority of the waiting thread will contribute to the push of the event that will
 *              be directed to the inheritor specified.
 *              An interruptible mode and deadline can be specified to return earlier from the wait.
 *
 * Args:
 *   Arg1: lck_spin_t lock used to protect the sleep. The lock will be dropped while sleeping and reacquired before returning according to the sleep action specified.
 *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
 *   Arg3: event to wait on.
 *   Arg4: thread to propagate the event push to.
 *   Arg5: interruptible flag for wait.
 *   Arg6: deadline for wait.
 *
 * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
 *             Lock will be dropped while waiting.
 *             The inheritor specified cannot run in user space until another inheritor is specified for the event or a
 *             wakeup for the event is called.
 *
 * Returns: result of the wait.
 */
extern wait_result_t lck_spin_sleep_with_inheritor(lck_spin_t *lock, lck_sleep_action_t lck_sleep_action, event_t event, thread_t inheritor, wait_interrupt_t interruptible, uint64_t deadline);

/*
 * Name: lck_mtx_sleep_with_inheritor
 *
 * Description: deschedule the current thread and wait on the waitq associated with event to be woken up.
 *              While waiting, the sched priority of the waiting thread will contribute to the push of the event that will
 *              be directed to the inheritor specified.
 *              An interruptible mode and deadline can be specified to return earlier from the wait.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the sleep. The lock will be dropped while sleeping and reacquired before returning according to the sleep action specified.
 *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
 *   Arg3: event to wait on.
 *   Arg4: thread to propagate the event push to.
 *   Arg5: interruptible flag for wait.
 *   Arg6: deadline for wait.
 *
 * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
 *             Lock will be dropped while waiting.
 *             The inheritor specified cannot run in user space until another inheritor is specified for the event or a
 *             wakeup for the event is called.
 *
 * Returns: result of the wait.
 */
extern wait_result_t lck_mtx_sleep_with_inheritor(lck_mtx_t *lock, lck_sleep_action_t lck_sleep_action, event_t event, thread_t inheritor, wait_interrupt_t interruptible, uint64_t deadline);
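
/*
 * Usage sketch (illustrative only): pairing lck_mtx_sleep_with_inheritor()
 * with wakeup_one_with_inheritor(). `my_mtx`, `my_cond` and `my_owner` are
 * hypothetical; `my_owner` is the thread expected to make `my_cond` true, so
 * it receives the waiters' push while they sleep. TIMEOUT_WAIT_FOREVER is
 * assumed here to mean "no deadline".
 *
 *	Waiter:
 *		lck_mtx_lock(&my_mtx);
 *		while (!my_cond) {
 *			(void) lck_mtx_sleep_with_inheritor(&my_mtx,
 *			    LCK_SLEEP_DEFAULT, (event_t)&my_cond, my_owner,
 *			    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
 *		}
 *		lck_mtx_unlock(&my_mtx);
 *
 *	Waker (typically `my_owner`):
 *		thread_t woken = THREAD_NULL;
 *		lck_mtx_lock(&my_mtx);
 *		my_cond = TRUE;
 *		(void) wakeup_one_with_inheritor((event_t)&my_cond,
 *		    THREAD_AWAKENED, LCK_WAKE_DEFAULT, &woken);
 *		lck_mtx_unlock(&my_mtx);
 *		if (woken != THREAD_NULL) {
 *			thread_deallocate(woken);     released reference on the woken-up thread
 *		}
 */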

/*
 * Name: lck_rw_sleep_with_inheritor
 *
 * Description: deschedule the current thread and wait on the waitq associated with event to be woken up.
 *              While waiting, the sched priority of the waiting thread will contribute to the push of the event that will
 *              be directed to the inheritor specified.
 *              An interruptible mode and deadline can be specified to return earlier from the wait.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the sleep. The lock will be dropped while sleeping and reacquired before returning according to the sleep action specified.
 *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE.
 *   Arg3: event to wait on.
 *   Arg4: thread to propagate the event push to.
 *   Arg5: interruptible flag for wait.
 *   Arg6: deadline for wait.
 *
 * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
 *             Lock will be dropped while waiting.
 *             The inheritor specified cannot run in user space until another inheritor is specified for the event or a
 *             wakeup for the event is called.
 *
 * Returns: result of the wait.
 */
extern wait_result_t lck_rw_sleep_with_inheritor(lck_rw_t *lock, lck_sleep_action_t lck_sleep_action, event_t event, thread_t inheritor, wait_interrupt_t interruptible, uint64_t deadline);

/*
 * Name: wakeup_one_with_inheritor
 *
 * Description: wake up one waiter for event if any. The thread woken up will be the one with the highest sched priority waiting on event.
 *              The push for the event will be transferred from the last inheritor to the woken-up thread.
 *
 * Args:
 *   Arg1: event to wake from.
 *   Arg2: wait result to pass to the woken-up thread.
 *   Arg3: wake action. LCK_WAKE_DEFAULT or LCK_WAKE_DO_NOT_TRANSFER_PUSH.
 *   Arg4: pointer for storing the woken-up thread.
 *
 * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
 *
 * Conditions: The new inheritor woken up cannot run in user space until another inheritor is specified for the event or a
 *             wakeup for the event is called.
 *             A reference for the woken-up thread is acquired.
 *             NOTE: this cannot be called from interrupt context.
 */
extern kern_return_t wakeup_one_with_inheritor(event_t event, wait_result_t result, lck_wake_action_t action, thread_t *thread_wokenup);

/*
 * Name: wakeup_all_with_inheritor
 *
 * Description: wake up all waiters waiting for event. The old inheritor will lose the push.
 *
 * Args:
 *   Arg1: event to wake from.
 *   Arg2: wait result to pass to the woken-up threads.
 *
 * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
 *
 * Conditions: NOTE: this cannot be called from interrupt context.
 */
extern kern_return_t wakeup_all_with_inheritor(event_t event, wait_result_t result);

/*
 * Name: change_sleep_inheritor
 *
 * Description: Redirect the push of the waiting threads of event to the new inheritor specified.
 *
 * Args:
 *   Arg1: event to redirect the push.
 *   Arg2: new inheritor for event.
 *
 * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
 *
 * Conditions: In case of success, the new inheritor cannot run in user space until another inheritor is specified for the event or a
 *             wakeup for the event is called.
 *             NOTE: this cannot be called from interrupt context.
 */
extern kern_return_t change_sleep_inheritor(event_t event, thread_t inheritor);

/*
 * gate structure
 */
typedef struct gate {
    uintptr_t gate_data;              // thread holder, interlock bit and waiter bit
    struct turnstile *turnstile;      // protected by the interlock bit
} gate_t;

#define GATE_ILOCK_BIT          0
#define GATE_WAITERS_BIT        1

#define GATE_ILOCK              (1 << GATE_ILOCK_BIT)
#define GATE_WAITERS            (1 << GATE_WAITERS_BIT)

#define gate_ilock(gate)                hw_lock_bit((hw_lock_bit_t*)(&(gate)->gate_data), GATE_ILOCK_BIT, LCK_GRP_NULL)
#define gate_iunlock(gate)              hw_unlock_bit((hw_lock_bit_t*)(&(gate)->gate_data), GATE_ILOCK_BIT)
#define gate_has_waiters(state)         ((state & GATE_WAITERS) != 0)
#define ordered_load_gate(gate)         os_atomic_load(&(gate)->gate_data, compiler_acq_rel)
#define ordered_store_gate(gate, value) os_atomic_store(&(gate)->gate_data, value, compiler_acq_rel)

#define GATE_THREAD_MASK                (~(uintptr_t)(GATE_ILOCK | GATE_WAITERS))
#define GATE_STATE_TO_THREAD(state)     (thread_t)(state & GATE_THREAD_MASK)
#define GATE_THREAD_TO_STATE(thread)    ((uintptr_t)thread)

/*
 * Possible gate_wait_result_t values.
 */
typedef int gate_wait_result_t;
#define GATE_HANDOFF            0
#define GATE_OPENED             1
#define GATE_TIMED_OUT          2
#define GATE_INTERRUPTED        3

/*
 * Gate flags used by gate_assert
 */
#define GATE_ASSERT_CLOSED      0
#define GATE_ASSERT_OPEN        1
#define GATE_ASSERT_HELD        2

/*
 * Gate flags used by gate_handoff
 */
#define GATE_HANDOFF_DEFAULT                    0
#define GATE_HANDOFF_OPEN_IF_NO_WAITERS         1

#define GATE_EVENT(gate)        ((event_t) gate)
#define EVENT_TO_GATE(event)    ((gate_t *) event)

/*
 * Name: decl_lck_rw_gate_data
 *
 * Description: declares a gate variable with specified storage class.
 *              The gate itself will be stored in this variable and it is the caller's responsibility
 *              to ensure that this variable's memory is going to be accessible by all threads that will use
 *              the gate.
 *              Every gate function will require a pointer to this variable as parameter. The same pointer should
 *              be used in every thread.
 *
 *              The variable needs to be initialized once with lck_rw_gate_init() and destroyed once with
 *              lck_rw_gate_destroy() when not needed anymore.
 *
 *              The gate will be used in conjunction with a lck_rw_t.
 *
 * Args:
 *   Arg1: storage class.
 *   Arg2: variable name.
 */
#define decl_lck_rw_gate_data(class, name) class gate_t name

/*
 * Name: lck_rw_gate_init
 *
 * Description: initializes a variable declared with decl_lck_rw_gate_data.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 */
extern void lck_rw_gate_init(lck_rw_t *lock, gate_t *gate);

/*
 * Name: lck_rw_gate_destroy
 *
 * Description: destroys a variable previously initialized.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 */
extern void lck_rw_gate_destroy(lck_rw_t *lock, gate_t *gate);

/*
 * Name: lck_rw_gate_try_close
 *
 * Description: Tries to close the gate.
 *              In case of success the current thread will be set as
 *              the holder of the gate.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *
 * Returns:
 *          KERN_SUCCESS in case the gate was successfully closed. The current thread is the new holder
 *          of the gate.
 *          A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
 *          to wake up possible waiters on the gate before returning to userspace.
 *          If the intent is to conditionally probe the gate before waiting, the lock must not be dropped
 *          between the calls to lck_rw_gate_try_close() and lck_rw_gate_wait().
 *
 *          KERN_FAILURE in case the gate was already closed. Will panic if the current thread was already the holder of the gate.
 *          lck_rw_gate_wait() should be called instead if the intent is to unconditionally wait on this gate.
 *          The calls to lck_rw_gate_try_close() and lck_rw_gate_wait() should
 *          be done without dropping the lock that is protecting the gate in between.
 */
extern kern_return_t lck_rw_gate_try_close(lck_rw_t *lock, gate_t *gate);

/*
 * Name: lck_rw_gate_close
 *
 * Description: Closes the gate. The current thread will be set as
 *              the holder of the gate. Will panic if the gate is already closed.
 *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
 *              to wake up possible waiters on the gate before returning to userspace.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The gate must be open.
 *
 */
extern void lck_rw_gate_close(lck_rw_t *lock, gate_t *gate);


/*
 * Name: lck_rw_gate_open
 *
 * Description: Opens the gate and wakes up possible waiters.
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The current thread must be the holder of the gate.
 *
 */
extern void lck_rw_gate_open(lck_rw_t *lock, gate_t *gate);

/*
 * Name: lck_rw_gate_handoff
 *
 * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
 *              priority will be selected as the new holder of the gate, and woken up,
 *              with the gate remaining in the closed state throughout.
 *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
 *              will be returned.
 *              The GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify that the gate should be opened in
 *              case no waiters were found.
 *
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The current thread must be the holder of the gate.
 *
 * Returns:
 *          KERN_SUCCESS in case one of the waiters became the new holder.
 *          KERN_NOT_WAITING in case there were no waiters.
 *
 */
extern kern_return_t lck_rw_gate_handoff(lck_rw_t *lock, gate_t *gate, int flags);

/*
 * Name: lck_rw_gate_steal
 *
 * Description: Steals the ownership of the gate. It sets the current thread as the
 *              new holder of the gate.
 *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
 *              to wake up possible waiters on the gate before returning to userspace.
 *              NOTE: the previous holder should not call lck_rw_gate_open() or lck_rw_gate_handoff()
 *              anymore.
 *
 *
 * Args:
 *   Arg1: lck_rw_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The gate must be closed and the current thread must not already be the holder.
 *
 */
extern void lck_rw_gate_steal(lck_rw_t *lock, gate_t *gate);

584 | /* | |
585 | * Name: lck_rw_gate_wait | |
586 | * | |
587 | * Description: Waits for the current thread to become the holder of the gate or for the | |
588 | * gate to become open. An interruptible mode and deadline can be specified | |
589 | * to return earlier from the wait. | |
590 | * | |
591 | * Args: | |
592 | * Arg1: lck_rw_t lock used to protect the gate. | |
593 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. | |
594 | * Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE. | |
595 | * Arg3: interruptible flag for wait. | |
596 | * Arg4: deadline | |
597 | * | |
598 | * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified. | |
599 | * Lock will be dropped while waiting. | |
600 | * The gate must be closed. | |
601 | * | |
602 | * Returns: Reason why the thread was woken up. | |
603 | * GATE_HANDOFF - the current thread was handed off the ownership of the gate. | |
604 | * A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on | |
605 | * to wake up possible waiters on the gate before returning to userspace. | |
606 | * GATE_OPENED - the gate was opened by the holder. | |
607 | * GATE_TIMED_OUT - the thread was woken up by a timeout. | |
608 | * GATE_INTERRUPTED - the thread was interrupted while sleeping. | |
609 | * | |
610 | */ | |
611 | extern gate_wait_result_t lck_rw_gate_wait(lck_rw_t *lock, gate_t *gate, lck_sleep_action_t lck_sleep_action, wait_interrupt_t interruptible, uint64_t deadline); | |
612 | ||
613 | /* | |
614 | * Name: lck_rw_gate_assert | |
615 | * | |
616 | * Description: asserts that the gate is in the specified state. | |
617 | * | |
618 | * Args: | |
619 | * Arg1: lck_rw_t lock used to protect the gate. | |
620 | * Arg2: pointer to the gate data declared with decl_lck_rw_gate_data. | |
621 | * Arg3: flags to specified assert type. | |
622 | * GATE_ASSERT_CLOSED - the gate is currently closed | |
623 | * GATE_ASSERT_OPEN - the gate is currently opened | |
624 | * GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder | |
625 | */ | |
626 | extern void lck_rw_gate_assert(lck_rw_t *lock, gate_t *gate, int flags); | |
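
/*
 * Usage sketch (illustrative only): one-time setup guarded by a gate so
 * that a single thread performs the work while the others wait for it.
 * `my_lock`, `my_gate` and `my_setup_done` are hypothetical names, and
 * TIMEOUT_WAIT_FOREVER is assumed here to mean "no deadline".
 *
 *	lck_rw_lock_exclusive(&my_lock);
 *	if (!my_setup_done) {
 *		if (lck_rw_gate_try_close(&my_lock, &my_gate) == KERN_SUCCESS) {
 *			... perform the setup; my_lock may be dropped and
 *			    re-taken here while the gate stays closed ...
 *			my_setup_done = TRUE;
 *			lck_rw_gate_open(&my_lock, &my_gate);
 *		} else {
 *			... another thread is doing the setup; wait for it ...
 *			(void) lck_rw_gate_wait(&my_lock, &my_gate,
 *			    LCK_SLEEP_DEFAULT, THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
 *		}
 *	}
 *	lck_rw_unlock_exclusive(&my_lock);
 */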

/*
 * Name: decl_lck_mtx_gate_data
 *
 * Description: declares a gate variable with specified storage class.
 *              The gate itself will be stored in this variable and it is the caller's responsibility
 *              to ensure that this variable's memory is going to be accessible by all threads that will use
 *              the gate.
 *              Every gate function will require a pointer to this variable as parameter. The same pointer should
 *              be used in every thread.
 *
 *              The variable needs to be initialized once with lck_mtx_gate_init() and destroyed once with
 *              lck_mtx_gate_destroy() when not needed anymore.
 *
 *              The gate will be used in conjunction with a lck_mtx_t.
 *
 * Args:
 *   Arg1: storage class.
 *   Arg2: variable name.
 */
#define decl_lck_mtx_gate_data(class, name) class gate_t name

/*
 * Name: lck_mtx_gate_init
 *
 * Description: initializes a variable declared with decl_lck_mtx_gate_data.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 */
extern void lck_mtx_gate_init(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_destroy
 *
 * Description: destroys a variable previously initialized.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 */
extern void lck_mtx_gate_destroy(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_try_close
 *
 * Description: Tries to close the gate.
 *              In case of success the current thread will be set as
 *              the holder of the gate.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *
 * Returns:
 *          KERN_SUCCESS in case the gate was successfully closed. The current thread is the new holder
 *          of the gate.
 *          A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
 *          to wake up possible waiters on the gate before returning to userspace.
 *          If the intent is to conditionally probe the gate before waiting, the lock must not be dropped
 *          between the calls to lck_mtx_gate_try_close() and lck_mtx_gate_wait().
 *
 *          KERN_FAILURE in case the gate was already closed. Will panic if the current thread was already the holder of the gate.
 *          lck_mtx_gate_wait() should be called instead if the intent is to unconditionally wait on this gate.
 *          The calls to lck_mtx_gate_try_close() and lck_mtx_gate_wait() should
 *          be done without dropping the lock that is protecting the gate in between.
 */
extern kern_return_t lck_mtx_gate_try_close(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_close
 *
 * Description: Closes the gate. The current thread will be set as
 *              the holder of the gate. Will panic if the gate is already closed.
 *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
 *              to wake up possible waiters on the gate before returning to userspace.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The gate must be open.
 *
 */
extern void lck_mtx_gate_close(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_open
 *
 * Description: Opens the gate and wakes up possible waiters.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The current thread must be the holder of the gate.
 *
 */
extern void lck_mtx_gate_open(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_handoff
 *
 * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
 *              priority will be selected as the new holder of the gate, and woken up,
 *              with the gate remaining in the closed state throughout.
 *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
 *              will be returned.
 *              The GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify that the gate should be opened in
 *              case no waiters were found.
 *
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The current thread must be the holder of the gate.
 *
 * Returns:
 *          KERN_SUCCESS in case one of the waiters became the new holder.
 *          KERN_NOT_WAITING in case there were no waiters.
 *
 */
extern kern_return_t lck_mtx_gate_handoff(lck_mtx_t *lock, gate_t *gate, int flags);

/*
 * Name: lck_mtx_gate_steal
 *
 * Description: Steals the ownership of the gate. It sets the current thread as the
 *              new holder of the gate.
 *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
 *              to wake up possible waiters on the gate before returning to userspace.
 *              NOTE: the previous holder should not call lck_mtx_gate_open() or lck_mtx_gate_handoff()
 *              anymore.
 *
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The gate must be closed and the current thread must not already be the holder.
 *
 */
extern void lck_mtx_gate_steal(lck_mtx_t *lock, gate_t *gate);

/*
 * Name: lck_mtx_gate_wait
 *
 * Description: Waits for the current thread to become the holder of the gate or for the
 *              gate to become open. An interruptible mode and deadline can be specified
 *              to return earlier from the wait.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
 *   Arg4: interruptible flag for wait.
 *   Arg5: deadline for wait.
 *
 * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
 *             Lock will be dropped while waiting.
 *             The gate must be closed.
 *
 * Returns: Reason why the thread was woken up.
 *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
 *                         A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
 *                         to wake up possible waiters on the gate before returning to userspace.
 *          GATE_OPENED - the gate was opened by the holder.
 *          GATE_TIMED_OUT - the thread was woken up by a timeout.
 *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
 *
 */
extern gate_wait_result_t lck_mtx_gate_wait(lck_mtx_t *lock, gate_t *gate, lck_sleep_action_t lck_sleep_action, wait_interrupt_t interruptible, uint64_t deadline);

/*
 * Name: lck_mtx_gate_assert
 *
 * Description: asserts that the gate is in the specified state.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: flags to specify the assert type.
 *         GATE_ASSERT_CLOSED - the gate is currently closed
 *         GATE_ASSERT_OPEN - the gate is currently opened
 *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
 */
extern void lck_mtx_gate_assert(lck_mtx_t *lock, gate_t *gate, int flags);


#endif //KERNEL_PRIVATE

#if DEVELOPMENT || DEBUG
#define FULL_CONTENDED 0
#define HALF_CONTENDED 1
#define MAX_CONDENDED  2

extern void erase_all_test_mtx_stats(void);
extern int get_test_mtx_stats_string(char* buffer, int buffer_size);
extern void lck_mtx_test_init(void);
extern void lck_mtx_test_lock(void);
extern void lck_mtx_test_unlock(void);
extern int lck_mtx_test_mtx_uncontended(int iter, char* buffer, int buffer_size);
extern int lck_mtx_test_mtx_contended(int iter, char* buffer, int buffer_size, int type);
extern int lck_mtx_test_mtx_uncontended_loop_time(int iter, char* buffer, int buffer_size);
extern int lck_mtx_test_mtx_contended_loop_time(int iter, char* buffer, int buffer_size, int type);
#endif
#ifdef KERNEL_PRIVATE

extern boolean_t lck_mtx_try_lock(
    lck_mtx_t               *lck);

extern void mutex_pause(uint32_t);

extern void lck_mtx_yield(
    lck_mtx_t               *lck);

extern boolean_t lck_mtx_try_lock_spin(
    lck_mtx_t               *lck);

extern void lck_mtx_lock_spin(
    lck_mtx_t               *lck);

extern boolean_t kdp_lck_mtx_lock_spin_is_acquired(
    lck_mtx_t               *lck);

extern void lck_mtx_convert_spin(
    lck_mtx_t               *lck);

extern void lck_mtx_lock_spin_always(
    lck_mtx_t               *lck);

extern boolean_t lck_mtx_try_lock_spin_always(
    lck_mtx_t               *lck);

#define lck_mtx_unlock_always(l)        lck_mtx_unlock(l)

extern void lck_spin_assert(
    lck_spin_t              *lck,
    unsigned int            type);

extern boolean_t kdp_lck_rw_lock_is_acquired_exclusive(
    lck_rw_t                *lck);

#endif /* KERNEL_PRIVATE */

extern void lck_mtx_assert(
    lck_mtx_t               *lck,
    unsigned int            type);

#if MACH_ASSERT
#define LCK_MTX_ASSERT(lck, type)       lck_mtx_assert((lck), (type))
#define LCK_SPIN_ASSERT(lck, type)      lck_spin_assert((lck), (type))
#define LCK_RW_ASSERT(lck, type)        lck_rw_assert((lck), (type))
#else /* MACH_ASSERT */
#define LCK_MTX_ASSERT(lck, type)
#define LCK_SPIN_ASSERT(lck, type)
#define LCK_RW_ASSERT(lck, type)
#endif /* MACH_ASSERT */

#if DEBUG
#define LCK_MTX_ASSERT_DEBUG(lck, type)         lck_mtx_assert((lck), (type))
#define LCK_SPIN_ASSERT_DEBUG(lck, type)        lck_spin_assert((lck), (type))
#define LCK_RW_ASSERT_DEBUG(lck, type)          lck_rw_assert((lck), (type))
#else /* DEBUG */
#define LCK_MTX_ASSERT_DEBUG(lck, type)
#define LCK_SPIN_ASSERT_DEBUG(lck, type)
#define LCK_RW_ASSERT_DEBUG(lck, type)
#endif /* DEBUG */

#define LCK_ASSERT_OWNED                1
#define LCK_ASSERT_NOTOWNED             2

#define LCK_MTX_ASSERT_OWNED            LCK_ASSERT_OWNED
#define LCK_MTX_ASSERT_NOTOWNED         LCK_ASSERT_NOTOWNED

#ifdef MACH_KERNEL_PRIVATE
struct turnstile;
extern void lck_mtx_lock_wait(
    lck_mtx_t               *lck,
    thread_t                holder,
    struct turnstile        **ts);

extern int lck_mtx_lock_acquire(
    lck_mtx_t               *lck,
    struct turnstile        *ts);

extern boolean_t lck_mtx_unlock_wakeup(
    lck_mtx_t               *lck,
    thread_t                holder);

extern boolean_t lck_mtx_ilk_unlock(
    lck_mtx_t               *lck);

extern boolean_t lck_mtx_ilk_try_lock(
    lck_mtx_t               *lck);

extern void lck_mtx_wakeup_adjust_pri(thread_t thread, integer_t priority);

#endif

#define decl_lck_rw_data(class, name)   class lck_rw_t name

typedef unsigned int    lck_rw_type_t;

#define LCK_RW_TYPE_SHARED              0x01
#define LCK_RW_TYPE_EXCLUSIVE           0x02

#ifdef XNU_KERNEL_PRIVATE
#define LCK_RW_ASSERT_SHARED            0x01
#define LCK_RW_ASSERT_EXCLUSIVE         0x02
#define LCK_RW_ASSERT_HELD              0x03
#define LCK_RW_ASSERT_NOTHELD           0x04
#endif

extern lck_rw_t *lck_rw_alloc_init(
    lck_grp_t               *grp,
    lck_attr_t              *attr);

extern void lck_rw_init(
    lck_rw_t                *lck,
    lck_grp_t               *grp,
    lck_attr_t              *attr);

extern void lck_rw_lock(
    lck_rw_t                *lck,
    lck_rw_type_t           lck_rw_type);

extern void lck_rw_unlock(
    lck_rw_t                *lck,
    lck_rw_type_t           lck_rw_type);

extern void lck_rw_lock_shared(
    lck_rw_t                *lck);

extern void lck_rw_unlock_shared(
    lck_rw_t                *lck);

extern boolean_t lck_rw_lock_yield_shared(
    lck_rw_t                *lck,
    boolean_t               force_yield);

extern void lck_rw_lock_exclusive(
    lck_rw_t                *lck);
/*
 * Grabs the lock exclusive.
 * Returns true iff the thread spun or blocked while attempting to
 * acquire the lock.
 *
 * Note that the return value is ONLY A HEURISTIC w.r.t. the lock's
 * contention.
 *
 * This routine IS EXPERIMENTAL.
 * It's only used for the vm object lock, and use for other subsystems
 * is UNSUPPORTED.
 */
extern bool lck_rw_lock_exclusive_check_contended(
    lck_rw_t                *lck);

extern void lck_rw_unlock_exclusive(
    lck_rw_t                *lck);

#ifdef XNU_KERNEL_PRIVATE
/*
 * CAUTION
 * read-write locks do not have a concept of ownership, so lck_rw_assert()
 * merely asserts that someone is holding the lock, not necessarily the caller.
 */
extern void lck_rw_assert(
    lck_rw_t                *lck,
    unsigned int            type);

extern void lck_rw_clear_promotion(thread_t thread, uintptr_t trace_obj);
extern void lck_rw_set_promotion_locked(thread_t thread);

uintptr_t unslide_for_kdebug(void* object);
#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE

extern lck_rw_type_t lck_rw_done(
    lck_rw_t                *lck);
#endif

extern void lck_rw_destroy(
    lck_rw_t                *lck,
    lck_grp_t               *grp);

extern void lck_rw_free(
    lck_rw_t                *lck,
    lck_grp_t               *grp);

extern wait_result_t lck_rw_sleep(
    lck_rw_t                *lck,
    lck_sleep_action_t      lck_sleep_action,
    event_t                 event,
    wait_interrupt_t        interruptible);

extern wait_result_t lck_rw_sleep_deadline(
    lck_rw_t                *lck,
    lck_sleep_action_t      lck_sleep_action,
    event_t                 event,
    wait_interrupt_t        interruptible,
    uint64_t                deadline);

extern boolean_t lck_rw_lock_shared_to_exclusive(
    lck_rw_t                *lck);
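
/*
 * Usage sketch (illustrative only): upgrading a shared hold to exclusive.
 * `my_lock` and `my_update()` are hypothetical names. The retry path assumes
 * that the shared hold is dropped when the upgrade fails, so the lock is
 * re-taken exclusive and the checks are re-validated before modifying state.
 *
 *	lck_rw_lock_shared(&my_lock);
 *	... read-only checks ...
 *	if (!lck_rw_lock_shared_to_exclusive(&my_lock)) {
 *		lck_rw_lock_exclusive(&my_lock);
 *		... re-validate the checks: other writers may have run ...
 *	}
 *	my_update();
 *	lck_rw_unlock_exclusive(&my_lock);
 */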

extern void lck_rw_lock_exclusive_to_shared(
    lck_rw_t                *lck);

extern boolean_t lck_rw_try_lock(
    lck_rw_t                *lck,
    lck_rw_type_t           lck_rw_type);

#ifdef KERNEL_PRIVATE

extern boolean_t lck_rw_try_lock_shared(
    lck_rw_t                *lck);

extern boolean_t lck_rw_try_lock_exclusive(
    lck_rw_t                *lck);

#endif
#if XNU_KERNEL_PRIVATE

struct lck_attr_startup_spec {
    lck_attr_t              *lck_attr;
    uint32_t                lck_attr_set_flags;
    uint32_t                lck_attr_clear_flags;
};

struct lck_spin_startup_spec {
    lck_spin_t              *lck;
    lck_grp_t               *lck_grp;
    lck_attr_t              *lck_attr;
};

struct lck_mtx_startup_spec {
    lck_mtx_t               *lck;
    struct _lck_mtx_ext_    *lck_ext;
    lck_grp_t               *lck_grp;
    lck_attr_t              *lck_attr;
};

struct lck_rw_startup_spec {
    lck_rw_t                *lck;
    lck_grp_t               *lck_grp;
    lck_attr_t              *lck_attr;
};

extern void lck_attr_startup_init(
    struct lck_attr_startup_spec *spec);

extern void lck_spin_startup_init(
    struct lck_spin_startup_spec *spec);

extern void lck_mtx_startup_init(
    struct lck_mtx_startup_spec *spec);

extern void lck_rw_startup_init(
    struct lck_rw_startup_spec *spec);

/*
 * Auto-initializing locks declarations
 * ------------------------------------
 *
 * Unless you need to configure your locks in very specific ways,
 * there is no point creating explicit lock attributes. For most
 * static locks, these declaration macros can be used:
 *
 * - LCK_SPIN_DECLARE for spinlocks,
 * - LCK_MTX_EARLY_DECLARE for mutexes initialized before memory
 *   allocations are possible,
 * - LCK_MTX_DECLARE for mutexes,
 * - LCK_RW_DECLARE for reader writer locks.
 *
 * For cases when some particular attributes need to be used,
 * these come in *_ATTR variants that take a variable declared with
 * LCK_ATTR_DECLARE as an argument.
 */
#define LCK_ATTR_DECLARE(var, set_flags, clear_flags) \
	SECURITY_READ_ONLY_LATE(lck_attr_t) var; \
	static __startup_data struct lck_attr_startup_spec \
	__startup_lck_attr_spec_ ## var = { &var, set_flags, clear_flags }; \
	STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_SECOND, lck_attr_startup_init, \
	    &__startup_lck_attr_spec_ ## var)

#define LCK_SPIN_DECLARE_ATTR(var, grp, attr) \
	lck_spin_t var; \
	static __startup_data struct lck_spin_startup_spec \
	__startup_lck_spin_spec_ ## var = { &var, grp, attr }; \
	STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, lck_spin_startup_init, \
	    &__startup_lck_spin_spec_ ## var)

#define LCK_SPIN_DECLARE(var, grp) \
	LCK_SPIN_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)

#define LCK_MTX_DECLARE_ATTR(var, grp, attr) \
	lck_mtx_t var; \
	static __startup_data struct lck_mtx_startup_spec \
	__startup_lck_mtx_spec_ ## var = { &var, NULL, grp, attr }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FIRST, lck_mtx_startup_init, \
	    &__startup_lck_mtx_spec_ ## var)

#define LCK_MTX_DECLARE(var, grp) \
	LCK_MTX_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)

#define LCK_MTX_EARLY_DECLARE_ATTR(var, grp, attr) \
	lck_mtx_ext_t var ## _ext; \
	lck_mtx_t var; \
	static __startup_data struct lck_mtx_startup_spec \
	__startup_lck_mtx_spec_ ## var = { &var, &var ## _ext, grp, attr }; \
	STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, lck_mtx_startup_init, \
	    &__startup_lck_mtx_spec_ ## var)

#define LCK_MTX_EARLY_DECLARE(var, grp) \
	LCK_MTX_EARLY_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)

#define LCK_RW_DECLARE_ATTR(var, grp, attr) \
	lck_rw_t var; \
	static __startup_data struct lck_rw_startup_spec \
	__startup_lck_rw_spec_ ## var = { &var, grp, attr }; \
	STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, lck_rw_startup_init, \
	    &__startup_lck_rw_spec_ ## var)

#define LCK_RW_DECLARE(var, grp) \
	LCK_RW_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)
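
/*
 * Usage sketch (illustrative only): declaring auto-initialized static locks.
 * LCK_GRP_DECLARE is provided by <kern/lock_group.h>; the names below are
 * hypothetical.
 *
 *	LCK_GRP_DECLARE(my_grp, "my subsystem");
 *	LCK_MTX_DECLARE(my_mtx, &my_grp);
 *	LCK_RW_DECLARE(my_rwlock, &my_grp);
 *
 * The locks are registered with the startup subsystem and initialized before
 * normal kernel code runs, so no explicit *_init() call is needed.
 */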

#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_LOCKS_H_ */