/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define LOCK_PRIVATE 1

#include <mach_ldebug.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <i386/machine_routines.h> /* machine_timeout_suspended() */
#include <machine/atomic.h>
#include <machine/machine_cpu.h>
#include <sys/kdebug.h>
#include <i386/locks_i386_inlines.h>

/*
 * Fast path routines for the lck_mtx locking and unlocking functions.
 * Each fast path tries a single compare-and-swap instruction to acquire or
 * release both the lock and the interlock, and falls back to the slow path
 * if that fails.
 *
 * These functions were previously implemented in x86 assembly; some
 * optimizations are in place in this C code so that the compiled output is
 * as performant and compact as the assembly version.
 *
 * To keep the compiler from inlining these functions and growing the kernel
 * text, every function is marked __attribute__((noinline)).
 *
 * The code is structured so that no called function returns into the
 * context of its caller: every call is either a tail call or an inline
 * function. Each tail-called function takes fewer than six arguments, so
 * the arguments are passed in registers and never pushed on the stack.
 * This allows the compiler to avoid creating a stack frame for these
 * functions.
 *
 * The file is compiled with -momit-leaf-frame-pointer and -O2.
 */
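
/*
 * A minimal sketch of the single compare-and-swap fast path described
 * above, applied to a plain word rather than a lck_mtx_t.  Everything
 * named example_* below is hypothetical and shown for illustration only;
 * the sketch assumes the os_atomic_cmpxchg(), disable_preemption() and
 * enable_preemption() primitives already used in this file.
 */
#if 0   /* illustration only, not compiled */
extern void example_word_lock_slow(uint32_t *word);    /* hypothetical slow path */

static void
example_word_lock(uint32_t *word)
{
	uint32_t prev = 0;      /* expected value: lock free */
	uint32_t next = 1;      /* desired value: lock held */

	disable_preemption();
	/* a single CAS acquires the lock; on failure fall back to the slow path */
	if (!os_atomic_cmpxchg(word, prev, next, acquire)) {
		enable_preemption();
		return example_word_lock_slow(word);    /* tail call, no stack frame needed */
	}
	/* lock acquired with preemption disabled, as in the fast paths below */
}
#endif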

#if DEVELOPMENT || DEBUG
TUNABLE(bool, LckDisablePreemptCheck, "-disable_mtx_chk", false);

/*
 * If one or more simple locks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simple lock
 * is not a good thing).
 */
void
lck_mtx_check_preemption(void)
{
	if (get_preemption_level() == 0) {
		return;
	}

	if (LckDisablePreemptCheck) {
		return;
	}

	if (current_cpu_datap()->cpu_hibernate) {
		return;
	}

	panic("preemption_level(%d) != 0\n", get_preemption_level());
}

#else /* DEVELOPMENT || DEBUG */

void
lck_mtx_check_preemption(void)
{
	return;
}

#endif /* DEVELOPMENT || DEBUG */
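
/*
 * A short sketch of the rule the check above enforces: taking a mutex may
 * block, so any simple (spin) lock must be dropped first.  The locks and
 * the function below are hypothetical and for illustration only (assume
 * both were initialized with lck_spin_init()/lck_mtx_init()).
 */
#if 0   /* illustration only, not compiled */
static lck_spin_t example_spin;
static lck_mtx_t  example_mtx;

static void
example_lock_ordering(void)
{
	lck_spin_lock(&example_spin);
	/* ... short, non-blocking work under the spin lock ... */
	lck_spin_unlock(&example_spin);

	/* preemption level is back to zero here, so the check above passes */
	lck_mtx_lock(&example_mtx);
	/* ... work that may block ... */
	lck_mtx_unlock(&example_mtx);
}
#endif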

/*
 * Routine: lck_mtx_lock
 *
 * Locks a mutex for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be held
 * by the current thread.
 * In case of contention it might sleep.
 */
__attribute__((noinline))
void
lck_mtx_lock(
	lck_mtx_t       *lock)
{
	uint32_t prev, state;

	lck_mtx_check_preemption();
	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is not held,
	 * the interlock is not contended and there are no waiters.
	 * Indirect mutexes fall through to the slow path,
	 * as do destroyed mutexes.
	 */

	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK | LCK_MTX_WAITERS_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK;

	disable_preemption();
	if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
		enable_preemption();
		return lck_mtx_lock_slow(lock);
	}

	/* mutex acquired, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;          /* lock statistic */
	}
#endif

	/* release interlock and re-enable preemption */
	lck_mtx_lock_finish_inline(lock, state, FALSE);
}
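
/*
 * Hypothetical usage sketch for lck_mtx_lock()/lck_mtx_unlock(); the group
 * and mutex names below are illustrative only and not part of this file.
 */
#if 0   /* illustration only, not compiled */
static void
example_mutex_usage(void)
{
	lck_grp_t *grp = lck_grp_alloc_init("example", LCK_GRP_ATTR_NULL);
	lck_mtx_t *mtx = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);

	lck_mtx_lock(mtx);              /* may sleep on contention */
	/* ... critical section ... */
	lck_mtx_unlock(mtx);

	lck_mtx_free(mtx, grp);
	lck_grp_free(grp);
}
#endif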

/*
 * Routine: lck_mtx_try_lock
 *
 * Try to lock a mutex for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be held
 * by the current thread.
 *
 * If the mutex is already held (either as a spin lock or as
 * a mutex) the function fails and returns FALSE; otherwise it
 * acquires the mutex and returns TRUE.
 */
__attribute__((noinline))
boolean_t
lck_mtx_try_lock(
	lck_mtx_t       *lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is not held,
	 * the interlock is not contended and there are no waiters.
	 * Indirect mutexes fall through to the slow path,
	 * as do destroyed mutexes.
	 */

	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK | LCK_MTX_WAITERS_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK;

	disable_preemption();
	if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
		enable_preemption();
		return lck_mtx_try_lock_slow(lock);
	}

	/* mutex acquired, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;          /* lock statistic */
	}
#endif

	/* release interlock and re-enable preemption */
	lck_mtx_try_lock_finish_inline(lock, state);

	return TRUE;
}
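
/*
 * Hypothetical usage sketch for lck_mtx_try_lock(): the caller must be
 * prepared to take another path when the lock is unavailable.
 */
#if 0   /* illustration only, not compiled */
static boolean_t
example_try_lock_usage(lck_mtx_t *mtx)
{
	if (!lck_mtx_try_lock(mtx)) {
		/* lock is held as mutex or spin lock: do not block, give up */
		return FALSE;
	}
	/* ... critical section ... */
	lck_mtx_unlock(mtx);
	return TRUE;
}
#endif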

/*
 * Routine: lck_mtx_lock_spin_always
 *
 * Tries to lock a mutex as a spin lock for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be held
 * by the current thread.
 *
 * If the mutex is held as a mutex by another thread, this
 * function switches behavior and tries to acquire the lock
 * as a mutex.
 *
 * If the mutex is held as a spin lock, it will spin,
 * contending for it.
 *
 * In case of contention it might sleep.
 */
__attribute__((noinline))
void
lck_mtx_lock_spin_always(
	lck_mtx_t       *lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is not held,
	 * neither as a mutex nor as a spin lock, and
	 * the interlock is not contended.
	 * Indirect mutexes fall through to the slow path,
	 * as do destroyed mutexes.
	 */

	/* is the interlock or mutex held */
	if (state & (LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK)) {
		return lck_mtx_lock_spin_slow(lock);
	}

	/* Note LCK_MTX_SPIN_MSK is set only if LCK_MTX_ILOCKED_MSK is set */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK;

	disable_preemption();
	if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
		enable_preemption();
		return lck_mtx_lock_spin_slow(lock);
	}

	/* mutex acquired as spinlock, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;          /* lock statistic */
	}
#endif

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, lock, 0);
#endif
	/* return with the interlock held and preemption disabled */
	return;
}

/*
 * Routine: lck_mtx_lock_spin
 *
 * Tries to lock a mutex as a spin lock for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be held
 * by the current thread.
 *
 * If the mutex is held as a mutex by another thread, this
 * function switches behavior and tries to acquire the lock
 * as a mutex.
 *
 * If the mutex is held as a spin lock, it will spin,
 * contending for it.
 *
 * In case of contention it might sleep.
 */
void
lck_mtx_lock_spin(
	lck_mtx_t       *lock)
{
	lck_mtx_check_preemption();
	lck_mtx_lock_spin_always(lock);
}
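
/*
 * Hypothetical usage sketch for spin-mode acquisition: the mutex is taken
 * with the interlock held and preemption disabled, so only short,
 * non-blocking work is allowed until it is either released or converted
 * to a full mutex hold.  The function below is illustrative only.
 */
#if 0   /* illustration only, not compiled */
static void
example_spin_then_convert(lck_mtx_t *mtx)
{
	lck_mtx_lock_spin(mtx);         /* returns with the interlock held */
	/* ... short, non-blocking critical section ... */

	lck_mtx_convert_spin(mtx);      /* upgrade the spin hold to a full mutex hold */
	/* ... work that may block ... */
	lck_mtx_unlock(mtx);
}
#endif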

/*
 * Routine: lck_mtx_try_lock_spin_always
 *
 * Tries to lock a mutex as a spin lock for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be held
 * by the current thread.
 *
 * If the mutex is already held (either as a spin lock or as
 * a mutex) the function fails and returns FALSE; otherwise it
 * acquires the mutex as a spin lock and returns TRUE.
 */
__attribute__((noinline))
boolean_t
lck_mtx_try_lock_spin_always(
	lck_mtx_t       *lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is not held,
	 * neither as a mutex nor as a spin lock, and
	 * the interlock is not contended.
	 * Indirect mutexes fall through to the slow path,
	 * as do destroyed mutexes.
	 */

	/* Note LCK_MTX_SPIN_MSK is set only if LCK_MTX_ILOCKED_MSK is set */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK;

	disable_preemption();
	if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
		enable_preemption();
		return lck_mtx_try_lock_spin_slow(lock);
	}

	/* mutex acquired as spinlock, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;          /* lock statistic */
	}
#endif

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, lock, 0);
#endif

	/* return with the interlock held and preemption disabled */
	return TRUE;
}

/*
 * Routine: lck_mtx_try_lock_spin
 *
 * Tries to lock a mutex as a spin lock for the current thread.
 * It tries the fast path first and falls back
 * to the slow path in case of contention.
 *
 * Neither the interlock nor the mutex may already be held
 * by the current thread.
 *
 * If the mutex is already held (either as a spin lock or as
 * a mutex) the function fails and returns FALSE; otherwise it
 * acquires the mutex as a spin lock and returns TRUE.
 */
boolean_t
lck_mtx_try_lock_spin(
	lck_mtx_t       *lock)
{
	return lck_mtx_try_lock_spin_always(lock);
}

/*
 * Routine: lck_mtx_unlock
 *
 * Unlocks a mutex held by the current thread.
 * It tries the fast path first, and falls back
 * to the slow path in case waiters need to
 * be woken up.
 *
 * The interlock can be held, and the slow path will
 * unlock the mutex for this case.
 */
__attribute__((noinline))
void
lck_mtx_unlock(
	lck_mtx_t       *lock)
{
	uint32_t prev, state;

	state = ordered_load_mtx_state(lock);

	if (state & LCK_MTX_SPIN_MSK) {
		return lck_mtx_unlock_slow(lock);
	}

	/*
	 * Only a full mutex goes through the fast path
	 * (if the lock was acquired as a spin lock it will
	 * fall through the slow path).
	 * If there are waiters it will fall through the slow path.
	 * If it is indirect it will fall through the slow path.
	 *
	 * Expected fast path state:
	 * interlock not held, no waiters, no promotion and mutex held.
	 */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_WAITERS_MSK);
	prev |= LCK_MTX_MLOCKED_MSK;

	state = prev | LCK_MTX_ILOCKED_MSK;
	state &= ~LCK_MTX_MLOCKED_MSK;

	disable_preemption();

	/* the memory order needs to be acquire because it is acquiring the interlock */
	if (!os_atomic_cmpxchg(&lock->lck_mtx_state, prev, state, acquire)) {
		enable_preemption();
		return lck_mtx_unlock_slow(lock);
	}

	/* mutex released, interlock acquired and preemption disabled */

#if DEVELOPMENT || DEBUG
	thread_t owner = (thread_t)lock->lck_mtx_owner;
	if (__improbable(owner != current_thread())) {
		lck_mtx_owner_check_panic(lock);
	}
#endif

	/* clear owner */
	ordered_store_mtx_owner(lock, 0);
	/* release interlock */
	state &= ~LCK_MTX_ILOCKED_MSK;
	ordered_store_mtx_state_release(lock, state);

#if MACH_LDEBUG
	thread_t thread = current_thread();
	if (thread) {
		thread->mutex_count--;
	}
#endif /* MACH_LDEBUG */

	/* re-enable preemption */
	lck_mtx_unlock_finish_inline(lock, FALSE);
}
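
/*
 * A worked sketch of the expected/new state pair built by the unlock fast
 * path above.  The helper below is hypothetical and only spells out the
 * transition encoded there: the CAS expects "mutex held, interlock free,
 * no waiters" and installs "mutex released, interlock held", so the
 * interlock is taken and the mutex bit dropped in a single atomic step.
 */
#if 0   /* illustration only, not compiled */
static void
example_unlock_transition(uint32_t state)
{
	uint32_t expected, installed;

	expected = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_WAITERS_MSK);
	expected |= LCK_MTX_MLOCKED_MSK;                /* mutex bit must be set */

	installed = expected | LCK_MTX_ILOCKED_MSK;     /* take the interlock */
	installed &= ~LCK_MTX_MLOCKED_MSK;              /* drop the mutex bit */

	(void)installed;
}
#endif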