/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define ATOMIC_PRIVATE 1
#define LOCK_PRIVATE 1

#include <mach_ldebug.h>

#include <kern/locks.h>
#include <kern/kalloc.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <i386/machine_routines.h> /* machine_timeout_suspended() */
#include <machine/atomic.h>
#include <machine/machine_cpu.h>
#include <sys/kdebug.h>
#include <i386/locks_i386_inlines.h>
/*
 * Fast path routines for lck_mtx locking and unlocking functions.
 * Fast paths will try a single compare-and-swap instruction to acquire/release the lock
 * and interlock, and they will fall through to the slow path if it fails.
 *
 * These functions were previously implemented in x86 assembly,
 * and some optimizations are in place in this C code so that the compiled code
 * is as performant and compact as the assembly version.
 *
 * To avoid inlining these functions and increasing the kernel text size, all functions
 * have __attribute__((noinline)) specified.
 *
 * The code is structured in such a way that there are no calls to functions that will
 * return in the context of the caller function, i.e. all functions called are either
 * tail-call functions or inline functions. The tail-call functions take fewer than six
 * arguments, so that they can be passed in registers and do not need to be pushed on
 * the stack. This allows the compiler to not create a stack frame for these functions.
 *
 * The file is compiled with -momit-leaf-frame-pointer and -O2.
 */
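/*
 * Illustrative sketch of the fast-path shape shared by the routines below
 * (not compiled; the helpers, field and memory order are the ones this file
 * already uses, while REQUIRED_CLEAR_MSK / WANTED_SET_MSK stand for whatever
 * state bits a given routine needs clear or set):
 *
 *	state = ordered_load_mtx_state(lock);
 *	prev  = state & ~REQUIRED_CLEAR_MSK;
 *	state = prev | WANTED_SET_MSK;
 *	disable_preemption();
 *	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state,
 *	    memory_order_acquire_smp, FALSE)) {
 *		// contended, indirect or destroyed mutex: take the slow path
 *		return lck_mtx_*_slow(lock);
 *	}
 *	// fast path won: interlock held, preemption disabled
 */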
#if DEVELOPMENT || DEBUG

/*
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
 */
void inline
lck_mtx_check_preemption(void)
{
	if (get_preemption_level() == 0)
		return;

	if (LckDisablePreemptCheck)
		return;

	if (current_cpu_datap()->cpu_hibernate)
		return;

	panic("preemption_level(%d) != 0\n", get_preemption_level());
}

#else /* DEVELOPMENT || DEBUG */

void inline
lck_mtx_check_preemption(void)
{
}

#endif /* DEVELOPMENT || DEBUG */
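/*
 * Example of the invariant this check enforces (illustrative only; my_slock,
 * my_mtx and the exact simple_lock() signature are placeholders, not part of
 * this file):
 *
 *	simple_lock(&my_slock);		// raises the preemption level
 *	lck_mtx_lock(&my_mtx);		// on DEVELOPMENT/DEBUG kernels this
 *					// panics: a mutex may block while a
 *					// simplelock is still held
 */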
/*
 * Routine: lck_mtx_lock
 *
 * Locks a mutex for current thread.
 * It tries the fast path first and
 * falls through the slow path in case
 * of contention.
 *
 * Interlock or mutex cannot be already held by current thread.
 * In case of contention it might sleep.
 */
__attribute__((noinline))
void
lck_mtx_lock(
	lck_mtx_t	*lock)
{
	uint32_t	prev, state;

	lck_mtx_check_preemption();
	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is not held,
	 * the interlock is not contended and there are no waiters.
	 * Indirect mutexes will fall through the slow path as
	 * well as destroyed mutexes.
	 */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK | LCK_MTX_WAITERS_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK;

	disable_preemption();
	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) {
		enable_preemption();
		return lck_mtx_lock_slow(lock);
	}

	/* mutex acquired, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;	/* lock statistic */
	}
#endif

	/* release interlock and re-enable preemption */
	lck_mtx_lock_finish_inline(lock, state, FALSE);
}
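/*
 * Typical caller pattern (illustrative; my_mtx is a hypothetical mutex that
 * would normally come from lck_mtx_init() or lck_mtx_alloc_init()):
 *
 *	lck_mtx_lock(my_mtx);
 *	... critical section, may block ...
 *	lck_mtx_unlock(my_mtx);
 */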
/*
 * Routine: lck_mtx_try_lock
 *
 * Try to lock a mutex for current thread.
 * It tries the fast path first and
 * falls through the slow path in case
 * of contention.
 *
 * Interlock or mutex cannot be already held by current thread.
 *
 * In case the mutex is held (either as spin or mutex)
 * the function will fail; it will acquire the mutex otherwise.
 */
__attribute__((noinline))
boolean_t
lck_mtx_try_lock(
	lck_mtx_t	*lock)
{
	uint32_t	prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is not held,
	 * the interlock is not contended and there are no waiters.
	 * Indirect mutexes will fall through the slow path as
	 * well as destroyed mutexes.
	 */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK | LCK_MTX_WAITERS_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK;

	disable_preemption();
	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) {
		enable_preemption();
		return lck_mtx_try_lock_slow(lock);
	}

	/* mutex acquired, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;	/* lock statistic */
	}
#endif

	/* release interlock and re-enable preemption */
	lck_mtx_try_lock_finish_inline(lock, state);

	return TRUE;
}
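/*
 * Illustrative caller pattern checking the boolean_t result (my_mtx is
 * hypothetical):
 *
 *	if (lck_mtx_try_lock(my_mtx)) {
 *		... mutex acquired without blocking ...
 *		lck_mtx_unlock(my_mtx);
 *	} else {
 *		... mutex was held, back off or retry later ...
 *	}
 */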
/*
 * Routine: lck_mtx_lock_spin_always
 *
 * Try to lock a mutex as spin lock for current thread.
 * It tries the fast path first and
 * falls through the slow path in case
 * of contention.
 *
 * Interlock or mutex cannot be already held by current thread.
 *
 * In case the mutex is held as mutex by another thread
 * this function will switch behavior and try to acquire the lock as mutex.
 *
 * In case the mutex is held as spinlock it will spin contending
 * for the interlock.
 *
 * In case of contention it might sleep.
 */
__attribute__((noinline))
void
lck_mtx_lock_spin_always(
	lck_mtx_t	*lock)
{
	uint32_t	prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is not held,
	 * neither as mutex nor as spin, and
	 * the interlock is not contended.
	 * Indirect mutexes will fall through the slow path as
	 * well as destroyed mutexes.
	 */

	/* Note LCK_MTX_SPIN_MSK is set only if LCK_MTX_ILOCKED_MSK is set */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK;

	disable_preemption();
	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) {
		enable_preemption();
		return lck_mtx_lock_spin_slow(lock);
	}

	/* mutex acquired as spinlock, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;	/* lock statistic */
	}
#endif

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, lock, 0);
#endif
	/* return with the interlock held and preemption disabled */
}
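/*
 * A mutex taken in spin mode is released with lck_mtx_unlock(), which sees
 * LCK_MTX_SPIN_MSK and takes the slow path (see lck_mtx_unlock below), or is
 * converted to a full mutex hold with lck_mtx_convert_spin() before doing
 * anything that may block. Illustrative sketch (my_mtx is hypothetical):
 *
 *	lck_mtx_lock_spin_always(my_mtx);
 *	... short, non-blocking critical section ...
 *	lck_mtx_unlock(my_mtx);
 */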
/*
 * Routine: lck_mtx_lock_spin
 *
 * Try to lock a mutex as spin lock for current thread.
 * It tries the fast path first and
 * falls through the slow path in case
 * of contention.
 *
 * Interlock or mutex cannot be already held by current thread.
 *
 * In case the mutex is held as mutex by another thread
 * this function will switch behavior and try to acquire the lock as mutex.
 *
 * In case the mutex is held as spinlock it will spin contending
 * for the interlock.
 *
 * In case of contention it might sleep.
 */
void
lck_mtx_lock_spin(
	lck_mtx_t	*lock)
{
	lck_mtx_check_preemption();
	lck_mtx_lock_spin_always(lock);
}
/*
 * Routine: lck_mtx_try_lock_spin_always
 *
 * Try to lock a mutex as spin lock for current thread.
 * It tries the fast path first and
 * falls through the slow path in case
 * of contention.
 *
 * Interlock or mutex cannot be already held by current thread.
 *
 * In case the mutex is held (either as spin or mutex)
 * the function will fail; it will acquire the mutex as spin lock
 * otherwise.
 */
__attribute__((noinline))
boolean_t
lck_mtx_try_lock_spin_always(
	lck_mtx_t	*lock)
{
	uint32_t	prev, state;

	state = ordered_load_mtx_state(lock);

	/*
	 * Fast path only if the mutex is not held,
	 * neither as mutex nor as spin, and
	 * the interlock is not contended.
	 * Indirect mutexes will fall through the slow path as
	 * well as destroyed mutexes.
	 */

	/* Note LCK_MTX_SPIN_MSK is set only if LCK_MTX_ILOCKED_MSK is set */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK);
	state = prev | LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK;

	disable_preemption();
	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) {
		enable_preemption();
		return lck_mtx_try_lock_spin_slow(lock);
	}

	/* mutex acquired as spinlock, interlock acquired and preemption disabled */

	thread_t thread = current_thread();
	/* record owner of mutex */
	ordered_store_mtx_owner(lock, (uintptr_t)thread);

#if MACH_LDEBUG
	if (thread) {
		thread->mutex_count++;	/* lock statistic */
	}
#endif

#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, lock, 0);
#endif

	/* return with the interlock held and preemption disabled */
	return TRUE;
}
/*
 * Routine: lck_mtx_try_lock_spin
 *
 * Try to lock a mutex as spin lock for current thread.
 * It tries the fast path first and
 * falls through the slow path in case
 * of contention.
 *
 * Interlock or mutex cannot be already held by current thread.
 *
 * In case the mutex is held (either as spin or mutex)
 * the function will fail; it will acquire the mutex as spin lock
 * otherwise.
 */
boolean_t
lck_mtx_try_lock_spin(
	lck_mtx_t	*lock)
{
	return lck_mtx_try_lock_spin_always(lock);
}
/*
 * Routine: lck_mtx_unlock
 *
 * Unlocks a mutex held by current thread.
 * It tries the fast path first, and falls
 * through the slow path in case waiters need to
 * be woken up or promotions need to be dropped.
 *
 * Interlock can be held, and the slow path will
 * unlock the mutex for this case.
 */
__attribute__((noinline))
void
lck_mtx_unlock(
	lck_mtx_t	*lock)
{
	uint32_t	prev, state;

	state = ordered_load_mtx_state(lock);

	if (state & LCK_MTX_SPIN_MSK)
		return lck_mtx_unlock_slow(lock);

	/*
	 * Only full mutex will go through the fast path
	 * (if the lock was acquired as a spinlock it will
	 * fall through the slow path).
	 * If there are waiters or promotions it will fall
	 * through the slow path.
	 * If it is indirect it will fall through the slow path.
	 */

	/*
	 * Fast path state:
	 * interlock not held, no waiters, no promotion and mutex held.
	 */
	prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_WAITERS_MSK | LCK_MTX_PROMOTED_MSK);
	prev |= LCK_MTX_MLOCKED_MSK;

	state = prev | LCK_MTX_ILOCKED_MSK;
	state &= ~LCK_MTX_MLOCKED_MSK;

	disable_preemption();

	/* the memory order needs to be acquire because it is acquiring the interlock */
	if (!atomic_compare_exchange32(&lock->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) {
		enable_preemption();
		return lck_mtx_unlock_slow(lock);
	}

	/* mutex released, interlock acquired and preemption disabled */

#if DEVELOPMENT || DEBUG
	thread_t owner = (thread_t)lock->lck_mtx_owner;
	if (__improbable(owner != current_thread()))
		return lck_mtx_owner_check_panic(lock);
#endif

	/* clear owner */
	ordered_store_mtx_owner(lock, 0);
	/* release interlock */
	state &= ~LCK_MTX_ILOCKED_MSK;
	ordered_store_mtx_state_release(lock, state);

#if MACH_LDEBUG
	thread_t thread = current_thread();
	if (thread)
		thread->mutex_count--;
#endif /* MACH_LDEBUG */

	/* re-enable preemption */
	lck_mtx_unlock_finish_inline(lock, FALSE);
}
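/*
 * Note on the fast unlock path above: the owner field is cleared while the
 * interlock is still held, and ordered_store_mtx_state_release() then drops
 * the interlock bit with release semantics, publishing the critical section's
 * stores so they are visible to the next thread that acquires the lock with
 * the acquire-ordered compare-and-swap.
 */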