 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
#include <mach_ldebug.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <config_dtrace.h>

#define	PAUSE		rep; nop

#include <i386/pal_lock_asm.h>

#define	LEAF_ENTRY(name)	\
#define	LEAF_ENTRY2(n1,n2)	\

/* Non-leaf routines always have a stack frame: */
#define	NONLEAF_ENTRY(name)	\
#define	NONLEAF_ENTRY2(n1,n2)	\
/* For x86_64, the varargs ABI requires that %al indicate
 * how many SSE registers contain arguments. In our case, 0. */
#define ALIGN_STACK()		and	$0xFFFFFFFFFFFFFFF0, %rsp	;
#define LOAD_STRING_ARG0(label)	leaq	label(%rip), %rdi	;
#define LOAD_ARG1(x)		mov	x, %esi			;
#define LOAD_PTR_ARG1(x)	mov	x, %rsi			;
#define CALL_PANIC()		xorb	%al,%al ; call	EXT(panic) ;
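/*
 * Illustrative only (not assembled): the macros above simply spell out a
 * varargs call to panic() at the assembly level. In C terms, the pattern
 * used repeatedly below amounts to roughly
 *
 *	panic("some diagnostic string %d", value);
 *
 * with the stack 16-byte aligned (ALIGN_STACK), the format string in %rdi
 * (LOAD_STRING_ARG0), an optional argument in %esi/%rsi (LOAD_ARG1 /
 * LOAD_PTR_ARG1), and %al cleared because no SSE registers carry arguments
 * (CALL_PANIC).
 */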
#define	CHECK_UNLOCK(current, owner)				\
	cmp	current, owner				;	\
	LOAD_STRING_ARG0(2f)				;	\
2:	String	"Mutex unlock attempted from non-owner thread";	\
 * Routines for general lock debugging.

 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to Mutex functions with
 * type simplelock and vice versa.
#define	CHECK_MUTEX_TYPE()				\
	cmpl	$ MUTEX_TAG,M_TYPE		;	\
	LOAD_STRING_ARG0(2f)			;	\
2:	String	"not a mutex!"			;	\

#define	CHECK_MYLOCK(current, owner)			\
	cmp	current, owner			;	\
	LOAD_STRING_ARG0(2f)			;	\
2:	String	"Attempt to recursively lock a non-recursive lock";	\
#else	/* MACH_LDEBUG */
#define	CHECK_MUTEX_TYPE()
#define	CHECK_MYLOCK(thd)
#endif	/* MACH_LDEBUG */
#if	DEVELOPMENT || DEBUG
 * If one or more simplelocks are currently held by a thread,
 * an attempt to acquire a mutex will cause this check to fail
 * (since a mutex lock may context switch, holding a simplelock
 * is not a good thing).
#define	CHECK_PREEMPTION_LEVEL()				\
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL		;	\
	cmpl	$0,EXT(LckDisablePreemptCheck)(%rip)	;	\
	cmpl	$0,%gs:CPU_HIBERNATE			;	\
	movl	%gs:CPU_PREEMPTION_LEVEL, %eax		;	\
	LOAD_STRING_ARG0(2f)				;	\
2:	String	"preemption_level(%d) != 0!"		;	\
#else	/* DEVELOPMENT || DEBUG */
#define	CHECK_PREEMPTION_LEVEL()
#endif	/* DEVELOPMENT || DEBUG */
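/*
 * Rough C equivalent of CHECK_PREEMPTION_LEVEL() above (a readability
 * sketch, not compiled; the accessor spellings are illustrative, the fields
 * correspond to CPU_PREEMPTION_LEVEL, LckDisablePreemptCheck and
 * CPU_HIBERNATE used in the macro):
 *
 *	if (current_cpu()->cpu_preemption_level != 0 &&
 *	    LckDisablePreemptCheck == 0 &&
 *	    current_cpu()->cpu_hibernate == 0) {
 *		panic("preemption_level(%d) != 0!",
 *		      current_cpu()->cpu_preemption_level);
 *	}
 */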
#define	PREEMPTION_DISABLE				\
	incl	%gs:CPU_PREEMPTION_LEVEL

#define	PREEMPTION_LEVEL_DEBUG 1
#if	PREEMPTION_LEVEL_DEBUG
#define	PREEMPTION_ENABLE				\
	decl	%gs:CPU_PREEMPTION_LEVEL	;	\
	testl	$AST_URGENT,%gs:CPU_PENDING_AST	;	\
	testl	$EFL_IF, S_PC			;	\
	call	_preemption_underflow_panic	;	\
#define	PREEMPTION_ENABLE				\
	decl	%gs:CPU_PREEMPTION_LEVEL	;	\
	testl	$AST_URGENT,%gs:CPU_PENDING_AST	;	\
	testl	$EFL_IF, S_PC			;	\
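/*
 * Conceptually (illustrative C sketch, not compiled): PREEMPTION_DISABLE
 * bumps the per-cpu preemption level; PREEMPTION_ENABLE drops it and, once
 * the level reaches zero with an urgent AST pending and interrupts enabled,
 * takes a trap to service the AST. The debug variant additionally calls
 * _preemption_underflow_panic if the count underflows.
 *
 *	// disable:
 *	current_cpu()->cpu_preemption_level++;
 *
 *	// enable:
 *	if (--current_cpu()->cpu_preemption_level == 0 &&
 *	    (current_cpu()->cpu_pending_ast & AST_URGENT) &&
 *	    interrupts_enabled())
 *		handle_pending_ast();
 */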
	.globl	_lockstat_probe
	.globl	_lockstat_probemap

 * LOCKSTAT_LABEL creates a dtrace symbol which contains
 * a pointer into the lock code function body. At that
 * point is a "ret" instruction that can be patched into
 * a "nop".
#define	LOCKSTAT_LABEL(lab)				\
#define	LOCKSTAT_RECORD(id, lck)			\
	movl	_lockstat_probemap + (id * 4)(%rip),%eax	;	\
	call	*_lockstat_probe(%rip)		;	\
	/* ret - left to subsequent code, e.g. return values */

#endif	/* CONFIG_DTRACE */
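/*
 * Conceptual sketch (not compiled) of the lockstat probe sites used below:
 * while the DTrace lockstat provider is disabled, the labelled "ret" simply
 * returns; when a probe is enabled, that "ret" is patched to a "nop" so
 * execution falls through into LOCKSTAT_RECORD(), which behaves roughly as
 *
 *	if (lockstat_probemap[id] != 0)			// probe enabled?
 *		(*lockstat_probe)(lockstat_probemap[id],	// probe id
 *		                  (uintptr_t)lck, 0, 0, 0, 0);	// lock address
 */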
 * For most routines, the hw_lock_t pointer is loaded into a
 * register initially, and then either a byte or register-sized
 * word is loaded/stored to the pointer.

 * void hw_lock_byte_init(volatile uint8_t *)
 * Initialize a hardware byte lock.
LEAF_ENTRY(hw_lock_byte_init)
	movb	$0, (%rdi)		/* clear the lock */

 * void hw_lock_byte_lock(uint8_t *lock_byte)
 * Acquire byte sized lock operand, spinning until it becomes available.
 * Return with preemption disabled.
LEAF_ENTRY(hw_lock_byte_lock)
	movl	$1, %ecx		/* Set lock value */
	movb	(%rdi), %al		/* Load byte at address */
	testb	%al,%al			/* lock locked? */
	jne	3f			/* branch if so */
	lock; cmpxchg %cl,(%rdi)	/* attempt atomic compare exchange */
	LEAF_RET			/* if yes, then nothing left to do */
	PAUSE				/* pause for hyper-threading */
	jmp	1b			/* try again */
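/*
 * Rough C equivalent of hw_lock_byte_lock() above (illustrative sketch only,
 * not compiled; helper names such as cpu_pause() are placeholders):
 *
 *	disable_preemption();
 *	for (;;) {
 *		if (*lock_byte == 0 &&
 *		    __sync_bool_compare_and_swap(lock_byte, 0, 1))
 *			return;			// acquired the byte lock
 *		cpu_pause();			// PAUSE, then retry
 *	}
 */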
 * void hw_lock_byte_unlock(uint8_t *lock_byte)
 * Unconditionally release byte sized lock operand,
 * release preemption level.
LEAF_ENTRY(hw_lock_byte_unlock)
	movb	$0, (%rdi)		/* Clear the lock byte */
 * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
 * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
 * as a 64-bit quantity (this matches the existing PowerPC implementation,
 * and the new x86 specific statistics are also maintained as 32-bit
 * quantities).

 * Enable this preprocessor define to record only the first miss.
 * By default, we count every miss, hence multiple misses may be
 * recorded for a single lock acquire attempt via lck_mtx_lock.
#undef LOG_FIRST_MISS_ALONE

 * This preprocessor define controls whether the R-M-W update of the
 * per-group statistics elements is atomic (LOCK-prefixed).
 * Enabled by default.
#define ATOMIC_STAT_UPDATES 1

#if defined(ATOMIC_STAT_UPDATES)
#define LOCK_IF_ATOMIC_STAT_UPDATES	lock
#define LOCK_IF_ATOMIC_STAT_UPDATES
#endif	/* ATOMIC_STAT_UPDATES */
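/*
 * In C terms (illustrative, not compiled; the field spelling is a
 * placeholder for the GRP_MTX_STAT_* offsets used below), the choice
 * controlled by ATOMIC_STAT_UPDATES is simply whether a group counter is
 * bumped with a LOCK-prefixed (atomic) read-modify-write or a plain one:
 *
 *	__sync_fetch_and_add(&grp->mtx_stat_miss, 1);	// lock incl
 *	grp->mtx_stat_miss++;				// plain incl
 */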
 * For most routines, the lck_mtx_t pointer is loaded into a
 * register initially, and the owner field checked for indirection.
 * Eventually the lock owner is loaded into a register and examined.

#define	M_OWNER		MUTEX_OWNER
#define	M_PTR		MUTEX_PTR
#define	M_STATE		MUTEX_STATE

#define	LMTX_ENTER_EXTENDED				\
	mov	M_PTR(%rdx), %rdx		;	\
	mov	MUTEX_GRP(%rdx), %r10		;	\
	LOCK_IF_ATOMIC_STAT_UPDATES		;	\
	incq	GRP_MTX_STAT_UTIL(%r10)

#if	LOG_FIRST_MISS_ALONE
#define	LMTX_UPDATE_MISS				\
	LOCK_IF_ATOMIC_STAT_UPDATES		;	\
	incl	GRP_MTX_STAT_MISS(%r10)		;	\
#define	LMTX_UPDATE_MISS				\
	LOCK_IF_ATOMIC_STAT_UPDATES		;	\
	incl	GRP_MTX_STAT_MISS(%r10)

#if	LOG_FIRST_MISS_ALONE
#define	LMTX_UPDATE_WAIT				\
	LOCK_IF_ATOMIC_STAT_UPDATES		;	\
	incl	GRP_MTX_STAT_WAIT(%r10)		;	\
#define	LMTX_UPDATE_WAIT				\
	LOCK_IF_ATOMIC_STAT_UPDATES		;	\
	incl	GRP_MTX_STAT_WAIT(%r10)
 * Record the "direct wait" statistic, which indicates that a
 * miss proceeded to block directly without spinning; this occurs
 * when the owner of the mutex isn't running on another processor
 * at the time of the check.
#define	LMTX_UPDATE_DIRECT_WAIT				\
	LOCK_IF_ATOMIC_STAT_UPDATES		;	\
	incl	GRP_MTX_STAT_DIRECT_WAIT(%r10)
#define	LMTX_CALLEXT1(func_name)		\
	call	EXT(func_name)		;	\

#define	LMTX_CALLEXT2(func_name, reg)		\
	call	EXT(func_name)		;	\

#define	M_WAITERS_MSK		0x0000ffff
#define	M_PRIORITY_MSK		0x00ff0000
#define	M_ILOCKED_MSK		0x01000000
#define	M_MLOCKED_MSK		0x02000000
#define	M_PROMOTED_MSK		0x04000000
#define	M_SPIN_MSK		0x08000000
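/*
 * Taken together, the masks above describe the 32-bit M_STATE word roughly
 * as follows (illustrative C sketch, not the real declaration):
 *
 *	struct lck_mtx_state {
 *		uint32_t waiters  : 16;	// M_WAITERS_MSK
 *		uint32_t priority :  8;	// M_PRIORITY_MSK
 *		uint32_t ilocked  :  1;	// M_ILOCKED_MSK - interlock held
 *		uint32_t mlocked  :  1;	// M_MLOCKED_MSK - held as a full mutex
 *		uint32_t promoted :  1;	// M_PROMOTED_MSK
 *		uint32_t spin     :  1;	// M_SPIN_MSK - held as a spin lock
 *	};
 */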
 * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
 * Takes the address of a lock and an assertion type as parameters.
 * The assertion can take one of two forms determined by the type
 * parameter: either the lock is held by the current thread, and the
 * type is LCK_MTX_ASSERT_OWNED, or it isn't and the type is
 * LCK_MTX_ASSERT_NOTOWNED. Calls panic on assertion failure.
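/*
 * Rough C equivalent of the assembly below (a readability sketch, not
 * compiled; helper spellings are illustrative):
 *
 *	void lck_mtx_assert(lck_mtx_t *l, unsigned int type)
 *	{
 *		if (l->lck_mtx_state == MUTEX_IND)	// indirect mutex
 *			l = l->lck_mtx_ptr;		// take indirection
 *		thread_t owner = l->lck_mtx_owner;
 *		if (type == LCK_MTX_ASSERT_OWNED) {
 *			if (owner != current_thread() ||
 *			    !(l->lck_mtx_state & (M_ILOCKED_MSK | M_MLOCKED_MSK)))
 *				panic("mutex (%p) not owned\n", l);
 *		} else {				// LCK_MTX_ASSERT_NOTOWNED
 *			if (owner == current_thread())
 *				panic("mutex (%p) owned\n", l);
 *		}
 *	}
 */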
NONLEAF_ENTRY(lck_mtx_assert)
	mov	%rdi, %rdx			/* Load lock address */
	mov	%gs:CPU_ACTIVE_THREAD, %rax	/* Load current thread */

	mov	M_STATE(%rdx), %ecx
	cmp	$(MUTEX_IND), %ecx		/* Is this an indirect mutex? */
	mov	M_PTR(%rdx), %rdx		/* If so, take indirection */

	mov	M_OWNER(%rdx), %rcx		/* Load owner */
	cmp	$(MUTEX_ASSERT_OWNED), %rsi
	jne	2f				/* Assert ownership? */
	cmp	%rax, %rcx			/* Current thread match? */
	jne	3f				/* no, go panic */
	testl	$(M_ILOCKED_MSK | M_MLOCKED_MSK), M_STATE(%rdx)
1:						/* yes, we own it */
	cmp	%rax, %rcx			/* Current thread match? */
	jne	1b				/* No, return */
	LOAD_STRING_ARG0(mutex_assert_owned_str)
	LOAD_STRING_ARG0(mutex_assert_not_owned_str)
	LOAD_STRING_ARG0(mutex_interlock_destroyed_str)

mutex_assert_not_owned_str:
	.asciz	"mutex (%p) not owned\n"
mutex_assert_owned_str:
	.asciz	"mutex (%p) owned\n"
mutex_interlock_destroyed_str:
	.asciz	"trying to interlock destroyed mutex (%p)"
 * lck_mtx_lock_spin()
 * lck_mtx_lock_spin_always()
 * lck_mtx_try_lock_spin()
 * lck_mtx_try_lock_spin_always()
 * lck_mtx_convert_spin()
NONLEAF_ENTRY(lck_mtx_lock_spin_always)
	mov	%rdi, %rdx		/* fetch lock pointer */
	jmp	Llmls_avoid_check

NONLEAF_ENTRY(lck_mtx_lock_spin)
	mov	%rdi, %rdx		/* fetch lock pointer */

	CHECK_PREEMPTION_LEVEL()

	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* is the interlock or mutex held */
Llmls_try:				/* no - can't be INDIRECT, DESTROYED or locked */
	mov	%rcx, %rax		/* eax contains snapshot for cmpxchgl */
	or	$(M_ILOCKED_MSK | M_SPIN_MSK), %ecx
	cmpxchg %ecx, M_STATE(%rdx)	/* atomic compare and exchange */
	jne	Llmls_busy_disabled

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)	/* record owner of interlock */
	incl	TH_MUTEX_COUNT(%rax)	/* lock statistic */
#endif	/* MACH_LDEBUG */

	/* return with the interlock held and preemption disabled */

	LOCKSTAT_LABEL(_lck_mtx_lock_spin_lockstat_patch_point)
	/* inherit lock pointer in %rdx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, %rdx)

	test	$M_ILOCKED_MSK, %ecx	/* is the interlock held */
	jz	Llml_contended		/* no, must have been the mutex */

	cmp	$(MUTEX_DESTROYED), %ecx	/* check to see if it's marked destroyed */
	cmp	$(MUTEX_IND), %ecx	/* Is this an indirect mutex? */
	jne	Llmls_loop		/* no... must be interlocked */

	mov	M_STATE(%rdx), %ecx
	test	$(M_SPIN_MSK), %ecx

	LMTX_UPDATE_MISS		/* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */

	mov	M_STATE(%rdx), %ecx

	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
	test	$(M_MLOCKED_MSK), %ecx
	jnz	Llml_contended		/* mutex owned by someone else, go contend for it */
NONLEAF_ENTRY(lck_mtx_lock)
	mov	%rdi, %rdx		/* fetch lock pointer */

	CHECK_PREEMPTION_LEVEL()

	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* is the interlock or mutex held */
Llml_try:				/* no - can't be INDIRECT, DESTROYED or locked */
	mov	%rcx, %rax		/* eax contains snapshot for cmpxchgl */
	or	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
	cmpxchg %ecx, M_STATE(%rdx)	/* atomic compare and exchange */
	jne	Llml_busy_disabled

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)	/* record owner of mutex */
	incl	TH_MUTEX_COUNT(%rax)	/* lock statistic */
#endif	/* MACH_LDEBUG */

	testl	$(M_WAITERS_MSK), M_STATE(%rdx)

	LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)

	andl	$(~M_ILOCKED_MSK), M_STATE(%rdx)

	cmp	%rdx, %rdi		/* is this an extended mutex */

	LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point)
	/* inherit lock pointer in %rdx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, %rdx)

	LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point)
	/* inherit lock pointer in %rdx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %rdx)

	test	$M_ILOCKED_MSK, %ecx	/* is the interlock held */
	jz	Llml_contended		/* no, must have been the mutex */

	cmp	$(MUTEX_DESTROYED), %ecx	/* check to see if it's marked destroyed */
	cmp	$(MUTEX_IND), %ecx	/* Is this an indirect mutex? */
	jne	Llml_loop		/* no... must be interlocked */

	mov	M_STATE(%rdx), %ecx
	test	$(M_SPIN_MSK), %ecx

	LMTX_UPDATE_MISS		/* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */

	mov	M_STATE(%rdx), %ecx

	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
	test	$(M_MLOCKED_MSK), %ecx
	jnz	Llml_contended		/* mutex owned by someone else, go contend for it */

	cmp	%rdx, %rdi		/* is this an extended mutex */

	LMTX_CALLEXT1(lck_mtx_lock_spinwait_x86)

	jz	Llml_acquired		/* acquired mutex, interlock held and preemption disabled */

	cmp	$1, %rax		/* check for direct wait status */
	cmp	%rdx, %rdi		/* is this an extended mutex */
	LMTX_UPDATE_DIRECT_WAIT

	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK), %ecx

	mov	%rcx, %rax		/* eax contains snapshot for cmpxchgl */
	or	$(M_ILOCKED_MSK), %ecx	/* try to take the interlock */
	cmpxchg %ecx, M_STATE(%rdx)	/* atomic compare and exchange */

	test	$(M_MLOCKED_MSK), %ecx	/* we've got the interlock and */
	or	$(M_MLOCKED_MSK), %ecx	/* the mutex is free... grab it directly */
	mov	%ecx, M_STATE(%rdx)

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)	/* record owner of mutex */
	incl	TH_MUTEX_COUNT(%rax)	/* lock statistic */
#endif	/* MACH_LDEBUG */

	testl	$(M_WAITERS_MSK), M_STATE(%rdx)
	mov	M_OWNER(%rdx), %rax
	mov	TH_WAS_PROMOTED_ON_WAKEUP(%rax), %eax

	LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)

3:	/* interlock held, mutex busy */
	cmp	%rdx, %rdi		/* is this an extended mutex */

	LMTX_CALLEXT1(lck_mtx_lock_wait_x86)
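/*
 * The contended-lock path above amounts to roughly the following
 * (illustrative C sketch, not compiled; lck_mtx_lock_spinwait_x86 and
 * lck_mtx_lock_wait_x86 are the real external calls made above, the other
 * helpers are shorthand for the inline assembly):
 *
 *	for (;;) {
 *		ret = lck_mtx_lock_spinwait_x86(l);	// adaptive spin
 *		if (ret == 0)
 *			break;			// acquired while spinning
 *		if (ret == 1)
 *			update_direct_wait_stat(grp);	// owner wasn't running
 *		take_interlock(l);		// cmpxchg loop on M_STATE
 *		if (!(l->lck_mtx_state & M_MLOCKED_MSK)) {
 *			grab_mutex_directly(l);	// free now: set owner + MLOCKED
 *			break;
 *		}
 *		lck_mtx_lock_wait_x86(l);	// block, then loop and retry
 *	}
 */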
NONLEAF_ENTRY(lck_mtx_try_lock_spin_always)
	mov	%rdi, %rdx		/* fetch lock pointer */
	jmp	Llmts_avoid_check

NONLEAF_ENTRY(lck_mtx_try_lock_spin)
	mov	%rdi, %rdx		/* fetch lock pointer */

	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* is the interlock or mutex held */
Llmts_try:				/* no - can't be INDIRECT, DESTROYED or locked */
	mov	%rcx, %rax		/* eax contains snapshot for cmpxchgl */
	or	$(M_ILOCKED_MSK | M_SPIN_MSK), %rcx
	cmpxchg %ecx, M_STATE(%rdx)	/* atomic compare and exchange */
	jne	Llmts_busy_disabled

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)	/* record owner of mutex */
	incl	TH_MUTEX_COUNT(%rax)	/* lock statistic */
#endif	/* MACH_LDEBUG */

	mov	$1, %rax		/* return success */
	LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point)
	/* inherit lock pointer in %rdx above */
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, %rdx)
	mov	$1, %rax		/* return success */

	test	$(M_ILOCKED_MSK), %ecx	/* is the interlock held */
	jz	Llmts_fail		/* no, must be held as a mutex */

	cmp	$(MUTEX_DESTROYED), %ecx	/* check to see if it's marked destroyed */
	cmp	$(MUTEX_IND), %ecx	/* Is this an indirect mutex? */

	mov	M_STATE(%rdx), %ecx

	test	$(M_MLOCKED_MSK | M_SPIN_MSK), %ecx
	test	$(M_ILOCKED_MSK), %ecx
NONLEAF_ENTRY(lck_mtx_try_lock)
	mov	%rdi, %rdx		/* fetch lock pointer */

	mov	M_STATE(%rdx), %ecx
	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* is the interlock or mutex held */
Llmt_try:				/* no - can't be INDIRECT, DESTROYED or locked */
	mov	%rcx, %rax		/* eax contains snapshot for cmpxchgl */
	or	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
	cmpxchg %ecx, M_STATE(%rdx)	/* atomic compare and exchange */
	jne	Llmt_busy_disabled

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)	/* record owner of mutex */
	incl	TH_MUTEX_COUNT(%rax)	/* lock statistic */
#endif	/* MACH_LDEBUG */

	test	$(M_WAITERS_MSK), %ecx

	LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)

	andl	$(~M_ILOCKED_MSK), M_STATE(%rdx)

	mov	$1, %rax		/* return success */
	/* Dtrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */
	LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point)
	/* inherit lock pointer in %rdx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, %rdx)
	mov	$1, %rax		/* return success */

	test	$(M_ILOCKED_MSK), %ecx	/* is the interlock held */
	jz	Llmt_fail		/* no, must be held as a mutex */

	cmp	$(MUTEX_DESTROYED), %ecx	/* check to see if it's marked destroyed */
	cmp	$(MUTEX_IND), %ecx	/* Is this an indirect mutex? */

	mov	M_STATE(%rdx), %ecx

	test	$(M_MLOCKED_MSK | M_SPIN_MSK), %ecx
	test	$(M_ILOCKED_MSK), %ecx

	cmp	%rdx, %rdi		/* is this an extended mutex */
NONLEAF_ENTRY(lck_mtx_convert_spin)
	mov	%rdi, %rdx		/* fetch lock pointer */

	mov	M_STATE(%rdx), %ecx
	cmp	$(MUTEX_IND), %ecx	/* Is this an indirect mutex? */
	mov	M_PTR(%rdx), %rdx	/* If so, take indirection */
	mov	M_STATE(%rdx), %ecx

	test	$(M_MLOCKED_MSK), %ecx	/* already owned as a mutex, just return */
	test	$(M_WAITERS_MSK), %ecx	/* are there any waiters? */

	LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
	mov	M_STATE(%rdx), %ecx

	and	$(~(M_ILOCKED_MSK | M_SPIN_MSK)), %ecx	/* convert from spin version to mutex */
	or	$(M_MLOCKED_MSK), %ecx
	mov	%ecx, M_STATE(%rdx)	/* since I own the interlock, I don't need an atomic update */
NONLEAF_ENTRY(lck_mtx_unlock)
	mov	%rdi, %rdx		/* fetch lock pointer */

	mov	M_STATE(%rdx), %ecx

	cmp	$(MUTEX_IND), %ecx	/* Is this an indirect mutex? */

	test	$(M_MLOCKED_MSK), %ecx	/* check for full mutex */

	test	$(M_ILOCKED_MSK), %rcx	/* have to wait for interlock to clear */

	mov	%rcx, %rax		/* eax contains snapshot for cmpxchgl */
	and	$(~M_MLOCKED_MSK), %ecx	/* drop mutex */
	or	$(M_ILOCKED_MSK), %ecx	/* pick up interlock */
	cmpxchg %ecx, M_STATE(%rdx)	/* atomic compare and exchange */
	jne	Llmu_busy_disabled	/* branch on failure to spin loop */

	mov	%rax, M_OWNER(%rdx)
	mov	%rcx, %rax		/* keep original state in %ecx for later evaluation */
	and	$(~(M_ILOCKED_MSK | M_SPIN_MSK | M_PROMOTED_MSK)), %rax

	test	$(M_WAITERS_MSK), %eax
	dec	%eax			/* decrement waiter count */

	mov	%eax, M_STATE(%rdx)	/* since I own the interlock, I don't need an atomic update */

	/* perform lock statistics after drop to prevent delay */
	mov	%gs:CPU_ACTIVE_THREAD, %rax
	decl	TH_MUTEX_COUNT(%rax)	/* lock statistic */
#endif	/* MACH_LDEBUG */

	test	$(M_PROMOTED_MSK | M_WAITERS_MSK), %ecx

	LMTX_CALLEXT2(lck_mtx_unlock_wakeup_x86, %rcx)

	/* Dtrace: LS_LCK_MTX_UNLOCK_RELEASE */
	LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point)
	/* inherit lock pointer in %rdx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %rdx)

	/* Dtrace: LS_LCK_MTX_EXT_UNLOCK_RELEASE */
	LOCKSTAT_LABEL(_lck_mtx_ext_unlock_lockstat_patch_point)
	/* inherit lock pointer in %rdx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, %rdx)

	mov	M_STATE(%rdx), %ecx

	mov	M_PTR(%rdx), %rdx
	mov	M_OWNER(%rdx), %rax
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	CHECK_UNLOCK(%rcx, %rax)
	mov	M_STATE(%rdx), %ecx
LEAF_ENTRY(lck_mtx_ilk_try_lock)
	mov	%rdi, %rdx		/* fetch lock pointer - no indirection here */

	mov	M_STATE(%rdx), %ecx

	test	$(M_ILOCKED_MSK), %ecx	/* can't have the interlock yet */

	mov	%rcx, %rax		/* eax contains snapshot for cmpxchgl */
	or	$(M_ILOCKED_MSK), %ecx
	cmpxchg %ecx, M_STATE(%rdx)	/* atomic compare and exchange */
	jne	2f			/* return failure after re-enabling preemption */

	mov	$1, %rax		/* return success with preemption disabled */
	PREEMPTION_ENABLE		/* need to re-enable preemption */
	xor	%rax, %rax		/* return failure */
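/*
 * Rough C equivalent of lck_mtx_ilk_try_lock() above (illustrative sketch,
 * not compiled; atomic_cmpxchg() stands in for the lock cmpxchg sequence):
 *
 *	uint32_t state = l->lck_mtx_state;
 *	if (state & M_ILOCKED_MSK)
 *		return 0;			// interlock already held
 *	disable_preemption();
 *	if (atomic_cmpxchg(&l->lck_mtx_state, state, state | M_ILOCKED_MSK))
 *		return 1;			// success, preemption left disabled
 *	enable_preemption();
 *	return 0;				// lost the race
 */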
LEAF_ENTRY(lck_mtx_ilk_unlock)
	mov	%rdi, %rdx		/* fetch lock pointer - no indirection here */

	andl	$(~M_ILOCKED_MSK), M_STATE(%rdx)

	PREEMPTION_ENABLE		/* need to re-enable preemption */
LEAF_ENTRY(lck_mtx_lock_grab_mutex)
	mov	%rdi, %rdx		/* fetch lock pointer - no indirection here */

	mov	M_STATE(%rdx), %ecx

	test	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx	/* can't have the mutex yet */

	mov	%rcx, %rax		/* eax contains snapshot for cmpxchgl */
	or	$(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
	cmpxchg %ecx, M_STATE(%rdx)	/* atomic compare and exchange */
	jne	2f			/* branch on failure to spin loop */

	mov	%gs:CPU_ACTIVE_THREAD, %rax
	mov	%rax, M_OWNER(%rdx)	/* record owner of mutex */
	incl	TH_MUTEX_COUNT(%rax)	/* lock statistic */
#endif	/* MACH_LDEBUG */

	mov	$1, %rax		/* return success */
	xor	%rax, %rax		/* return failure */
LEAF_ENTRY(lck_mtx_lock_mark_destroyed)
	mov	M_STATE(%rdx), %ecx
	cmp	$(MUTEX_IND), %ecx	/* Is this an indirect mutex? */

	movl	$(MUTEX_DESTROYED), M_STATE(%rdx)	/* convert to destroyed state */

	test	$(M_ILOCKED_MSK), %rcx	/* have to wait for interlock to clear */

	mov	%rcx, %rax		/* eax contains snapshot for cmpxchgl */
	or	$(M_ILOCKED_MSK), %ecx
	cmpxchg %ecx, M_STATE(%rdx)	/* atomic compare and exchange */
	jne	4f			/* branch on failure to spin loop */
	movl	$(MUTEX_DESTROYED), M_STATE(%rdx)	/* convert to destroyed state */

	LEAF_RET			/* return with M_ILOCKED set */
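/*
 * Rough C equivalent of lck_mtx_lock_mark_destroyed() above (illustrative
 * sketch, not compiled; take_interlock() stands in for the cmpxchg loop):
 *
 *	uint32_t state = l->lck_mtx_state;
 *	if (state == MUTEX_IND) {		// indirect mutex: stamp the stub
 *		l->lck_mtx_state = MUTEX_DESTROYED;
 *		return;
 *	}
 *	take_interlock(l);			// wait for M_ILOCKED to clear, then CAS
 *	l->lck_mtx_state = MUTEX_DESTROYED;	// later lock/unlock attempts can
 *						// detect use of a destroyed mutex
 */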
LEAF_ENTRY(preemption_underflow_panic)
	incl	%gs:CPU_PREEMPTION_LEVEL
	LOAD_STRING_ARG0(16f)
16:	String	"Preemption level underflow, possible cause unlocking an unlocked mutex or spinlock"