+/*
+ * lck_mtx_destroyed: common panic path, jumped to (never called) when a
+ * state-word compare matches MUTEX_DESTROYED.  LMTX_REG still holds the
+ * lock pointer; it becomes the %p argument of the panic format string.
+ * Does not return.
+ */
+lck_mtx_destroyed:
+ ALIGN_STACK()
+ LOAD_PTR_ARG1(LMTX_REG)
+ LOAD_STRING_ARG0(mutex_interlock_destroyed_str)
+ CALL_PANIC()
+
+
+/*
+ * Panic-message strings for the mutex error paths.  Each takes the lock
+ * address as its %p argument.  Only mutex_interlock_destroyed_str is
+ * referenced in this chunk (by lck_mtx_destroyed above); the two assert
+ * strings are presumably used by lck_mtx_assert elsewhere in this file —
+ * not visible here.
+ */
+.data
+mutex_assert_not_owned_str:
+ .asciz "mutex (%p) not owned\n"
+mutex_assert_owned_str:
+ .asciz "mutex (%p) owned\n"
+mutex_interlock_destroyed_str:
+ .asciz "trying to interlock destroyed mutex (%p)"
+.text
+
+
+
+/*
+ * lck_mtx_lock()
+ * lck_mtx_try_lock()
+ * lck_mtx_unlock()
+ * lck_mtx_lock_spin()
+ * lck_mtx_lock_spin_always()
+ * lck_mtx_convert_spin()
+ */
+/*
+ * Spin-mode lock entry points.
+ *
+ * lck_mtx_lock_spin_always() skips the preemption-level check;
+ * lck_mtx_lock_spin() performs it.  Both merge at Llmls_avoid_check.
+ *
+ * Fast path: if neither M_ILOCKED_MSK nor M_MLOCKED_MSK is set in the
+ * state word, atomically set (M_ILOCKED_MSK | M_SPIN_MSK) with a lock
+ * cmpxchg and record the current thread as owner.  On success, returns
+ * with the interlock held and preemption disabled.
+ */
+NONLEAF_ENTRY(lck_mtx_lock_spin_always)
+ LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+ jmp Llmls_avoid_check
+
+NONLEAF_ENTRY(lck_mtx_lock_spin)
+ LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+
+ CHECK_PREEMPTION_LEVEL()
+Llmls_avoid_check:
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32 /* is the interlock or mutex held */
+ jnz Llmls_slow
+Llmls_try: /* no - can't be INDIRECT, DESTROYED or locked */
+ mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK | M_SPIN_MSK), LMTX_C_REG32
+
+ PREEMPTION_DISABLE
+ lock
+ cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ jne Llmls_busy_disabled
+
+ mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
+ mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of interlock */
+#if MACH_LDEBUG
+ test LMTX_A_REG, LMTX_A_REG
+ jz 1f /* skip stat if no active thread */
+ incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+1:
+#endif /* MACH_LDEBUG */
+
+ LMTX_CHK_EXTENDED_EXIT
+ /* return with the interlock held and preemption disabled */
+ leave
+#if CONFIG_DTRACE
+ /* lockstat: the ret below is patched to fall through to the probe */
+ LOCKSTAT_LABEL(_lck_mtx_lock_spin_lockstat_patch_point)
+ ret
+ /* inherit lock pointer in LMTX_REG above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, LMTX_REG)
+#endif
+ ret
+
+/*
+ * Slow path: decide whether the bit we saw set was the interlock or the
+ * mutex bit, and recognize the special full-word state values
+ * MUTEX_DESTROYED (panic) and MUTEX_IND (indirect/extended mutex).
+ */
+Llmls_slow:
+ test $M_ILOCKED_MSK, LMTX_C_REG32 /* is the interlock held */
+ jz Llml_contended /* no, must have been the mutex */
+
+ cmp $(MUTEX_DESTROYED), LMTX_C_REG32 /* check to see if it's marked destroyed */
+ je lck_mtx_destroyed
+ cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex */
+ jne Llmls_loop /* no... must be interlocked */
+
+ LMTX_ENTER_EXTENDED
+
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+ test $(M_SPIN_MSK), LMTX_C_REG32
+ jz Llmls_loop1
+
+ LMTX_UPDATE_MISS /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */
+Llmls_loop:
+ PAUSE
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+Llmls_loop1:
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32
+ jz Llmls_try /* both clear - retry the fast path */
+ test $(M_MLOCKED_MSK), LMTX_C_REG32
+ jnz Llml_contended /* mutex owned by someone else, go contend for it */
+ jmp Llmls_loop /* only the interlock is held - keep spinning */
+
+Llmls_busy_disabled:
+ /* cmpxchg lost the race; undo the preemption disable and respin */
+ PREEMPTION_ENABLE
+ jmp Llmls_loop
+
+
+
+/*
+ * lck_mtx_lock(): full (blocking) mutex acquire.
+ *
+ * Fast path: if neither interlock nor mutex bit is set, atomically set
+ * (M_ILOCKED_MSK | M_MLOCKED_MSK) and record the owner; if waiters are
+ * present, call lck_mtx_lock_acquire_x86 before dropping the interlock.
+ * The contended path (Llml_contended) is shared — the spin variant above
+ * also jumps here when it finds the mutex bit set.
+ */
+NONLEAF_ENTRY(lck_mtx_lock)
+ LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+
+ CHECK_PREEMPTION_LEVEL()
+
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32 /* is the interlock or mutex held */
+ jnz Llml_slow
+Llml_try: /* no - can't be INDIRECT, DESTROYED or locked */
+ mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32
+
+ PREEMPTION_DISABLE
+ lock
+ cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ jne Llml_busy_disabled
+
+ mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
+ mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of mutex */
+#if MACH_LDEBUG
+ test LMTX_A_REG, LMTX_A_REG
+ jz 1f /* skip stat if no active thread */
+ incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+1:
+#endif /* MACH_LDEBUG */
+
+ testl $(M_WAITERS_MSK), M_STATE(LMTX_REG)
+ jz Llml_finish
+
+ LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
+
+/*
+ * Common exit: drop the interlock bit, re-enable preemption, and take
+ * either the plain or the extended-mutex dtrace/return path.
+ */
+Llml_finish:
+ andl $(~M_ILOCKED_MSK), M_STATE(LMTX_REG)
+ PREEMPTION_ENABLE
+
+ LMTX_CHK_EXTENDED /* is this an extended mutex */
+ jne 2f
+
+ leave
+#if CONFIG_DTRACE
+ LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point)
+ ret
+ /* inherit lock pointer in LMTX_REG above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, LMTX_REG)
+#endif
+ ret
+2:
+ LMTX_EXIT_EXTENDED
+ leave
+#if CONFIG_DTRACE
+ LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point)
+ ret
+ /* inherit lock pointer in LMTX_REG above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, LMTX_REG)
+#endif
+ ret
+
+
+Llml_slow:
+ test $M_ILOCKED_MSK, LMTX_C_REG32 /* is the interlock held */
+ jz Llml_contended /* no, must have been the mutex */
+
+ cmp $(MUTEX_DESTROYED), LMTX_C_REG32 /* check to see if it's marked destroyed */
+ je lck_mtx_destroyed
+ cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex? */
+ jne Llml_loop /* no... must be interlocked */
+
+ LMTX_ENTER_EXTENDED
+
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+ test $(M_SPIN_MSK), LMTX_C_REG32
+ jz Llml_loop1
+
+ LMTX_UPDATE_MISS /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */
+Llml_loop:
+ PAUSE
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+Llml_loop1:
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32
+ jz Llml_try /* both clear - retry the fast path */
+ test $(M_MLOCKED_MSK), LMTX_C_REG32
+ jnz Llml_contended /* mutex owned by someone else, go contend for it */
+ jmp Llml_loop /* only the interlock is held - keep spinning */
+
+Llml_busy_disabled:
+ /* cmpxchg lost the race; undo the preemption disable and respin */
+ PREEMPTION_ENABLE
+ jmp Llml_loop
+
+
+/*
+ * Contended path (also entered from lck_mtx_lock_spin's slow path).
+ * lck_mtx_lock_spinwait_x86 returns 0 in LMTX_RET_REG when it managed
+ * to acquire the mutex (interlock held, preemption disabled); non-zero
+ * means we must take the interlock ourselves and possibly block.
+ */
+Llml_contended:
+ LMTX_CHK_EXTENDED /* is this an extended mutex */
+ je 0f
+ LMTX_UPDATE_MISS
+0:
+ LMTX_CALLEXT1(lck_mtx_lock_spinwait_x86)
+
+ test LMTX_RET_REG, LMTX_RET_REG
+ jz Llml_acquired /* acquired mutex, interlock held and preemption disabled */
+
+ cmp $1, LMTX_RET_REG /* check for direct wait status */
+ je 2f
+ LMTX_CHK_EXTENDED /* is this an extended mutex */
+ je 2f
+ LMTX_UPDATE_DIRECT_WAIT
+2: /* retry: take the interlock, then grab or wait for the mutex */
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+ test $(M_ILOCKED_MSK), LMTX_C_REG32
+ jnz 6f /* interlock busy - pause and re-read */
+
+ mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK), LMTX_C_REG32 /* try to take the interlock */
+
+ PREEMPTION_DISABLE
+ lock
+ cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ jne 5f
+
+ test $(M_MLOCKED_MSK), LMTX_C_REG32 /* we've got the interlock and */
+ jnz 3f
+ or $(M_MLOCKED_MSK), LMTX_C_REG32 /* the mutex is free... grab it directly */
+ mov LMTX_C_REG32, M_STATE(LMTX_REG)
+
+ mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
+ mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of mutex */
+#if MACH_LDEBUG
+ test LMTX_A_REG, LMTX_A_REG
+ jz 1f /* skip stat if no active thread */
+ incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+1:
+#endif /* MACH_LDEBUG */
+
+/*
+ * Mutex is ours, interlock held, preemption disabled.  Call the acquire
+ * hook if there are waiters, or if the owner was promoted on wakeup.
+ */
+Llml_acquired:
+ testl $(M_WAITERS_MSK), M_STATE(LMTX_REG)
+ jnz 1f
+ mov M_OWNER(LMTX_REG), LMTX_A_REG
+ mov TH_WAS_PROMOTED_ON_WAKEUP(LMTX_A_REG), LMTX_A_REG32
+ test LMTX_A_REG32, LMTX_A_REG32
+ jz Llml_finish
+1:
+ LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
+ jmp Llml_finish
+
+3: /* interlock held, mutex busy */
+ LMTX_CHK_EXTENDED /* is this an extended mutex */
+ je 4f
+ LMTX_UPDATE_WAIT
+4:
+ LMTX_CALLEXT1(lck_mtx_lock_wait_x86)
+ jmp Llml_contended
+5: /* interlock cmpxchg lost the race */
+ PREEMPTION_ENABLE
+6:
+ PAUSE
+ jmp 2b
+
+
+
+/*
+ * lck_mtx_try_lock_spin(): non-blocking spin-mode acquire attempt.
+ *
+ * On success returns 1 with the interlock held and preemption disabled,
+ * mirroring lck_mtx_lock_spin's fast path.  If the mutex bit or the
+ * spin bit is held by another thread, bails to Llmts_fail (defined
+ * outside this chunk — presumably returns 0; confirm against the rest
+ * of the file).  Never blocks: a held interlock is merely spun on.
+ */
+NONLEAF_ENTRY(lck_mtx_try_lock_spin)
+ LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32 /* is the interlock or mutex held */
+ jnz Llmts_slow
+Llmts_try: /* no - can't be INDIRECT, DESTROYED or locked */
+ mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK | M_SPIN_MSK), LMTX_C_REG32 /* 32-bit form, matching Llmls_try/Llmt_try; upper half already zero from the 32-bit load above */
+
+ PREEMPTION_DISABLE
+ lock
+ cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ jne Llmts_busy_disabled
+
+ mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
+ mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of mutex */
+#if MACH_LDEBUG
+ test LMTX_A_REG, LMTX_A_REG
+ jz 1f /* skip stat if no active thread */
+ incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+1:
+#endif /* MACH_LDEBUG */
+
+ LMTX_CHK_EXTENDED_EXIT
+ leave
+
+#if CONFIG_DTRACE
+ mov $1, LMTX_RET_REG /* return success */
+ LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point)
+ ret
+ /* inherit lock pointer in LMTX_REG above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, LMTX_REG)
+#endif
+ mov $1, LMTX_RET_REG /* return success */
+ ret
+
+Llmts_slow:
+ test $(M_ILOCKED_MSK), LMTX_C_REG32 /* is the interlock held */
+ jz Llmts_fail /* no, must be held as a mutex */
+
+ cmp $(MUTEX_DESTROYED), LMTX_C_REG32 /* check to see if it's marked destroyed */
+ je lck_mtx_destroyed
+ cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex? */
+ jne Llmts_loop1
+
+ LMTX_ENTER_EXTENDED
+Llmts_loop:
+ PAUSE
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+Llmts_loop1:
+ test $(M_MLOCKED_MSK | M_SPIN_MSK), LMTX_C_REG32
+ jnz Llmts_fail /* owned or spun on by someone else - give up */
+ test $(M_ILOCKED_MSK), LMTX_C_REG32
+ jz Llmts_try /* fully free - retry the fast path */
+ jmp Llmts_loop /* only interlocked - spin until it settles */
+
+Llmts_busy_disabled:
+ /* cmpxchg lost the race; undo the preemption disable and respin */
+ PREEMPTION_ENABLE
+ jmp Llmts_loop
+
+
+
+/*
+ * lck_mtx_try_lock(): non-blocking full mutex acquire attempt.
+ *
+ * On success returns 1 with the mutex held, interlock released and
+ * preemption re-enabled.  Bails to Llmt_fail (defined outside this
+ * chunk — presumably returns 0; confirm against the rest of the file)
+ * when the mutex or spin bit is held.  Never blocks.
+ */
+NONLEAF_ENTRY(lck_mtx_try_lock)
+ LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32 /* is the interlock or mutex held */
+ jnz Llmt_slow
+Llmt_try: /* no - can't be INDIRECT, DESTROYED or locked */
+ mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32
+
+ PREEMPTION_DISABLE
+ lock
+ cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ jne Llmt_busy_disabled
+
+ mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
+ mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of mutex */
+#if MACH_LDEBUG
+ test LMTX_A_REG, LMTX_A_REG
+ jz 1f /* skip stat if no active thread */
+ incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+1:
+#endif /* MACH_LDEBUG */
+
+ LMTX_CHK_EXTENDED_EXIT
+
+ /* LMTX_C_REG32 still holds the state word the cmpxchg just stored */
+ test $(M_WAITERS_MSK), LMTX_C_REG32
+ jz 0f
+
+ LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
+0: /* drop interlock bit, keep M_MLOCKED_MSK; mutex is now ours */
+ andl $(~M_ILOCKED_MSK), M_STATE(LMTX_REG)
+ PREEMPTION_ENABLE
+
+ leave
+#if CONFIG_DTRACE
+ mov $1, LMTX_RET_REG /* return success */
+ /* Dtrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */
+ LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point)
+ ret
+ /* inherit lock pointer in LMTX_REG from above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, LMTX_REG)
+#endif
+ mov $1, LMTX_RET_REG /* return success */
+ ret
+
+Llmt_slow:
+ test $(M_ILOCKED_MSK), LMTX_C_REG32 /* is the interlock held */
+ jz Llmt_fail /* no, must be held as a mutex */
+
+ cmp $(MUTEX_DESTROYED), LMTX_C_REG32 /* check to see if it's marked destroyed */
+ je lck_mtx_destroyed
+ cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex? */
+ jne Llmt_loop /* NOTE(review): sibling paths jump to their _loop1 label here, skipping the PAUSE/re-read; verify intent */
+
+ LMTX_ENTER_EXTENDED
+Llmt_loop:
+ PAUSE
+ mov M_STATE(LMTX_REG), LMTX_C_REG32
+Llmt_loop1: /* NOTE(review): not referenced within this chunk */
+ test $(M_MLOCKED_MSK | M_SPIN_MSK), LMTX_C_REG32
+ jnz Llmt_fail /* owned or spun on by someone else - give up */
+ test $(M_ILOCKED_MSK), LMTX_C_REG32
+ jz Llmt_try /* fully free - retry the fast path */
+ jmp Llmt_loop /* only interlocked - spin until it settles */
+
+Llmt_busy_disabled:
+ /* cmpxchg lost the race; undo the preemption disable and respin */
+ PREEMPTION_ENABLE
+ jmp Llmt_loop
+