+Llml_ilk_refail:
+ popf /* restore interrupt state */
+ pushf /* resave interrupt state on stack */
+
+Llml_ilk_reloop:
+ PAUSE
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ je Llml_reget_retry /* yes - go try to grab it */
+
+ cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
+ jne Llml_ilk_reloop /* no - keep spinning */
+
+ pushl %edx
+ call EXT(lck_mtx_interlock_panic)
+ /*
+ * shouldn't return from here, but just in case
+ */
+ popl %edx
+ jmp Llml_ilk_reloop
+
+
+Llml_eval_ilk:
+ cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
+ cmove M_PTR,%edx /* If so, take indirection */
+ jne Llml_ilk_loop /* If not, go to spin loop */
+
+/*
+ * Entry into statistics codepath for lck_mtx_lock:
+ * EDX: real lock pointer
+ * first dword on stack contains flags
+ */
+
+/* Enable this preprocessor define to record only the first miss.
+ * By default, we count every miss, so multiple misses may be
+ * recorded for a single lock acquire attempt via lck_mtx_lock.
+ */
+#undef LOG_FIRST_MISS_ALONE
+
+/*
+ * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
+ * Only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained as a
+ * 64-bit quantity, matching the existing PowerPC implementation; the new
+ * x86-specific statistics are maintained as 32-bit quantities.
+ */
+
+Llml_lck_ext:
+ pushl %esi /* Used to hold the lock group ptr */
+ pushl %edi /* Used for stat update records */
+ movl MUTEX_GRP(%edx),%esi /* Load lock group */
+ xorl %edi,%edi /* Clear stat update records */
+ /* 64-bit increment of acquire attempt statistic (per-group) */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ addl $1, GRP_MTX_STAT_UTIL(%esi)
+ jnc 1f
+ incl GRP_MTX_STAT_UTIL+4(%esi)
+1:
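+ /*
+ * The sequence above builds a 64-bit increment out of 32-bit operations:
+ * the addl (lock-prefixed when atomic stat updates are enabled) sets the
+ * carry flag only when the low word wraps to zero, and jnc skips the
+ * high-word increment otherwise. Roughly equivalent C, for illustration
+ * only:
+ *
+ *	if (++util_lo == 0)
+ *		util_hi++;
+ */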
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ jne Llml_ext_ilk_loop /* no, go to spin loop */
+Llml_ext_get_hw:
+ cli /* disable interrupts */
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+
+ /* eax == 0 at this point */
+ lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
+ jne Llml_ext_ilk_fail /* branch on failure to retry */
+
+ movl M_LOCKED,%ecx /* get lock owner */
+ testl %ecx,%ecx /* is the mutex locked? */
+ jne Llml_ext_fail /* yes, we lose */
+
+Llml_ext_acquire:
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+ movl %ecx,M_LOCKED
+
+ cmpw $0,M_WAITERS /* are there any waiters? */
+ jne Llml_ext_waiters /* yes, more work to do */
+Llml_ext_return:
+ xorl %eax,%eax
+ movl %eax,M_ILK
+
+ popl %edi
+ popl %esi
+ popf /* restore interrupt state */
+ leave
+#if CONFIG_DTRACE
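+ /*
+ * DTrace probe site. The "ret" that follows LOCKSTAT_LABEL acts as a
+ * patch point: when the matching lockstat probe is enabled it is
+ * expected to be patched to a no-op, so execution falls through to
+ * LOCKSTAT_RECORD instead of returning early. (Descriptive note; the
+ * patching itself lives in the lockstat support code.)
+ */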
+ LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point)
+ ret
+ /* inherit lock pointer in %edx above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %edx)
+#endif
+ ret
+
+Llml_ext_waiters:
+ pushl %edx /* save mutex address */
+ pushl %edx
+ call EXT(lck_mtx_lock_acquire)
+ addl $4,%esp
+ popl %edx /* restore mutex address */
+ jmp Llml_ext_return
+
+Llml_ext_restart:
+Llml_ext_ilk_fail:
+ movl 8(%esp),%ecx /* fetch saved EFLAGS image (below %esi/%edi) */
+ pushl %ecx
+ popf /* restore interrupt state */
+
+Llml_ext_ilk_loop:
+ PAUSE
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ je Llml_ext_get_hw /* yes - go try to grab it */
+
+ cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
+ jne Llml_ext_ilk_loop /* no - keep spinning */
+
+ pushl %edx
+ call EXT(lck_mtx_interlock_panic)
+ /*
+ * shouldn't return from here, but just in case
+ */
+ popl %edx
+ jmp Llml_ext_ilk_loop
+
+
+Llml_ext_fail:
+#ifdef LOG_FIRST_MISS_ALONE
+ testl $1, %edi
+ jnz 1f
+#endif /* LOG_FIRST_MISS_ALONE */
+ /* Record that a lock acquire attempt missed (per-group statistic) */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ incl GRP_MTX_STAT_MISS(%esi)
+#ifdef LOG_FIRST_MISS_ALONE
+ orl $1, %edi
+#endif /* LOG_FIRST_MISS_ALONE */
+1:
+ /*
+ * Check if the owner is on another processor and therefore
+ * we should try to spin before blocking.
+ */
+ testl $(OnProc),ACT_SPF(%ecx)
+ jnz 2f
+ /*
+ * Record the "direct wait" statistic, which indicates that a
+ * miss proceeded to block directly without spinning; this occurs
+ * when the owner of the mutex isn't running on another processor
+ * at the time of the check.
+ */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ incl GRP_MTX_STAT_DIRECT_WAIT(%esi)
+ jmp Llml_ext_block
+2:
+ /*
+ * Here if owner is on another processor:
+ * - release the interlock
+ * - spin on the holder until release or timeout
+ * - in either case re-acquire the interlock
+ * - if released, acquire it
+ * - otherwise drop thru to block.
+ */
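+ /*
+ * Rough C-level outline of the sequence below, for orientation only
+ * (helper and field names are illustrative, not the kernel's; the
+ * interlock retry loop is omitted):
+ *
+ *	mutex->ilk = 0;                   // release the interlock
+ *	restore_interrupts(saved_eflags);
+ *	lck_mtx_lock_spinwait(mutex);     // spin on the holder until release or timeout
+ *	disable_interrupts();
+ *	retake_interlock(mutex);          // cmpxchg, spinning on failure
+ *	if (mutex->owner == NULL)
+ *		goto acquire;             // holder released it
+ *	// otherwise fall through to Llml_ext_block
+ */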
+ xorl %eax,%eax
+ movl %eax,M_ILK /* zero interlock */
+
+ pushl 8(%esp) /* Make another copy of EFLAGS image */
+ popf /* Restore interrupt state */
+ pushl %edx /* save mutex address */
+ pushl %edx
+ call EXT(lck_mtx_lock_spinwait)
+ addl $4,%esp
+ popl %edx /* restore mutex address */
+
+ /* Re-acquire interlock */
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ jne Llml_ext_ilk_refail /* no, go to spin loop */
+Llml_ext_reget_retry:
+ cli /* disable interrupts */
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+
+ /* eax == 0 at this point */
+ lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
+ jne Llml_ext_ilk_refail /* branch on failure to spin loop */
+
+ movl M_LOCKED,%ecx /* get lock owner */
+ testl %ecx,%ecx /* is the mutex free? */
+ je Llml_ext_acquire /* yes, acquire */
+
+Llml_ext_block:
+ /* If we wanted to count waits just once per lock acquire, we'd
+ * skip over the stat update here
+ */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ /* Record that a lock miss proceeded to block */
+ incl GRP_MTX_STAT_WAIT(%esi)
+1:
+ CHECK_MYLOCK(M_THREAD)
+ pushl %edx /* save mutex address */
+ pushl M_LOCKED
+ pushl %edx /* push mutex address */
+ /*
+ * N.B.: lck_mtx_lock_wait is called here with interrupts disabled.
+ * Consider reworking.
+ */
+ call EXT(lck_mtx_lock_wait) /* wait for the lock */
+ addl $8,%esp
+ popl %edx /* restore mutex address */
+ jmp Llml_ext_restart /* and start over */
+
+Llml_ext_ilk_refail:
+ movl 8(%esp),%ecx /* fetch saved EFLAGS image (below %esi/%edi) */
+ pushl %ecx
+ popf /* restore interrupt state */
+
+Llml_ext_ilk_reloop:
+ PAUSE
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ je Llml_ext_reget_retry /* yes - go try to grab it */
+
+ cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
+ jne Llml_ext_ilk_reloop /* no - keep spinning */
+
+ pushl %edx
+ call EXT(lck_mtx_interlock_panic)
+ /*
+ * shouldn't return from here, but just in case
+ */
+ popl %edx
+ jmp Llml_ext_ilk_reloop
+
+
+
+NONLEAF_ENTRY(lck_mtx_try_lock_spin)
+
+ movl B_ARG0,%edx /* fetch lock pointer */
+ pushf /* save interrupt state */
+
+ CHECK_NO_SIMPLELOCKS()
+ CHECK_PREEMPTION_LEVEL()
+
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ jne Llmts_eval_ilk /* no, go see if indirect */
+Llmts_retry:
+ cli /* disable interrupts */
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+
+ /* eax == 0 at this point */
+ lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
+ jne Llmts_ilk_fail /* branch on failure to retry */
+
+ movl M_LOCKED,%ecx /* get lock owner */
+ testl %ecx,%ecx /* is the mutex locked? */
+ jne Llmt_fail /* yes, we lose */
+
+ movl $(MUTEX_LOCKED_AS_SPIN),M_LOCKED /* no, indicate ownership as a spin lock */
+ PREEMPTION_DISABLE /* and return with interlock held */
+
+ movl $1,%eax /* return success */
+ popf /* restore interrupt state */
+ leave
+#if CONFIG_DTRACE
+ LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point)
+ ret
+ /* inherit lock pointer in %edx above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, %edx)
+ movl $1,%eax /* return success */
+#endif
+ ret
+
+Llmts_ilk_fail:
+ popf /* restore interrupt state */
+ pushf /* resave interrupt state */
+
+Llmts_ilk_loop:
+ PAUSE
+ /*
+ * need to do this check outside of the interlock since
+ * this lock may be held as a simple lock, which means
+ * we won't be able to take the interlock (a mutex acquired
+ * in spin mode is returned with the interlock still held)
+ */
+ movl M_LOCKED,%eax /* get lock owner */
+ testl %eax,%eax /* is the mutex locked? */
+ jne Llmt_fail_no_ilk /* yes, go return failure */
+
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ je Llmts_retry /* yes - go try to grab it */
+
+ cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
+ jne Llmts_ilk_loop /* no - keep spinning */
+
+ pushl %edx
+ call EXT(lck_mtx_interlock_panic)
+ /*
+ * shouldn't return from here, but just in case
+ */
+ popl %edx
+ jmp Llmts_ilk_loop
+
+Llmts_eval_ilk:
+ cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
+ cmove M_PTR,%edx /* If so, take indirection */
+ jne Llmts_ilk_loop /* If not, go to spin loop */
+
+ /*
+ * bump counter on indirect lock
+ */
+ pushl %esi /* Used to hold the lock group ptr */
+ movl MUTEX_GRP(%edx),%esi /* Load lock group */
+ /* 64-bit increment of acquire attempt statistic (per-group) */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ addl $1, GRP_MTX_STAT_UTIL(%esi)
+ jnc 1f
+ incl GRP_MTX_STAT_UTIL+4(%esi)
+1:
+ popl %esi
+ jmp Llmts_ilk_loop
+
+
+
+NONLEAF_ENTRY(lck_mtx_try_lock)
+
+ movl B_ARG0,%edx /* fetch lock pointer */
+ pushf /* save interrupt state */
+
+ CHECK_NO_SIMPLELOCKS()
+ CHECK_PREEMPTION_LEVEL()
+
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ jne Llmt_eval_ilk /* no, go see if indirect */
+Llmt_retry:
+ cli /* disable interrupts */
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+
+ /* eax == 0 at this point */
+ lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
+ jne Llmt_ilk_fail /* branch on failure to retry */
+
+ movl M_LOCKED,%ecx /* get lock owner */
+ testl %ecx,%ecx /* is the mutex locked? */
+ jne Llmt_fail /* yes, we lose */
+Llmt_acquire:
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+ movl %ecx,M_LOCKED
+
+ cmpw $0,M_WAITERS /* are there any waiters? */
+ jne Llmt_waiters /* yes, more work to do */
+Llmt_return:
+ xorl %eax,%eax
+ movl %eax,M_ILK
+
+ popf /* restore interrupt state */
+
+ movl $1,%eax /* return success */
+ leave
+#if CONFIG_DTRACE
+ /* DTrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */
+ LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point)
+ ret
+ /* inherit lock pointer in %edx from above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, %edx)
+ movl $1,%eax /* return success */
+#endif
+ ret
+
+Llmt_waiters:
+ pushl %edx /* save mutex address */
+ pushl %edx
+ call EXT(lck_mtx_lock_acquire)
+ addl $4,%esp
+ popl %edx /* restore mutex address */
+ jmp Llmt_return
+
+Llmt_ilk_fail:
+ popf /* restore interrupt state */
+ pushf /* resave interrupt state */
+
+Llmt_ilk_loop:
+ PAUSE
+ /*
+ * need to do this check outside of the interlock since
+ * this lock may be held as a simple lock, which means
+ * we won't be able to take the interlock (a mutex acquired
+ * in spin mode is returned with the interlock still held)
+ */
+ movl M_LOCKED,%eax /* get lock owner */
+ testl %eax,%eax /* is the mutex locked? */
+ jne Llmt_fail_no_ilk /* yes, go return failure */
+
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ je Llmt_retry /* yes - go try to grab it */
+
+ cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
+ jne Llmt_ilk_loop /* no - keep spinning */
+
+ pushl %edx
+ call EXT(lck_mtx_interlock_panic)
+ /*
+ * shouldn't return from here, but just in case
+ */
+ popl %edx
+ jmp Llmt_ilk_loop
+
+Llmt_fail:
+ xorl %eax,%eax /* Zero interlock value */
+ movl %eax,M_ILK
+
+Llmt_fail_no_ilk:
+ popf /* restore interrupt state */
+
+ cmpl %edx,B_ARG0
+ jne Llmt_fail_indirect
+
+ xorl %eax,%eax
+ /* Note that we don't record a dtrace event for trying and missing */
+ NONLEAF_RET
+
+Llmt_fail_indirect:
+ pushl %esi /* Used to hold the lock group ptr */
+ movl MUTEX_GRP(%edx),%esi /* Load lock group */
+
+ /* Record mutex acquire attempt miss statistic */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ incl GRP_MTX_STAT_MISS(%esi)
+
+ popl %esi
+ xorl %eax,%eax
+ NONLEAF_RET
+
+Llmt_eval_ilk:
+ cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
+ cmove M_PTR,%edx /* If so, take indirection */
+ jne Llmt_ilk_loop /* If not, go to spin loop */
+
+ /*
+ * bump counter for indirect lock
+ */
+ pushl %esi /* Used to hold the lock group ptr */
+ movl MUTEX_GRP(%edx),%esi /* Load lock group */
+
+ /* 64-bit increment of acquire attempt statistic (per-group) */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ addl $1, GRP_MTX_STAT_UTIL(%esi)
+ jnc 1f
+ incl GRP_MTX_STAT_UTIL+4(%esi)
+1:
+ popl %esi
+ jmp Llmt_ilk_loop
+
+
+
+LEAF_ENTRY(lck_mtx_convert_spin)
+ movl L_ARG0,%edx /* fetch lock pointer */
+
+ cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
+ cmove M_PTR,%edx /* If so, take indirection */