+
+Llml_eval_ilk:
+ cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
+ cmove M_PTR,%edx /* If so, take indirection */
+ jne Llml_ilk_loop /* If not, go to spin loop */
+
+/*
+ * Entry into statistics codepath for lck_mtx_lock:
+ * EDX: real lock pointer
+ * first dword on stack contains the saved EFLAGS image
+ */
+
+/* Enable this preprocessor define to record only the first miss.
+ * By default, we count every miss, so multiple misses may be
+ * recorded for a single lock acquire attempt via lck_mtx_lock.
+ */
+#undef LOG_FIRST_MISS_ALONE
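+/*
+ * Roughly, in C (a hedged sketch; "missed_already" stands in for the
+ * flag bit kept in %edi below and is not a real XNU variable):
+ *
+ *	#ifdef LOG_FIRST_MISS_ALONE
+ *		if (!missed_already) {		// count at most one miss
+ *			grp_mtx_stat_miss++;
+ *			missed_already = 1;
+ *		}
+ *	#else
+ *		grp_mtx_stat_miss++;		// count every failed attempt
+ *	#endif
+ */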
+
+/*
+ * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
+ * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
+ * as a 64-bit quantity (this matches the existing PowerPC implementation,
+ * and the new x86 specific statistics are also maintained as 32-bit
+ * quantities).
+ */
+
+Llml_lck_ext:
+ pushl %esi /* Used to hold the lock group ptr */
+ pushl %edi /* Used for stat update records */
+ movl MUTEX_GRP(%edx),%esi /* Load lock group */
+ xorl %edi,%edi /* Clear stat update records */
+ /* 64-bit increment of acquire attempt statistic (per-group) */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ addl $1, GRP_MTX_STAT_UTIL(%esi)
+ jnc 1f
+ incl GRP_MTX_STAT_UTIL+4(%esi)
+1:
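+/*
+ * The addl/jnc/incl triple above is a 32-bit-at-a-time 64-bit increment;
+ * a hedged C sketch (field names are illustrative only):
+ *
+ *	grp->mtx_stat_util_lo += 1;
+ *	if (grp->mtx_stat_util_lo == 0)		// addl set CF: low word wrapped
+ *		grp->mtx_stat_util_hi += 1;	// propagate the carry
+ *
+ * Note that LOCK_IF_ATOMIC_STAT_UPDATES precedes only the addl, so the
+ * carry propagation appears to be a separate, non-atomic store.
+ */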
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ jne Llml_ext_ilk_loop /* no, go to spin loop */
+Llml_ext_get_hw:
+ cli /* disable interrupts */
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+
+ /* eax == 0 at this point */
+ lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
+ jne Llml_ext_ilk_fail /* branch on failure to retry */
+
+ movl M_LOCKED,%ecx /* get lock owner */
+ testl %ecx,%ecx /* is the mutex locked? */
+ jne Llml_ext_fail /* yes, we lose */
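+/*
+ * A hedged C sketch of the interlock acquisition above, with a GCC/Clang
+ * builtin standing in for the raw cmpxchg (field names are illustrative):
+ *
+ *	disable_interrupts();				// cli
+ *	thread_t self = current_thread();		// %gs:CPU_ACTIVE_THREAD
+ *	if (!__sync_bool_compare_and_swap(&mutex->ilk, 0, self))
+ *		goto ilk_fail;		// lost the race: restore state, spin
+ *	if (mutex->locked != NULL)
+ *		goto ext_fail;		// interlock held, but mutex is owned
+ */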
+
+Llml_ext_acquire:
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+ movl %ecx,M_LOCKED
+
+ cmpw $0,M_WAITERS /* are there any waiters? */
+ jne Llml_ext_waiters /* yes, more work to do */
+Llml_ext_return:
+ xorl %eax,%eax
+ movl %eax,M_ILK /* zero interlock to release it */
+
+ popl %edi
+ popl %esi
+ popf /* restore interrupt state */
+ leave
+#if CONFIG_DTRACE
+ LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point)
+ ret
+ /* inherit lock pointer in %edx above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %edx)
+#endif
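+/*
+ * Note on the CONFIG_DTRACE block above: LOCKSTAT_LABEL marks the preceding
+ * ret as a patch point. When the DTrace lockstat provider is enabled, that
+ * ret is presumably patched out so execution falls through into
+ * LOCKSTAT_RECORD; otherwise the probe is never reached.
+ */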
+ ret
+
+Llml_ext_waiters:
+ pushl %edx /* save mutex address */
+ pushl %edx
+ call EXT(lck_mtx_lock_acquire)
+ addl $4,%esp
+ popl %edx /* restore mutex address */
+ jmp Llml_ext_return
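+/*
+ * %edx is caller-saved under the i386 C calling convention, so the mutex
+ * address is pushed and popped around every external C call in this path
+ * (lck_mtx_lock_acquire here; lck_mtx_lock_spinwait and lck_mtx_lock_wait
+ * below).
+ */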
+
+Llml_ext_restart:
+Llml_ext_ilk_fail:
+ movl 8(%esp),%ecx /* fetch EFLAGS image saved at entry (beneath %edi/%esi) */
+ pushl %ecx
+ popf /* restore interrupt state */
+
+Llml_ext_ilk_loop:
+ PAUSE
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ je Llml_ext_get_hw /* yes - go try to grab it */
+
+ cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
+ jne Llml_ext_ilk_loop /* no - keep spinning */
+
+ pushl %edx
+ call EXT(lck_mtx_interlock_panic)
+ /*
+ * shouldn't return from here, but just in case
+ */
+ popl %edx
+ jmp Llml_ext_ilk_loop
+
+
+Llml_ext_fail:
+#ifdef LOG_FIRST_MISS_ALONE
+ testl $1, %edi
+ jnz 1f
+#endif /* LOG_FIRST_MISS_ALONE */
+ /* Record that a lock acquire attempt missed (per-group statistic) */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ incl GRP_MTX_STAT_MISS(%esi)
+#ifdef LOG_FIRST_MISS_ALONE
+ orl $1, %edi
+#endif /* LOG_FIRST_MISS_ALONE */
+1:
+ /*
+ * Check if the owner is on another processor and therefore
+ * we should try to spin before blocking.
+ */
+ testl $(OnProc),ACT_SPF(%ecx)
+ jnz 2f
+ /*
+ * Record the "direct wait" statistic, which indicates that a
+ * miss proceeded to block directly without spinning; this occurs
+ * when the mutex owner is not running on another processor at
+ * the time of the check.
+ */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ incl GRP_MTX_STAT_DIRECT_WAIT(%esi)
+ jmp Llml_ext_block
+2:
+ /*
+ * Here if owner is on another processor:
+ * - release the interlock
+ * - spin on the holder until release or timeout
+ * - in either case re-acquire the interlock
+ * - if released, acquire it
+ * - otherwise drop thru to block.
+ */
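+/*
+ * A hedged C sketch of the spin-then-reacquire protocol just described
+ * (control flow only; the real lck_mtx_lock_spinwait return handling
+ * may differ):
+ *
+ *	mutex->ilk = 0;				// release the interlock
+ *	restore_interrupts(saved_eflags);
+ *	lck_mtx_lock_spinwait(mutex);		// spin on the holder until
+ *						// release or timeout
+ *	retry:
+ *	disable_interrupts();
+ *	if (!__sync_bool_compare_and_swap(&mutex->ilk, 0, current_thread())) {
+ *		restore_interrupts(saved_eflags);
+ *		while (mutex->ilk != 0)		// spin-read until free
+ *			cpu_pause();
+ *		goto retry;
+ *	}
+ *	if (mutex->locked == NULL)
+ *		goto acquire;			// released: take the mutex
+ *	// otherwise fall through to block
+ */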
+ xorl %eax,%eax
+ movl %eax,M_ILK /* zero interlock */
+
+ pushl 8(%esp) /* Make another copy of EFLAGS image */
+ popf /* Restore interrupt state */
+ pushl %edx /* save mutex address */
+ pushl %edx
+ call EXT(lck_mtx_lock_spinwait)
+ addl $4,%esp
+ popl %edx /* restore mutex address */
+
+ /* Re-acquire interlock */
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ jne Llml_ext_ilk_refail /* no, go to spin loop */
+Llml_ext_reget_retry:
+ cli /* disable interrupts */
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+
+ /* eax == 0 at this point */
+ lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
+ jne Llml_ext_ilk_refail /* branch on failure to spin loop */
+
+ movl M_LOCKED,%ecx /* get lock owner */
+ testl %ecx,%ecx /* is the mutex free? */
+ je Llml_ext_acquire /* yes, acquire */
+
+Llml_ext_block:
+ /* If we wanted to count waits just once per lock acquire, we'd
+ * skip over the stat update here.
+ */
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ /* Record that a lock miss proceeded to block */
+ incl GRP_MTX_STAT_WAIT(%esi)
+1:
+ CHECK_MYLOCK(M_THREAD)
+ pushl %edx /* save mutex address */
+ pushl M_LOCKED
+ pushl %edx /* push mutex address */
+ /*
+ * N.B.: lck_mtx_lock_wait is called here with interrupts disabled.
+ * Consider reworking.
+ */
+ call EXT(lck_mtx_lock_wait) /* wait for the lock */
+ addl $8,%esp
+ popl %edx /* restore mutex address */
+ jmp Llml_ext_restart /* and start over */
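+/*
+ * A hedged C sketch of the block path above (argument order inferred from
+ * the right-to-left cdecl pushes; the real prototype may differ):
+ *
+ *	lck_mtx_lock_wait(mutex, holder);	// sleep until woken by unlock
+ *	goto restart;				// then retry from the top,
+ *						// re-taking the interlock
+ */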
+
+Llml_ext_ilk_refail:
+ movl 8(%esp),%ecx /* fetch EFLAGS image saved at entry (beneath %edi/%esi) */
+ pushl %ecx
+ popf /* restore interrupt state */
+
+Llml_ext_ilk_reloop:
+ PAUSE
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ je Llml_ext_reget_retry /* yes - go try to grab it */
+
+ cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
+ jne Llml_ext_ilk_reloop /* no - keep spinning */
+
+ pushl %edx
+ call EXT(lck_mtx_interlock_panic)
+ /*
+ * shouldn't return from here, but just in case
+ */
+ popl %edx
+ jmp Llml_ext_ilk_reloop
+
+
+
+NONLEAF_ENTRY(lck_mtx_try_lock_spin)