+Llmls_ilk_fail:
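+ /*
+ * The cmpxchg at Llmls_retry failed and interrupts are still disabled
+ * from the cli above.  Pop the EFLAGS image saved with pushf at entry,
+ * restoring the caller's interrupt state so we don't spin with
+ * interrupts off, then push a fresh copy so the stack layout expected
+ * by the return path is unchanged.
+ */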
+ popf /* restore interrupt state */
+ pushf /* resave interrupt state on stack */
+
+Llmls_ilk_loop:
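+ /*
+ * Spin with PAUSE until the interlock reads as free, then go back and
+ * retry the cmpxchg.  If the interlock word reads MUTEX_DESTROYED, the
+ * mutex was torn down while we waited, so report it via
+ * lck_mtx_interlock_panic().
+ */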
+ PAUSE
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ je Llmls_retry /* yes - go try to grab it */
+
+ cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
+ jne Llmls_ilk_loop /* no - keep spinning */
+
+ pushl %edx
+ call EXT(lck_mtx_interlock_panic)
+ /*
+ * shouldn't return from here, but just in case
+ */
+ popl %edx
+ jmp Llmls_ilk_loop
+
+
+Llmls_eval_ilk:
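+ /*
+ * The interlock read as non-zero.  Either this is an indirect mutex
+ * (M_ITAG == MUTEX_IND), in which case M_PTR points at the real lock
+ * and we take the extended, statistics-gathering path, or the
+ * interlock is genuinely held and we just spin for it.
+ */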
+ cmpl $(MUTEX_IND),M_ITAG /* Is this an indirect mutex? */
+ cmove M_PTR,%edx /* If so, take indirection */
+ jne Llmls_ilk_loop /* If not, go to spin loop */
+
+Llmls_lck_ext:
+ pushl %esi /* Used to hold the lock group ptr */
+ pushl %edi /* Used for stat update records */
+ movl MUTEX_GRP(%edx),%esi /* Load lock group */
+ xorl %edi,%edi /* Clear stat update records */
+ /* 64-bit increment of acquire attempt statistic (per-group) */
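+ /* addl sets CF if the low word wraps; jnc/incl carry into the high word */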
+ LOCK_IF_ATOMIC_STAT_UPDATES
+ addl $1, GRP_MTX_STAT_UTIL(%esi)
+ jnc 1f
+ incl GRP_MTX_STAT_UTIL+4(%esi)
+1:
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ jne Llmls_ext_ilk_loop /* no, go to spin loop */
+Llmls_ext_retry:
+ cli /* disable interrupts */
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+
+ /* eax == 0 at this point */
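+ /*
+ * cmpxchg compares M_ILK with %eax (0): if still free, %ecx (the
+ * current thread) is stored and ZF is set; otherwise the observed
+ * value is loaded into %eax and ZF is cleared, sending us to the
+ * fail path.
+ */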
+ lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
+ jne Llmls_ext_ilk_fail /* branch on failure to retry */
+
+ movl M_LOCKED,%ecx /* get lock owner */
+ testl %ecx,%ecx /* is the mutex locked? */
+ jne Llml_ext_fail /* yes, we lose */
+
+ popl %edi
+ popl %esi
+ jmp Llmls_acquire
+
+Llmls_ext_ilk_fail:
+ /*
+ * The interlock is held by someone else: restore the caller's interrupt
+ * state before spinning.  EFLAGS was saved with pushf at entry and %esi
+ * and %edi were pushed after it, so the saved flags sit at 8(%esp);
+ * push a copy and popf it, leaving the original in place for the
+ * return path.
+ */
+ movl 8(%esp),%ecx
+ pushl %ecx
+ popf /* restore interrupt state */
+
+Llmls_ext_ilk_loop:
+ PAUSE
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ je Llmls_ext_retry /* yes - go try to grab it */
+
+ cmpl $(MUTEX_DESTROYED),%eax /* check to see if it's marked destroyed */
+ jne Llmls_ext_ilk_loop /* no - keep spinning */
+
+ pushl %edx
+ call EXT(lck_mtx_interlock_panic)
+ /*
+ * shouldn't return from here, but just in case
+ */
+ popl %edx
+ jmp Llmls_ext_ilk_loop /* keep spinning */
+
+
+
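+/*
+ * lck_mtx_lock()
+ *
+ * Fast path: take the interlock with a cmpxchg while interrupts are
+ * disabled; if the mutex is unowned, record the current thread as the
+ * owner, handle any waiters, release the interlock and return.
+ * Interlock contention, an owned mutex and indirect (MUTEX_IND) locks
+ * are handled in the slower paths (Llml_ilk_fail, Llml_fail,
+ * Llml_eval_ilk).
+ */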
+NONLEAF_ENTRY(lck_mtx_lock)
+
+ movl B_ARG0,%edx /* fetch lock pointer */
+ pushf /* save interrupt state */
+
+ CHECK_NO_SIMPLELOCKS()
+ CHECK_PREEMPTION_LEVEL()
+
+ movl M_ILK,%eax /* read interlock */
+ testl %eax,%eax /* unlocked? */
+ jne Llml_eval_ilk /* no, go see if indirect */
+Llml_retry:
+ cli /* disable interrupts */
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+
+ /* eax == 0 at this point */
+ lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
+ jne Llml_ilk_fail /* branch on failure to spin loop */
+
+ movl M_LOCKED,%ecx /* get lock owner */
+ testl %ecx,%ecx /* is the mutex locked? */
+ jne Llml_fail /* yes, we lose */
+Llml_acquire:
+ movl %gs:CPU_ACTIVE_THREAD,%ecx
+ movl %ecx,M_LOCKED
+
+ cmpw $0,M_WAITERS /* are there any waiters? */
+ jne Lml_waiters /* yes, more work to do */
+Llml_return:
+ xorl %eax,%eax
+ movl %eax,M_ILK
+
+ popf /* restore interrupt state */
+ leave
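+ /*
+ * DTrace lockstat: when the acquire probe is enabled, the ret at the
+ * patch point below is patched out so execution falls through to
+ * LOCKSTAT_RECORD with the lock pointer still in %edx.
+ */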
+#if CONFIG_DTRACE
+ LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point)
+ ret
+ /* inherit lock pointer in %edx above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, %edx)