-Lml_waiters:
- pushl %edx /* save mutex address */
- pushl %edx /* push mutex address as argument */
- call EXT(lck_mtx_lock_acquire)
- addl $4,%esp
- popl %edx /* restore mutex address */
- jmp Lml_return
-
-Lml_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Lml_retry /* try again */
-
-Lml_fail:
- /*
- * Check if the owner is on another processor and therefore
- * we should try to spin before blocking.
- */
- testl $(OnProc),ACT_SPF(%ecx)
- jz Lml_block
-
- /*
- * Here if owner is on another processor:
- * - release the interlock
- * - spin on the holder until release or timeout
- * - in either case re-acquire the interlock
- * - if released, acquire it
- * - otherwise drop thru to block.
- */
- xorl %eax,%eax
- movl %eax,M_ILK /* zero interlock */
- popf /* restore interrupt state */
- pushf /* keep a saved copy on the stack */
-
- push %edx /* lock address */
- call EXT(lck_mtx_lock_spin) /* call out to do spinning */
- addl $4,%esp
- movl B_ARG0,%edx /* refetch mutex address */
-
- /* Re-acquire interlock */
- cli /* disable interrupts */
-Lml_reget_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-
-Lml_reget_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Lml_ilk_refail /* no - slow path */
-
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Lml_reget_hw /* branch on failure to retry */
-
- movl M_LOCKED,%ecx /* get lock owner */
- testl %ecx,%ecx /* is the mutex free? */
- je Lml_acquire /* yes, acquire */
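
For orientation, the removed i386 block implements a classic adaptive mutex: take the interlock, and if the mutex is held by a thread currently running on another processor, spin on the holder before giving up and blocking. Below is a minimal C sketch of that control flow; the struct layout and the helpers marked hypothetical are illustrative stand-ins, while lck_mtx_interlock_spin() and lck_mtx_lock_spin() are the real call-outs visible above.

#include <stdbool.h>
#include <stdint.h>

struct lck_mtx {
    volatile uintptr_t ilk;    /* M_ILK: interlock word */
    volatile uintptr_t owner;  /* M_LOCKED: owning thread, 0 = free */
};                             /* hypothetical field layout */

/* Hypothetical stand-ins for primitives used by the assembly. */
extern void intr_disable(void);                     /* cli */
extern void intr_restore(void);                     /* popf */
extern bool interlock_try(struct lck_mtx *m);       /* lock; cmpxchgl on M_ILK */
extern bool owner_on_processor(struct lck_mtx *m);  /* OnProc flag in ACT_SPF */
extern void mutex_acquire(struct lck_mtx *m);       /* Lml_acquire */
extern void mutex_block(struct lck_mtx *m);         /* Lml_block */
extern void lck_mtx_interlock_spin(struct lck_mtx *m);
extern void lck_mtx_lock_spin(struct lck_mtx *m);

void
adaptive_lock(struct lck_mtx *m)
{
    intr_disable();
    while (!interlock_try(m))
        lck_mtx_interlock_spin(m);      /* Lml_ilk_fail: spin call-out */

    if (m->owner == 0) {
        mutex_acquire(m);               /* mutex free: take it */
        return;
    }
    if (owner_on_processor(m)) {        /* Lml_fail: holder is running */
        m->ilk = 0;                     /* release the interlock */
        intr_restore();
        lck_mtx_lock_spin(m);           /* spin on holder until release or timeout */

        intr_disable();                 /* Lml_reget_retry: re-take interlock */
        while (!interlock_try(m))
            lck_mtx_interlock_spin(m);  /* Lml_ilk_refail */

        if (m->owner == 0) {
            mutex_acquire(m);           /* released while we spun */
            return;
        }
    }
    mutex_block(m);                     /* otherwise drop through to block */
}
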
+/*
+ * lck_mtx_lock()
+ * lck_mtx_try_lock()
+ * lck_mtx_unlock()
+ * lck_mtx_lock_spin()
+ * lck_mtx_lock_spin_always()
+ * lck_mtx_try_lock_spin()
+ * lck_mtx_try_lock_spin_always()
+ * lck_mtx_convert_spin()
+ */
+NONLEAF_ENTRY(lck_mtx_lock_spin_always)
+ mov %rdi, %rdx /* fetch lock pointer (arg0 arrives in %rdi per the x86_64 SysV ABI) */
+ jmp Llmls_avoid_check /* join the common spin path, past its entry check */
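
Unlike the removed i386 code, which fetched its argument from the stack (B_ARG0), the new x86_64 entry receives the lock pointer in %rdi. The Llmls_avoid_check label name suggests the _always variant enters the shared lck_mtx_lock_spin() path just past an entry-time check; a hypothetical C rendering of how the two entries would then relate (lmls_common() and check_preemption_level() are illustrative names, not code shown in this diff):

struct lck_mtx;                               /* opaque */

extern void lmls_common(struct lck_mtx *lck); /* hypothetical: body at Llmls_avoid_check */
extern void check_preemption_level(void);     /* hypothetical entry-time check */

void
lck_mtx_lock_spin(struct lck_mtx *lck)
{
    check_preemption_level();                 /* regular entry performs the check */
    lmls_common(lck);
}

void
lck_mtx_lock_spin_always(struct lck_mtx *lck)
{
    lmls_common(lck);                         /* _always variant skips it */
}
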