-/*
- * void hw_lock_unlock(hw_lock_t)
- *
- * Unconditionally release lock.
- * MACH_RT: release preemption level.
- */
-LEAF_ENTRY(hw_lock_unlock)
- movl L_ARG0,%edx /* fetch lock pointer */
- movl $0,0(%edx) /* clear the lock */
- ENABLE_PREEMPTION
- LEAF_RET
-
-/*
- * void i386_lock_unlock_with_flush(hw_lock_t)
- *
- * Unconditionally release lock, followed by a cacheline flush of
- * the line containing the lock dword. This routine is currently
- * used with certain locks which are susceptible to lock starvation;
- * flushing the line minimizes cache affinity for lock acquisitions.
- * A queued spinlock or other mechanism that ensures fairness would
- * obviate the need for this routine, but ideally few or no spinlocks
- * should exhibit enough contention to require such measures.
- * MACH_RT: release preemption level.
- */
-LEAF_ENTRY(i386_lock_unlock_with_flush)
- movl L_ARG0,%edx /* Fetch lock pointer */
- movl $0,0(%edx) /* Clear the lock */
- mfence /* Serialize prior stores */
- clflush 0(%edx) /* Write back and invalidate line */
- ENABLE_PREEMPTION
- LEAF_RET
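-
-/*
- * Roughly equivalent C, as an illustrative sketch only (not part of
- * these sources; assumes hw_lock_t points at a volatile 32-bit lock
- * word and that enable_preemption() stands in for ENABLE_PREEMPTION):
- *
- *    *lock = 0;                                // clear the lock
- *    __asm__ volatile("mfence" ::: "memory");  // serialize prior stores
- *    __asm__ volatile("clflush (%0)" :: "r" (lock) : "memory");
- *    enable_preemption();
- */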
-
-/*
- * unsigned int hw_lock_try(hw_lock_t)
- * MACH_RT: returns with preemption disabled on success.
- */
-LEAF_ENTRY(hw_lock_try)
- movl L_ARG0,%edx /* fetch lock pointer */
-
- movl %gs:CPU_ACTIVE_THREAD,%ecx /* current thread is the lock value */
- DISABLE_PREEMPTION
- movl 0(%edx),%eax /* read lock word (expected value for cmpxchg) */
- testl %eax,%eax /* already held? */
- jne 1f /* yes - fail without attempting the cmpxchg */
- lock; cmpxchgl %ecx,0(%edx) /* try to acquire the HW lock */
- jne 1f
-
- movl $1,%eax /* success */
- LEAF_RET
-
-1:
- ENABLE_PREEMPTION /* failure: release preemption... */
- xorl %eax,%eax /* ...and return failure */
- LEAF_RET
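-
-/*
- * Illustrative C-level sketch of the try-acquire above (an assumption,
- * not the kernel's C interface; current_thread() and the preemption
- * calls are stand-ins for the macros used here):
- *
- *    disable_preemption();
- *    if (*lock == 0 &&
- *        __sync_bool_compare_and_swap(lock, 0, (uintptr_t)current_thread()))
- *        return 1;        // acquired; preemption stays disabled
- *    enable_preemption();
- *    return 0;            // lock was held
- */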
-
-/*
- * unsigned int hw_lock_held(hw_lock_t)
- * MACH_RT: doesn't change preemption state.
- * N.B. Racy, of course.
- */
-LEAF_ENTRY(hw_lock_held)
- movl L_ARG0,%edx /* fetch lock pointer */
-
- movl 0(%edx),%eax /* check lock value */
- testl %eax,%eax
- movl $1,%ecx
- cmovne %ecx,%eax /* 0 => unlocked, 1 => locked */
- LEAF_RET
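-
-/*
- * C-level sketch (illustrative): return *lock != 0; the cmovne above
- * just normalizes any non-zero owner value to 1.
- */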
-
-LEAF_ENTRY(mutex_init)
- movl L_ARG0,%edx /* fetch lock pointer */
- xorl %eax,%eax
- movl %eax,M_ILK /* clear interlock */
- movl %eax,M_LOCKED /* clear locked flag */
- movw %ax,M_WAITERS /* init waiter count */
- movw %ax,M_PROMOTED_PRI /* and promoted priority */
-
-#if MACH_LDEBUG
- movl $ MUTEX_TAG,M_TYPE /* set lock type */
- movl %eax,M_PC /* init caller pc */
- movl %eax,M_THREAD /* and owning thread */
-#endif
-
- LEAF_RET
-
-NONLEAF_ENTRY2(mutex_lock,_mutex_lock)
-
- movl B_ARG0,%edx /* fetch lock pointer */
-
- CHECK_MUTEX_TYPE()
- CHECK_NO_SIMPLELOCKS()
- CHECK_PREEMPTION_LEVEL()
-
- pushf /* save interrupt state */
- cli /* disable interrupts */
-Lml_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-
-Lml_get_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Lml_ilk_fail /* no - take the slow path */
-
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Lml_get_hw /* branch on failure to retry */
-
- movl M_LOCKED,%ecx /* get lock owner */
- testl %ecx,%ecx /* is the mutex locked? */
- jne Lml_fail /* yes, we lose */
-Lml_acquire:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
- movl %ecx,M_LOCKED
-
-#if MACH_LDEBUG
- movl %ecx,M_THREAD
- movl B_PC,%ecx
- movl %ecx,M_PC
-#endif
-
- cmpw $0,M_WAITERS /* are there any waiters? */
- jne Lml_waiters /* yes, more work to do */
-Lml_return:
- xorl %eax,%eax
- movl %eax,M_ILK /* release the interlock */
-
- popf /* restore interrupt state */
-
- NONLEAF_RET
-
-Lml_waiters:
- pushl %edx /* save mutex address */
- pushl %edx /* push mutex address (argument) */
- call EXT(lck_mtx_lock_acquire)
- addl $4,%esp
- popl %edx /* restore mutex address */
- jmp Lml_return
-
-Lml_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Lml_retry /* try again */
-
-Lml_fail:
- /*
- * Check if the owner is on another processor and therefore
- * we should try to spin before blocking.
- */
- testl $(OnProc),ACT_SPF(%ecx)
- jz Lml_block
-
- /*
- * Here if owner is on another processor:
- * - release the interlock
- * - spin on the holder until release or timeout
- * - in either case re-acquire the interlock
- * - if released, acquire it
- * - otherwise drop thru to block.
- */
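- /*
- * The same sequence as a rough C sketch (illustrative only; the
- * helper names below are stand-ins for the calls that follow):
- *
- *    m->ilk = 0;              // release the interlock
- *    lck_mtx_lock_spin(m);    // spin while the holder runs elsewhere
- *    interlock_acquire(m);    // retake interlock, interrupts off
- *    if (m->locked == NULL)
- *        goto acquire;        // holder dropped it: take the mutex
- *    // otherwise fall through and block
- */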
- xorl %eax,%eax
- movl %eax,M_ILK /* zero interlock */
- popf /* restore interrupt state */
- pushf /* re-save it for the final popf */
-
- pushl %edx /* lock address */
- call EXT(lck_mtx_lock_spin) /* call out to do spinning */
- addl $4,%esp
- movl B_ARG0,%edx /* refetch mutex address */
-
- /* Re-acquire interlock */
- cli /* disable interrupts */
-Lml_reget_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-
-Lml_reget_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Lml_ilk_refail /* no - slow path */
-
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Lml_reget_hw /* branch on failure to retry */
-
- movl M_LOCKED,%ecx /* get lock owner */
- testl %ecx,%ecx /* is the mutex free? */
- je Lml_acquire /* yes, acquire */
-
-Lml_block:
- CHECK_MYLOCK(M_THREAD)
- pushl M_LOCKED
- pushl %edx /* push mutex address */
- call EXT(lck_mtx_lock_wait) /* wait for the lock */
- addl $8,%esp
- movl B_ARG0,%edx /* refetch mutex address */
- cli /* ensure interrupts disabled */
- jmp Lml_retry /* and try again */
-
-Lml_ilk_refail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Lml_reget_retry /* try again */
-
-NONLEAF_ENTRY2(mutex_try,_mutex_try)
-
- movl B_ARG0,%edx /* fetch lock pointer */
-
- CHECK_MUTEX_TYPE()
- CHECK_NO_SIMPLELOCKS()
-
- pushf /* save interrupt state */
- cli /* disable interrupts */
-Lmt_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-
-Lmt_get_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Lmt_ilk_fail /* no - slow path */
-
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Lmt_get_hw /* branch on failure to retry */
-
- movl M_LOCKED,%ecx /* get lock owner */
- testl %ecx,%ecx /* is the mutex locked? */
- jne Lmt_fail /* yes, we lose */
- movl %gs:CPU_ACTIVE_THREAD,%ecx
- movl %ecx,M_LOCKED
-
-#if MACH_LDEBUG
- movl %ecx,M_THREAD
- movl B_PC,%ecx
- movl %ecx,M_PC
-#endif
-
- cmpl $0,M_WAITERS /* are there any waiters? */
- jne Lmt_waiters /* yes, more work to do */
-Lmt_return:
- xorl %eax,%eax
- movl %eax,M_ILK
- popf /* restore interrupt state */
-
- movl $1,%eax
-
- NONLEAF_RET
-
-Lmt_waiters:
- pushl %edx /* save mutex address */
- pushl %edx
- call EXT(lck_mtx_lock_acquire)
- addl $4,%esp
- popl %edx /* restore mutex address */
- jmp Lmt_return
-
-Lmt_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Lmt_retry /* try again */
-
-Lmt_fail:
- xorl %eax,%eax
- movl %eax,M_ILK
-
- popf /* restore interrupt state */
-
- xorl %eax,%eax
-
- NONLEAF_RET
-
-NONLEAF_ENTRY(mutex_unlock)
- movl B_ARG0,%edx /* fetch lock pointer */
-
- CHECK_MUTEX_TYPE()
- CHECK_THREAD(M_THREAD)
-
- pushf /* save interrupt state */
- cli /* disable interrupts */
-Lmu_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-
-Lmu_get_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Lmu_ilk_fail /* no - slow path */
-
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Lmu_get_hw /* branch on failure to retry */
-
- cmpw $0,M_WAITERS /* are there any waiters? */
- jne Lmu_wakeup /* yes, more work to do */
-
-Lmu_doit:
-
-#if MACH_LDEBUG
- movl $0,M_THREAD /* disown thread */
-#endif
-
- xorl %ecx,%ecx
- movl %ecx,M_LOCKED /* unlock the mutex */
-
- movl %ecx,M_ILK
-
- popf /* restore interrupt state */
-
- NONLEAF_RET
-
-Lmu_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Lmu_retry /* try again */
-
-Lmu_wakeup:
- pushl M_LOCKED
- pushl %edx /* push mutex address */
- call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
- addl $8,%esp
- movl B_ARG0,%edx /* restore lock pointer */
- jmp Lmu_doit
-
-/*
- * lck_mtx_lock()
- * lck_mtx_try_lock()
- * lck_mtx_unlock()
- *
- * These are variants of mutex_lock(), mutex_try() and mutex_unlock() without
- * DEBUG checks (which require fields not present in lck_mtx_t's).
- */
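-
-/*
- * These entries also handle "indirect" locks: when M_ITAG holds
- * MUTEX_IND, M_PTR supplies the real mutex. Roughly, as a C sketch
- * (field names illustrative):
- *
- *    if (lck->tag == MUTEX_IND)
- *        lck = lck->ptr;
- */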
-NONLEAF_ENTRY(lck_mtx_lock)
-
- movl B_ARG0,%edx /* fetch lock pointer */
- cmpl $(MUTEX_IND),M_ITAG /* is this indirect? */
- cmove M_PTR,%edx /* yes - take indirection */
-
- CHECK_NO_SIMPLELOCKS()
- CHECK_PREEMPTION_LEVEL()
-
- pushf /* save interrupt state */
- cli /* disable interrupts */
-Llml_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-
-Llml_get_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Llml_ilk_fail /* no - slow path */
-
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Llml_get_hw /* branch on failure to retry */
-
- movl M_LOCKED,%ecx /* get lock owner */
- testl %ecx,%ecx /* is the mutex locked? */
- jne Llml_fail /* yes, we lose */
-Llml_acquire:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
- movl %ecx,M_LOCKED
-
- cmpl $0,M_WAITERS /* are there any waiters? */
- jne Llml_waiters /* yes, more work to do */
-Llml_return:
- xorl %eax,%eax
- movl %eax,M_ILK
-
- popf /* restore interrupt state */
-
- NONLEAF_RET
-
-Llml_waiters:
- pushl %edx /* save mutex address */
- pushl %edx
- call EXT(lck_mtx_lock_acquire)
- addl $4,%esp
- popl %edx /* restore mutex address */
- jmp Llml_return
-
-Llml_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Llml_retry /* try again */
-
-Llml_fail:
- /*
- * Check if the owner is on another processor and therefore
- * we should try to spin before blocking.
- */
- testl $(OnProc),ACT_SPF(%ecx)
- jz Llml_block
-
- /*
- * Here if owner is on another processor:
- * - release the interlock
- * - spin on the holder until release or timeout
- * - in either case re-acquire the interlock
- * - if released, acquire it
- * - otherwise drop thru to block.
- */
- xorl %eax,%eax
- movl %eax,M_ILK /* zero interlock */
- popf /* restore interrupt state */
- pushf /* re-save it for the final popf */
-
- pushl %edx /* save mutex address */
- pushl %edx
- call EXT(lck_mtx_lock_spin)
- addl $4,%esp
- popl %edx /* restore mutex address */
-
- /* Re-acquire interlock */
- cli /* disable interrupts */
-Llml_reget_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-
-Llml_reget_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Llml_ilk_refail /* no - slow path */
-
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Llml_reget_hw /* branch on failure to retry */
-
- movl M_LOCKED,%ecx /* get lock owner */
- testl %ecx,%ecx /* is the mutex free? */
- je Llml_acquire /* yes, acquire */
-
-Llml_block:
- CHECK_MYLOCK(M_THREAD)
- pushl %edx /* save mutex address */
- pushl M_LOCKED
- pushl %edx /* push mutex address */
- call EXT(lck_mtx_lock_wait) /* wait for the lock */
- addl $8,%esp
- popl %edx /* restore mutex address */
- cli /* ensure interrupts disabled */
- jmp Llml_retry /* and try again */
-
-Llml_ilk_refail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Llml_reget_retry /* try again */
-
-NONLEAF_ENTRY(lck_mtx_try_lock)
-
- movl B_ARG0,%edx /* fetch lock pointer */
- cmpl $(MUTEX_IND),M_ITAG /* is this indirect? */
- cmove M_PTR,%edx /* yes - take indirection */
-
- CHECK_NO_SIMPLELOCKS()
- CHECK_PREEMPTION_LEVEL()
-
- pushf /* save interrupt state */
- cli /* disable interrupts */
-Llmt_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-
-Llmt_get_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Llmt_ilk_fail /* no - slow path */
-
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Llmt_get_hw /* branch on failure to retry */
-
- movl M_LOCKED,%ecx /* get lock owner */
- testl %ecx,%ecx /* is the mutex locked? */
- jne Llmt_fail /* yes, we lose */
- movl %gs:CPU_ACTIVE_THREAD,%ecx
- movl %ecx,M_LOCKED
-
- cmpl $0,M_WAITERS /* are there any waiters? */
- jne Llmt_waiters /* yes, more work to do */
-Llmt_return:
- xorl %eax,%eax
- movl %eax,M_ILK
-
- popf /* restore interrupt state */
-
- movl $1,%eax /* return success */
- NONLEAF_RET
-
-Llmt_waiters:
- pushl %edx /* save mutex address */
- pushl %edx
- call EXT(lck_mtx_lock_acquire)
- addl $4,%esp
- popl %edx /* restore mutex address */
- jmp Llmt_return
-
-Llmt_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Llmt_retry /* try again */
-
-Llmt_fail:
- xorl %eax,%eax
- movl %eax,M_ILK
-
- popf /* restore interrupt state */
-
- xorl %eax,%eax /* return failure */
- NONLEAF_RET
-
-NONLEAF_ENTRY(lck_mtx_unlock)
-
- movl B_ARG0,%edx /* fetch lock pointer */
- cmpl $(MUTEX_IND),M_ITAG /* is this indirect? */
- cmove M_PTR,%edx /* yes - take indirection */
-
- pushf /* save interrupt state */
- cli /* disable interrupts */
-Llmu_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-
-Llmu_get_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Llmu_ilk_fail /* no - slow path */
-
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Llmu_get_hw /* branch on failure to retry */
-
- cmpw $0,M_WAITERS /* are there any waiters? */
- jne Llmu_wakeup /* yes, more work to do */
-
-Llmu_doit:
- xorl %ecx,%ecx
- movl %ecx,M_LOCKED /* unlock the mutex */
-
- movl %ecx,M_ILK
-
- popf /* restore interrupt state */
-
- NONLEAF_RET
-
-Llmu_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Llmu_retry /* try again */
-
-Llmu_wakeup:
- pushl %edx /* save mutex address */
- pushl M_LOCKED
- pushl %edx /* push mutex address */
- call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
- addl $8,%esp
- popl %edx /* restore mutex pointer */
- jmp Llmu_doit
-
-LEAF_ENTRY(lck_mtx_ilk_unlock)
- movl L_ARG0,%edx /* no indirection here */
-
- xorl %eax,%eax
- movl %eax,M_ILK
-
- LEAF_RET
-
-LEAF_ENTRY(_disable_preemption)
-#if MACH_RT
- _DISABLE_PREEMPTION
-#endif /* MACH_RT */
- LEAF_RET
-
-LEAF_ENTRY(_enable_preemption)
-#if MACH_RT
-#if MACH_ASSERT
- cmpl $0,%gs:CPU_PREEMPTION_LEVEL
- jg 1f
- pushl %gs:CPU_PREEMPTION_LEVEL
- pushl $2f
- call EXT(panic)