/*
* Routines for general lock debugging.
*/
+#define S_TYPE SLOCK_TYPE(%edx)
+#define S_PC SLOCK_PC(%edx)
+#define S_THREAD SLOCK_THREAD(%edx)
+#define S_DURATIONH SLOCK_DURATIONH(%edx)
+#define S_DURATIONL SLOCK_DURATIONL(%edx)
/*
 * Checks for expected lock types and calls "panic" on mismatch.
.text ; \
1:
+#define CHECK_SIMPLE_LOCK_TYPE() \
+ cmpl $ USLOCK_TAG,S_TYPE ; \
+ je 1f ; \
+ pushl $2f ; \
+ call EXT(panic) ; \
+ hlt ; \
+ .data ; \
+2: String "not a simple lock!" ; \
+ .text ; \
+1:
+
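/*
 * Illustrative C sketch (not part of this file) of what the
 * CHECK_SIMPLE_LOCK_TYPE() debug macro above does: compare the lock's
 * type tag against USLOCK_TAG and panic on mismatch.  The struct layout,
 * tag value and panic() prototype here are assumptions for illustration.
 */
#include <stdint.h>

extern void panic(const char *msg) __attribute__((noreturn));

struct usimple_lock_dbg {
	uint32_t	type;		/* expected to hold USLOCK_TAG */
	/* remaining debug fields elided */
};

#define USLOCK_TAG	0x5353		/* placeholder value, not the real tag */

static inline void
check_simple_lock_type(const struct usimple_lock_dbg *l)
{
	if (l->type != USLOCK_TAG)
		panic("not a simple lock!");	/* same string the macro uses */
}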
/*
* If one or more simplelocks are currently held by a thread,
* an attempt to acquire a mutex will cause this check to fail
LEAF_ENTRY(hw_lock_lock)
movl L_ARG0,%edx /* fetch lock pointer */
- movl %gs:CPU_ACTIVE_THREAD,%ecx
- DISABLE_PREEMPTION
-1:
+ movl L_PC,%ecx
+1: DISABLE_PREEMPTION
movl 0(%edx), %eax
testl %eax,%eax /* lock locked? */
jne 3f /* branch if so */
jne 3f
movl $1,%eax /* In case this was a timeout call */
LEAF_RET /* if yes, then nothing left to do */
-3:
+
+3: ENABLE_PREEMPTION /* no reason we can't be preemptable */
PAUSE /* pause for hyper-threading */
jmp 1b /* try again */
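/*
 * Illustrative C sketch (not part of this file) of the hw_lock_lock()
 * acquire loop above: preemption is disabled only around the attempt,
 * the caller's PC (rather than the active thread pointer) is what gets
 * stored in the lock word, and contention spins with PAUSE while
 * preemptable.  The disable/enable_preemption() helpers and type names
 * are assumptions.
 */
#include <stdint.h>

typedef volatile uintptr_t hw_lock_data_t;

extern void disable_preemption(void);
extern void enable_preemption(void);

static void
hw_lock_lock_sketch(hw_lock_data_t *lock, uintptr_t caller_pc)
{
	for (;;) {
		disable_preemption();
		uintptr_t expected = 0;
		if (*lock == 0 &&
		    __atomic_compare_exchange_n(lock, &expected, caller_pc,
			0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return;			/* owned; preemption stays off */
		enable_preemption();		/* stay preemptable while spinning */
		__builtin_ia32_pause();		/* pause for hyper-threading */
	}
}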
LEAF_ENTRY(hw_lock_to)
1:
movl L_ARG0,%edx /* fetch lock pointer */
- movl %gs:CPU_ACTIVE_THREAD,%ecx
+ movl L_PC,%ecx
/*
* Attempt to grab the lock immediately
* - fastpath without timeout nonsense.
adcl $0,%edx /* add carry */
mov %edx,%ecx
mov %eax,%ebx /* %ecx:%ebx is the timeout expiry */
+3:
+ ENABLE_PREEMPTION /* no reason not to be preempted now */
4:
/*
* The inner-loop spin to look for the lock being freed.
cmpl %ecx,%edx /* compare high-order 32-bits */
jb 4b /* continue spinning if less, or */
cmpl %ebx,%eax /* compare low-order 32-bits */
- jb 4b /* continue if less, else bail */
+ jb 5b /* continue if less, else bail */
xor %eax,%eax /* with 0 return value */
pop %ebx
pop %edi
* Here to try to grab the lock that now appears to be free
* after contention.
*/
- movl %gs:CPU_ACTIVE_THREAD,%edx
+ movl 8+L_PC,%edx /* calling pc (8+ for pushed regs) */
+ DISABLE_PREEMPTION
lock; cmpxchgl %edx,0(%edi) /* try to acquire the HW lock */
- jne 4b /* no - spin again */
+ jne 3b /* no - spin again */
movl $1,%eax /* yes */
pop %ebx
pop %edi
ENABLE_PREEMPTION
LEAF_RET
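/*
 * Illustrative C sketch (not part of this file) of the hw_lock_to()
 * timeout spin above: compute an absolute TSC deadline, spin on a plain
 * read of the lock word while preemptable, and only attempt the locked
 * cmpxchg (with preemption disabled) once the lock is observed free.
 * The deadline check per iteration is a simplification, and helper
 * names are assumptions.
 */
#include <stdint.h>
#include <stdbool.h>
#include <x86intrin.h>			/* __rdtsc() */

typedef volatile uintptr_t hw_lock_data_t;

extern void disable_preemption(void);
extern void enable_preemption(void);

static bool
hw_lock_to_sketch(hw_lock_data_t *lock, uintptr_t caller_pc,
    uint64_t timeout_cycles)
{
	uint64_t deadline = __rdtsc() + timeout_cycles;	/* %ecx:%ebx in the asm */

	for (;;) {
		while (*lock != 0) {		/* inner spin on the lock value */
			__builtin_ia32_pause();
			if (__rdtsc() >= deadline)
				return false;	/* timed out, 0 return value */
		}
		disable_preemption();
		uintptr_t expected = 0;
		if (__atomic_compare_exchange_n(lock, &expected, caller_pc,
			0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return true;		/* acquired, preemption off */
		enable_preemption();		/* lost the race; spin again */
	}
}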
-/*
- * void i386_lock_unlock_with_flush(hw_lock_t)
- *
- * Unconditionally release lock, followed by a cacheline flush of
- * the line corresponding to the lock dword. This routine is currently
- * used with certain locks which are susceptible to lock starvation,
- * minimizing cache affinity for lock acquisitions. A queued spinlock
- * or other mechanism that ensures fairness would obviate the need
- * for this routine, but ideally few or no spinlocks should exhibit
- * enough contention to require such measures.
- * MACH_RT: release preemption level.
- */
-LEAF_ENTRY(i386_lock_unlock_with_flush)
- movl L_ARG0,%edx /* Fetch lock pointer */
- movl $0,0(%edx) /* Clear the lock */
- mfence /* Serialize prior stores */
- clflush 0(%edx) /* Write back and invalidate line */
- ENABLE_PREEMPTION
- LEAF_RET
-
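/*
 * Illustrative C sketch (not part of this file) of the removed
 * i386_lock_unlock_with_flush() above: clear the lock word, serialize
 * prior stores, then write back and invalidate the lock's cache line so
 * no CPU retains a cache-affinity advantage for the next acquisition.
 * Type and helper names are assumptions.
 */
#include <stdint.h>
#include <emmintrin.h>			/* _mm_mfence(), _mm_clflush() */

typedef volatile uintptr_t hw_lock_data_t;

extern void enable_preemption(void);

static void
lock_unlock_with_flush_sketch(hw_lock_data_t *lock)
{
	*lock = 0;				/* unconditionally clear the lock */
	_mm_mfence();				/* serialize prior stores */
	_mm_clflush((const void *)lock);	/* write back and invalidate line */
	enable_preemption();			/* MACH_RT: release preemption level */
}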
/*
* unsigned int hw_lock_try(hw_lock_t)
* MACH_RT: returns with preemption disabled on success.
LEAF_ENTRY(hw_lock_try)
movl L_ARG0,%edx /* fetch lock pointer */
- movl %gs:CPU_ACTIVE_THREAD,%ecx
+ movl L_PC,%ecx
DISABLE_PREEMPTION
movl 0(%edx),%eax
testl %eax,%eax
movl $1,%eax /* success */
LEAF_RET
-1:
- ENABLE_PREEMPTION /* failure: release preemption... */
+1: ENABLE_PREEMPTION /* failure: release preemption... */
xorl %eax,%eax /* ...and return failure */
LEAF_RET
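/*
 * Illustrative C sketch (not part of this file) of hw_lock_try() above:
 * a single acquisition attempt that returns success with preemption
 * still disabled, or re-enables preemption and returns failure.  Helper
 * and type names are assumptions.
 */
#include <stdint.h>
#include <stdbool.h>

typedef volatile uintptr_t hw_lock_data_t;

extern void disable_preemption(void);
extern void enable_preemption(void);

static bool
hw_lock_try_sketch(hw_lock_data_t *lock, uintptr_t caller_pc)
{
	disable_preemption();
	uintptr_t expected = 0;
	if (*lock == 0 &&
	    __atomic_compare_exchange_n(lock, &expected, caller_pc,
		0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return true;		/* success: preemption stays disabled */
	enable_preemption();		/* failure: release preemption... */
	return false;			/* ...and return failure */
}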
pushf /* save interrupt state */
cli /* disable interrupts */
-Lml_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-Lml_get_hw:
+ml_retry:
+ movl B_PC,%ecx
+
+ml_get_hw:
movl M_ILK,%eax /* read interlock */
testl %eax,%eax /* unlocked? */
- jne Lml_ilk_fail /* no - take the slow path */
-
+ je 1f /* yes - attempt to lock it */
+ PAUSE /* no - pause */
+ jmp ml_get_hw /* try again */
+1:
lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Lml_get_hw /* branch on failure to retry */
+ jne ml_get_hw /* branch on failure to retry */
movl M_LOCKED,%ecx /* get lock owner */
testl %ecx,%ecx /* is the mutex locked? */
- jne Lml_fail /* yes, we lose */
-Lml_acquire:
+ jne ml_fail /* yes, we lose */
movl %gs:CPU_ACTIVE_THREAD,%ecx
movl %ecx,M_LOCKED
movl %ecx,M_PC
#endif
- cmpw $0,M_WAITERS /* are there any waiters? */
- jne Lml_waiters /* yes, more work to do */
-Lml_return:
- xorl %eax,%eax
- movl %eax,M_ILK
-
- popf /* restore interrupt state */
-
- NONLEAF_RET
-
-Lml_waiters:
pushl %edx /* save mutex address */
pushl %edx
call EXT(lck_mtx_lock_acquire)
addl $4,%esp
popl %edx /* restore mutex address */
- jmp Lml_return
-
-Lml_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Lml_retry /* try again */
-Lml_fail:
- /*
- * Check if the owner is on another processor and therefore
- * we should try to spin before blocking.
- */
- testl $(OnProc),ACT_SPF(%ecx)
- jz Lml_block
-
- /*
- * Here if owner is on another processor:
- * - release the interlock
- * - spin on the holder until release or timeout
- * - in either case re-acquire the interlock
- * - if released, acquire it
- * - otherwise drop thru to block.
- */
xorl %eax,%eax
- movl %eax,M_ILK /* zero interlock */
- popf
- pushf /* restore interrupt state */
-
- push %edx /* lock address */
- call EXT(lck_mtx_lock_spin) /* call out to do spinning */
- addl $4,%esp
- movl B_ARG0,%edx /* refetch mutex address */
-
- /* Re-acquire interlock */
- cli /* disable interrupts */
-Lml_reget_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
+ movl %eax,M_ILK
-Lml_reget_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Lml_ilk_refail /* no - slow path */
+ popf /* restore interrupt state */
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Lml_reget_hw /* branch on failure to retry */
+ NONLEAF_RET
- movl M_LOCKED,%ecx /* get lock owner */
- testl %ecx,%ecx /* is the mutex free? */
- je Lml_acquire /* yes, acquire */
-
-Lml_block:
+ml_fail:
+ml_block:
CHECK_MYLOCK(M_THREAD)
pushl M_LOCKED
pushl %edx /* push mutex address */
call EXT(lck_mtx_lock_wait) /* wait for the lock */
addl $8,%esp
movl B_ARG0,%edx /* refetch mutex address */
- cli /* ensure interrupts disabled */
- jmp Lml_retry /* and try again */
-
-Lml_ilk_refail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Lml_reget_retry /* try again */
+ jmp ml_retry /* and try again */
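/*
 * Illustrative C sketch (not part of this file) of the interlock
 * acquisition pattern used throughout the mutex paths above: interrupts
 * are disabled by the caller (pushf/cli) before this loop, the interlock
 * word spins inline with PAUSE instead of calling out to a slow path,
 * and the caller's PC is stored as the interlock value.  The struct
 * layout and names are assumptions.
 */
#include <stdint.h>

struct mutex_sketch {
	volatile uintptr_t ilk;		/* M_ILK: the interlock word */
	volatile uintptr_t locked;	/* M_LOCKED: owning thread, or 0 */
	volatile uint16_t  waiters;	/* M_WAITERS */
	/* remaining fields elided */
};

static void
mutex_ilk_lock_sketch(struct mutex_sketch *m, uintptr_t caller_pc)
{
	for (;;) {
		if (m->ilk != 0) {		/* interlock held elsewhere */
			__builtin_ia32_pause();	/* spin inline, no callout */
			continue;
		}
		uintptr_t expected = 0;
		if (__atomic_compare_exchange_n(&m->ilk, &expected, caller_pc,
			0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return;			/* interlock grabbed */
	}
}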
NONLEAF_ENTRY2(mutex_try,_mutex_try)
CHECK_MUTEX_TYPE()
CHECK_NO_SIMPLELOCKS()
+ movl B_PC,%ecx
+
pushf /* save interrupt state */
cli /* disable interrupts */
-Lmt_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-Lmt_get_hw:
+mt_get_hw:
movl M_ILK,%eax /* read interlock */
testl %eax,%eax /* unlocked? */
- jne Lmt_ilk_fail /* no - slow path */
-
+ je 1f /* yes - attempt to lock it */
+ PAUSE /* no - pause */
+ jmp mt_get_hw /* try again */
+1:
lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Lmt_get_hw /* branch on failure to retry */
+ jne mt_get_hw /* branch on failure to retry */
movl M_LOCKED,%ecx /* get lock owner */
testl %ecx,%ecx /* is the mutex locked? */
- jne Lmt_fail /* yes, we lose */
+ jne mt_fail /* yes, we lose */
movl %gs:CPU_ACTIVE_THREAD,%ecx
movl %ecx,M_LOCKED
movl %ecx,M_PC
#endif
- cmpl $0,M_WAITERS /* are there any waiters? */
- jne Lmt_waiters /* yes, more work to do */
-Lmt_return:
+ pushl %edx /* save mutex address */
+ pushl %edx
+ call EXT(lck_mtx_lock_acquire)
+ addl $4,%esp
+ popl %edx /* restore mutex address */
+
xorl %eax,%eax
movl %eax,M_ILK
+
popf /* restore interrupt state */
movl $1,%eax
NONLEAF_RET
-Lmt_waiters:
- pushl %edx /* save mutex address */
- pushl %edx
- call EXT(lck_mtx_lock_acquire)
- addl $4,%esp
- popl %edx /* restore mutex address */
- jmp Lmt_return
-
-Lmt_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Lmt_retry /* try again */
-
-Lmt_fail:
+mt_fail:
xorl %eax,%eax
movl %eax,M_ILK
CHECK_MUTEX_TYPE()
CHECK_THREAD(M_THREAD)
+ movl B_PC,%ecx
+
pushf /* save interrupt state */
cli /* disable interrupts */
-Lmu_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-Lmu_get_hw:
+mu_get_hw:
movl M_ILK,%eax /* read interlock */
testl %eax,%eax /* unlocked? */
- jne Lmu_ilk_fail /* no - slow path */
-
+ je 1f /* yes - attempt to lock it */
+ PAUSE /* no - pause */
+ jmp mu_get_hw /* try again */
+1:
lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Lmu_get_hw /* branch on failure to retry */
+ jne mu_get_hw /* branch on failure to retry */
cmpw $0,M_WAITERS /* are there any waiters? */
- jne Lmu_wakeup /* yes, more work to do */
+ jne mu_wakeup /* yes, more work to do */
-Lmu_doit:
+mu_doit:
#if MACH_LDEBUG
movl $0,M_THREAD /* disown thread */
NONLEAF_RET
-Lmu_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Lmu_retry /* try again */
-
-Lmu_wakeup:
+mu_wakeup:
pushl M_LOCKED
pushl %edx /* push mutex address */
call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
addl $8,%esp
movl B_ARG0,%edx /* restore lock pointer */
- jmp Lmu_doit
+ jmp mu_doit
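/*
 * Illustrative C sketch (not part of this file) of the mutex_unlock flow
 * above: with interrupts disabled and the interlock already held, wake a
 * waiter if there are any, clear the owner, then drop the interlock.
 * The struct layout and the wakeup wrapper name are assumptions.
 */
#include <stdint.h>

struct lck_mtx_sketch {
	volatile uintptr_t ilk;		/* M_ILK */
	volatile uintptr_t locked;	/* M_LOCKED: owning thread, or 0 */
	volatile uint16_t  waiters;	/* M_WAITERS */
};

extern void unlock_wakeup_sketch(struct lck_mtx_sketch *m, uintptr_t holder);

static void
mutex_unlock_sketch(struct lck_mtx_sketch *m)
{
	/* interlock is assumed held here (mu_get_hw loop already run) */
	if (m->waiters != 0)			/* cmpw $0,M_WAITERS */
		unlock_wakeup_sketch(m, m->locked);	/* mu_wakeup */
	m->locked = 0;				/* mu_doit: release the mutex */
	__atomic_store_n(&m->ilk, 0, __ATOMIC_RELEASE);	/* drop interlock */
}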
/*
* lck_mtx_lock()
pushf /* save interrupt state */
cli /* disable interrupts */
-Llml_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-Llml_get_hw:
+lml_retry:
+ movl B_PC,%ecx
+
+lml_get_hw:
movl M_ILK,%eax /* read interlock */
testl %eax,%eax /* unlocked? */
- jne Llml_ilk_fail /* no - slow path */
-
+ je 1f /* yes - attempt to lock it */
+ PAUSE /* no - pause */
+ jmp lml_get_hw /* try again */
+1:
lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Llml_get_hw /* branch on failure to retry */
+ jne lml_get_hw /* branch on failure to retry */
movl M_LOCKED,%ecx /* get lock owner */
testl %ecx,%ecx /* is the mutex locked? */
- jne Llml_fail /* yes, we lose */
-Llml_acquire:
+ jne lml_fail /* yes, we lose */
movl %gs:CPU_ACTIVE_THREAD,%ecx
movl %ecx,M_LOCKED
- cmpl $0,M_WAITERS /* are there any waiters? */
- jne Llml_waiters /* yes, more work to do */
-Llml_return:
- xorl %eax,%eax
- movl %eax,M_ILK
-
- popf /* restore interrupt state */
-
- NONLEAF_RET
-
-Llml_waiters:
pushl %edx /* save mutex address */
pushl %edx
call EXT(lck_mtx_lock_acquire)
addl $4,%esp
popl %edx /* restore mutex address */
- jmp Llml_return
-
-Llml_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Llml_retry /* try again */
-
-Llml_fail:
- /*
- * Check if the owner is on another processor and therefore
- * we should try to spin before blocking.
- */
- testl $(OnProc),ACT_SPF(%ecx)
- jz Llml_block
- /*
- * Here if owner is on another processor:
- * - release the interlock
- * - spin on the holder until release or timeout
- * - in either case re-acquire the interlock
- * - if released, acquire it
- * - otherwise drop thru to block.
- */
xorl %eax,%eax
- movl %eax,M_ILK /* zero interlock */
- popf
- pushf /* restore interrupt state */
-
- pushl %edx /* save mutex address */
- pushl %edx
- call EXT(lck_mtx_lock_spin)
- addl $4,%esp
- popl %edx /* restore mutex address */
-
- /* Re-acquire interlock */
- cli /* disable interrupts */
-Llml_reget_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
+ movl %eax,M_ILK
-Llml_reget_hw:
- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- jne Llml_ilk_refail /* no - slow path */
+ popf /* restore interrupt state */
- lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Llml_reget_hw /* branch on failure to retry */
+ NONLEAF_RET
- movl M_LOCKED,%ecx /* get lock owner */
- testl %ecx,%ecx /* is the mutex free? */
- je Llml_acquire /* yes, acquire */
-
-Llml_block:
+lml_fail:
CHECK_MYLOCK(M_THREAD)
pushl %edx /* save mutex address */
pushl M_LOCKED
call EXT(lck_mtx_lock_wait) /* wait for the lock */
addl $8,%esp
popl %edx /* restore mutex address */
- cli /* ensure interrupts disabled */
- jmp Llml_retry /* and try again */
-
-Llml_ilk_refail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Llml_reget_retry /* try again */
+ jmp lml_retry /* and try again */
NONLEAF_ENTRY(lck_mtx_try_lock)
CHECK_NO_SIMPLELOCKS()
CHECK_PREEMPTION_LEVEL()
+ movl B_PC,%ecx
+
pushf /* save interrupt state */
cli /* disable interrupts */
-Llmt_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-Llmt_get_hw:
+lmt_get_hw:
movl M_ILK,%eax /* read interlock */
testl %eax,%eax /* unlocked? */
- jne Llmt_ilk_fail /* no - slow path */
-
+ je 1f /* yes - attempt to lock it */
+ PAUSE /* no - pause */
+ jmp lmt_get_hw /* try again */
+1:
lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Llmt_get_hw /* branch on failure to retry */
+ jne lmt_get_hw /* branch on failure to retry */
movl M_LOCKED,%ecx /* get lock owner */
testl %ecx,%ecx /* is the mutex locked? */
- jne Llmt_fail /* yes, we lose */
+ jne lmt_fail /* yes, we lose */
movl %gs:CPU_ACTIVE_THREAD,%ecx
movl %ecx,M_LOCKED
- cmpl $0,M_WAITERS /* are there any waiters? */
- jne Llmt_waiters /* yes, more work to do */
-Llmt_return:
+ pushl %edx /* save mutex address */
+ pushl %edx
+ call EXT(lck_mtx_lock_acquire)
+ addl $4,%esp
+ popl %edx /* restore mutex address */
+
xorl %eax,%eax
movl %eax,M_ILK
movl $1,%eax /* return success */
NONLEAF_RET
-Llmt_waiters:
- pushl %edx /* save mutex address */
- pushl %edx
- call EXT(lck_mtx_lock_acquire)
- addl $4,%esp
- popl %edx /* restore mutex address */
- jmp Llmt_return
-
-Llmt_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Llmt_retry /* try again */
-
-Llmt_fail:
+lmt_fail:
xorl %eax,%eax
movl %eax,M_ILK
cmpl $(MUTEX_IND),M_ITAG /* is this indirect? */
cmove M_PTR,%edx /* yes - take indirection */
+ movl B_PC,%ecx
+
pushf /* save interrupt state */
cli /* disable interrupts */
-Llmu_retry:
- movl %gs:CPU_ACTIVE_THREAD,%ecx
-Llmu_get_hw:
+lmu_get_hw:
movl M_ILK,%eax /* read interlock */
testl %eax,%eax /* unlocked? */
- jne Llmu_ilk_fail /* no - slow path */
-
+ je 1f /* yes - attempt to lock it */
+ PAUSE /* no - pause */
+ jmp lmu_get_hw /* try again */
+1:
lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */
- jne Llmu_get_hw /* branch on failure to retry */
+ jne lmu_get_hw /* branch on failure to retry */
cmpw $0,M_WAITERS /* are there any waiters? */
- jne Llmu_wakeup /* yes, more work to do */
+ jne lmu_wakeup /* yes, more work to do */
-Llmu_doit:
+lmu_doit:
xorl %ecx,%ecx
movl %ecx,M_LOCKED /* unlock the mutex */
NONLEAF_RET
-Llmu_ilk_fail:
- /*
- * Slow path: call out to do the spinning.
- */
- pushl %edx /* lock address */
- call EXT(lck_mtx_interlock_spin)
- popl %edx /* lock pointer */
- jmp Llmu_retry /* try again */
-
-Llmu_wakeup:
+lmu_wakeup:
pushl %edx /* save mutex address */
pushl M_LOCKED
pushl %edx /* push mutex address */
call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
addl $8,%esp
popl %edx /* restore mutex pointer */
- jmp Llmu_doit
+ jmp lmu_doit
LEAF_ENTRY(lck_mtx_ilk_unlock)
movl L_ARG0,%edx /* no indirection here */