+ mfsprg r6,1 ; r6 = current thread (kept in sprg1)
+ bf MUTEX_ATTR_STATb,mlckestatskip ; Skip stat counting if the stat attribute is off
+ lwz r5,MUTEX_GRP(r3) ; r5 = this mutex's lock group
+ li r7,GRP_MTX_STAT_UTIL+4 ; Offset of the low word of the utilization count
+mlckestatloop:
+ lwarx r8,r7,r5 ; Load-and-reserve the low utilization word
+ addi r8,r8,1 ; Bump the utilization count
+ stwcx. r8,r7,r5 ; Conditionally store it back
+ bne-- mlckestatloop ; Lost the reservation -- retry
+ mr. r8,r8 ; Did the low word wrap to zero?
+ bne++ mlckestatskip ; No wrap -- upper word needs no carry
+ lwz r8,GRP_MTX_STAT_UTIL(r5) ; Wrapped: load the upper word (plain load; stats are advisory)
+ addi r8,r8,1 ; Propagate the carry
+ stw r8,GRP_MTX_STAT_UTIL(r5) ; Store the upper word back
+mlckestatskip:
+ lwz r5,MUTEX_DATA(r3) ; Peek at the lock word before disabling interrupts
+ li r4,0 ; r4 = 0: no spin timeout loaded yet (mlckspin1 loads default)
+ li r8,0 ; r8 = 0: tell mlckspin1 this is the first spin pass
+ lis r0,hi16(MASK(MSR_VEC)) ; Build MSR mask: vector enable...
+ mfmsr r9 ; r9 = current MSR
+ ori r0,r0,lo16(MASK(MSR_FP)) ; ...plus floating-point enable
+ ori r7,r0,lo16(MASK(MSR_EE)) ; r7 mask also covers external interrupts
+ andc r9,r9,r0 ; r9 = MSR with FP and VEC off (restore value)
+ andc r7,r9,r7 ; r7 = MSR with FP, VEC and EE off (disabled value)
+ mtmsr r7 ; Disable interruptions (vec and fp too)
+ isync ; Context-synchronize the MSR change
+ mr. r5,r5 ; Is the lock word zero (free)?
+ bne-- mlckespin01 ; Held -- spin (no reservation to kill yet)
+
+mlcketry:
+ lwarx r5,MUTEX_DATA,r3 ; Load-and-reserve the mutex lock word
+ mr. r5,r5 ; Still free?
+ bne-- mlckespin0 ; Held -- kill our reservation, then spin
+ stwcx. r6,MUTEX_DATA,r3 ; Try to claim it with our thread pointer
+ bne-- mlcketry ; Reservation lost -- retry
+ .globl EXT(mlckePatch_isync)
+LEXT(mlckePatch_isync)
+ isync ; Import barrier: stop speculative prefetching past the acquire
+ mflr r12 ; r12 = our caller (for the debug record)
+ bf MUTEX_ATTR_DEBUGb,mlckedebskip ; Skip debug bookkeeping if the debug attribute is off
+ mr r8,r6 ; r8 = the acquiring (current) thread
+ stw r12,MUTEX_STACK(r3) ; Record our caller's return address
+ stw r8,MUTEX_THREAD(r3) ; Record the holding thread
+ mr r5,r1 ; r5 = current stack pointer
+ LCK_STACK(r3,r5,r6,r7,r8,r10) ; Record stack info into the mutex (macro defined elsewhere)
+mlckedebskip:
+ mtmsr r9 ; Restore interrupt state (any pending interrupts taken now)
+ blr ; Return with the mutex held
+
+mlckespin0:
+ li r5,lgKillResv ; Address of the reservation-kill field
+ stwcx. r5,0,r5 ; Store-conditional there to cancel our reservation
+mlckespin01:
+ mflr r12 ; Preserve our return address across the spin call
+ mtmsr r9 ; Re-enable interrupts while we spin
+ bl mlckspin1 ; Spin until the lock looks obtainable (or timeout)
+ mtmsr r7 ; Disable interruptions again (vec and fp already off)
+ mtlr r12 ; Restore our return address
+ b mlcketry ; Try to grab the lock again
+
+/*
+ * void lck_mtx_lock(lck_mtx_t*)
+ *
+ * Fast path: claim the lock word with lwarx/stwcx. when it is free.
+ * A nonzero lock word means either an indirect (extended) mutex or
+ * contention; both are sorted out at mlckspin00.
+ */
+ .align 5
+ .globl EXT(lck_mtx_lock)
+LEXT(lck_mtx_lock)
+
+#if !MACH_LDEBUG
+ .globl EXT(mutex_lock)
+LEXT(mutex_lock)
+
+ .globl EXT(_mutex_lock)
+LEXT(_mutex_lock)
+#endif
+
+ mfsprg r6,1 ; r6 = current thread (kept in sprg1)
+ lwz r5,MUTEX_DATA(r3) ; Peek at the lock word
+ mr r11,r3 ; r11 = lock address (saved for error reporting)
+ li r4,0 ; r4 = 0: no spin timeout loaded yet (mlckspin1 loads default)
+ li r8,0 ; r8 = 0: tell mlckspin1 this is the first spin pass
+ li r9,0 ; r9 = 0: MSR not yet saved (mlckspin1 will save it)
+ mr. r5,r5 ; Is the lock word zero (free)?
+ bne-- mlckspin00 ; Nonzero: indirect mutex, or can not get it right now
+
+mlcktry:
+ lwarx r5,MUTEX_DATA,r3 ; Load-and-reserve the mutex lock word
+ mr. r5,r5 ; Still free?
+ bne-- mlckspin01 ; Held -- kill our reservation, then spin
+ stwcx. r6,MUTEX_DATA,r3 ; Try to claim it with our thread pointer
+ bne-- mlcktry ; Reservation lost -- retry
+ .globl EXT(mlckPatch_isync)
+LEXT(mlckPatch_isync)
+ isync ; Import barrier: stop speculative prefetching past the acquire
+ blr ; Return with the mutex held
+; Need to debug making blr above a patch point and record:
+; LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE)
+
+mlckspin00:
+ cmpli cr0,r5,MUTEX_IND ; Is this an indirect (extended) mutex?
+ bne-- mlckspin02 ; No -- plain contention, go spin
+ lwz r3,MUTEX_PTR(r3) ; Yes -- r3 = pointer to the extended mutex
+ b mlckeEnter ; Continue on the extended-mutex acquire path
+mlckspin01:
+ li r5,lgKillResv ; Address of the reservation-kill field
+ stwcx. r5,0,r5 ; Store-conditional there to cancel our reservation
+mlckspin02:
+ mflr r12 ; Preserve our return address across the spin call
+ li r0,0
+ mtcrf 1,r0 ; Zero cr7 (spin-state flags, incl. mlckmiss, live there -- see mlckspin1)
+ bl mlckspin1 ; Spin until the lock looks obtainable (or timeout)
+ mtlr r12 ; Restore our return address
+ b mlcktry ; Try to grab the lock again
+
+
+mlckspin1:
+ mr. r4,r4 ; Was a spin timeout already supplied in r4?
+ bne++ mlckspin2 ; Yes -- use it
+ lis r4,hi16(EXT(MutexSpin)) ; No -- address of the global default...
+ ori r4,r4,lo16(EXT(MutexSpin) ) ; ...spin timeout, MutexSpin
+ lwz r4,0(r4) ; r4 = spin timeout value (timebase ticks)
+ mr. r4,r4 ; Is spinning enabled at all?
+ bne++ mlckspin2 ; Nonzero timeout -- go spin
+ crclr mlckmiss ; Zero timeout: no miss recorded
+ b mlckslow1 ; Don't try to spin; take the slow path
+
+mlckspin2: mr. r8,r8 ; First time through? (r8 == 0 on entry)
+ bne++ mlckspin3 ; No -- MSR already saved; just disable and timestamp
+ crclr mlckmiss ; First pass: no stat miss counted yet
+ mr. r9,r9 ; Did the caller already save the MSR in r9?
+ bne++ mlckspin3 ; Yes -- skip the save/disable setup
+ lis r0,hi16(MASK(MSR_VEC)) ; Build MSR mask: vector enable...
+ mfmsr r9 ; r9 = current MSR (restore value)
+ ori r0,r0,lo16(MASK(MSR_FP)) ; ...plus floating-point enable
+ ori r7,r0,lo16(MASK(MSR_EE)) ; r7 mask also covers external interrupts
+ andc r9,r9,r0 ; r9 = MSR with FP and VEC off
+ andc r7,r9,r7 ; r7 = MSR with FP, VEC and EE off
+ mtmsr r7 ; Disable interruptions
+ isync ; Context-synchronize the MSR change
+ mftb r8 ; Timestamp the start of this spin window
+ b mlcksniff
+
+mlckspin3: mtmsr r7 ; Disable interruptions (r7 set up earlier)
+ mftb r8 ; Timestamp the start of this spin window
+
+mlcksniff: lwz r5,MUTEX_DATA(r3) ; Sample the lock word
+ mr. r5,r5 ; Is the lock free?
+ beq++ mlckretry ; Yes -- go back and try to grab it
+ rlwinm. r10,r5,0,0,29 ; r10 = owner thread (low two bits masked off)
+ beq++ mlckslow0 ; No owner bits: only the interlock is held -- slow path
+ bf MUTEX_ATTR_STATb,mlStatSkip ; Skip miss accounting if stats are off
+ andi. r5,r5,ILK_LOCKED ; Is the interlock bit set?
+ bne mlStatSkip ; Yes -- don't count a miss while interlocked
+ bt mlckmiss,mlStatSkip ; Miss already counted for this acquire
+ crset mlckmiss ; Remember that the miss is now recorded
+ lwz r5,MUTEX_GRP(r3) ; r5 = lock group
+ addi r5,r5,GRP_MTX_STAT_MISS+4 ; Address of the low word of the miss counter
+mlStatLoop:
+ lwarx r6,0,r5 ; Load-and-reserve the miss count
+ addi r6,r6,1 ; Bump it
+ stwcx. r6,0,r5 ; Conditionally store it back
+ bne-- mlStatLoop ; Lost the reservation -- retry
+ mfsprg r6,1 ; Restore r6 = current thread (clobbered above)
+mlStatSkip:
+ lwz r2,ACT_MACT_SPF(r10) ; Owner thread's special flags
+ rlwinm. r2,r2,0,OnProcbit,OnProcbit ; Is the owner on a processor?
+ beq mlckslow0 ; Owner not running -- spinning is pointless
+ lis r2,hi16(TH_IDLE) ; Build the TH_IDLE mask...
+ ori r2,r2,lo16(TH_IDLE) ; ...both halves
+ lwz r10,THREAD_STATE(r10) ; Owner thread's state
+ and. r10,r10,r2 ; Is the owner idling?
+ bne mlckslow0 ; Yes -- don't spin on an idling owner
+
+ mftb r10 ; Current timebase
+ sub r10,r10,r8 ; Elapsed ticks in this spin window
+ cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
+ blt++ mlcksniff ; Not yet -- keep sniffing the lock word
+
+ mtmsr r9 ; Re-enable interrupts so pending ones can be taken
+
+; The following instructions force the pipeline to be interlocked so that only one
+; instruction is issued per cycle. This ensures that we stay enabled for a long enough
+; time; if it's too short, pending interruptions will not have a chance to be taken
+
+ subi r4,r4,128 ; Charge this 128-tick window against the timeout
+ or r4,r4,r4 ; No-op whose only job is a single-cycle issue delay
+ mr. r4,r4 ; Is the timeout exhausted?
+ or r4,r4,r4 ; No-op whose only job is a single-cycle issue delay
+
+ ble-- mlckslow1 ; Timed out -- fall to the slow (blocking) path
+ b mlckspin3 ; Now that we've opened an enable window, keep trying...
+mlckretry:
+ mtmsr r9 ; Restore interrupt state
+ li r8,1 ; Note we've been through once (skip re-init next call)
+ blr ; Return; caller retries the lwarx/stwcx. acquire
+
+mlckslow0: ; We couldn't get the lock by spinning
+ mtmsr r9 ; Restore interrupt state
+
+mlckslow1:
+ mtlr r12 ; Restore the caller's return address
+
+ PROLOG(0) ; Build a stack frame for the slow path
+.L_ml_retry:
+ bl lockDisa ; Take the mutex's interlock lock
+ mr. r4,r3 ; Nonzero result means we got the interlock
+ lwz r3,FM_ARG0(r1) ; Restore the lock address from the frame
+ bne++ mlGotInt ; Got the interlock -- continue (below)
+ mr r4,r11 ; Pass the failing lock's saved address to panic
+ lis r3,hi16(mutex_failed1) ; Failed-mutex panic message, high half...
+ ori r3,r3,lo16(mutex_failed1) ; ...and low half
+ bl EXT(panic) ; Interlock could not be taken -- panic
+ BREAKPOINT_TRAP ; Should never return; trap if it does
+