*
* @APPLE_LICENSE_HEADER_START@
*
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
andc r10,r11,r10 __ASMNL__ \
mtmsr r10 __ASMNL__ \
isync __ASMNL__ \
- mfsprg r10,0 __ASMNL__ \
- lwz r10,PP_ACTIVE_THREAD(r10) __ASMNL__ \
+ mfsprg r10,1 __ASMNL__ \
+ lwz r10,ACT_THREAD(r10) __ASMNL__ \
cmpwi r10,0 __ASMNL__ \
beq- 1f __ASMNL__ \
lwz r9,thread_offset(r3) __ASMNL__ \
andc r10,r11,r10 __ASMNL__ \
mtmsr r10 __ASMNL__ \
isync __ASMNL__ \
- mfsprg r10,0 __ASMNL__ \
- lwz r10,PP_ACTIVE_THREAD(r10) __ASMNL__ \
+ mfsprg r10,1 __ASMNL__ \
+ lwz r10,ACT_THREAD(r10) __ASMNL__ \
cmpwi r10,0 __ASMNL__ \
beq- 1f __ASMNL__ \
lwz r9, thread_offset(r3) __ASMNL__ \
#if !MACH_LDEBUG
mfsprg r6,1 ; load the current thread
lwz r5,0(r3) ; Get the lock quickly
+ li r4,0
+ li r8,0
mr. r5,r5 ; Quick check
- bne-- L_mutex_lock_slow ; Can not get it right now...
+ bne-- mlckspin1 ; Can not get it right now...
-L_mutex_lock_loop:
+mlcktry:
lwarx r5,0,r3 ; load the mutex lock
mr. r5,r5
- bne-- L_mutex_lock_slowX ; go to the slow path
+ bne-- mlckspin0 ; Can not get it right now...
stwcx. r6,0,r3 ; grab the lock
- bne-- L_mutex_lock_loop ; loop back if failed
+ bne-- mlcktry ; loop back if failed
isync ; stop prefeteching
+ mflr r8
+ stw r8,4(r3)
blr
-L_mutex_lock_slowX:
+mlckspin0:
li r5,lgKillResv ; Killing field
stwcx. r5,0,r5 ; Kill reservation
+mlckspin1:
+ mr. r4,r4 ; Test timeout value
+ bne++ mlckspin2
+ lis r4,hi16(EXT(MutexSpin)) ; Get the high part
+ ori r4,r4,lo16(EXT(MutexSpin) ) ; And the low part
+ lwz r4,0(r4) ; Get spin timerout value
+ mr. r4,r4 ; Test spin timeout value
+ beq mlckslow1 ; Is spin timeout set to zero
+
+mlckspin2: mr. r8,r8 ; Is r8 set to zero
+ bne++ mlckspin3 ; If yes, first spin attempt
+ lis r0,hi16(MASK(MSR_VEC)) ; Get vector enable
+ mfmsr r9 ; Get the MSR value
+ ori r0,r0,lo16(MASK(MSR_FP)) ; Get FP enable
+ ori r7,r0,lo16(MASK(MSR_EE)) ; Get EE bit on too
+ andc r9,r9,r0 ; Clear FP and VEC
+ andc r7,r9,r7 ; Clear EE as well
+ mtmsr r7 ; Turn off interruptions
+ isync ; May have turned off vec and fp here
+ mftb r8 ; Get timestamp on entry
+ b mlcksniff
+
+mlckspin3: mtmsr r7 ; Turn off interruptions
+ mftb r8 ; Get timestamp on entry
+
+mlcksniff: lwz r5,0(r3) ; Get that lock in here
+ mr. r5,r5 ; Is the lock held
+ beq++ mlckretry ; No, try for it again...
+ rlwinm r5,r5,0,0,29 ; Extract the lock owner
+ mr. r5,r5 ; Quick check
+ beq++ mlckslow0 ; InterLock is held
+ lwz r10,ACT_MACT_SPF(r5) ; Get the special flags
+ rlwinm. r10,r10,0,OnProcbit,OnProcbit ; Is OnProcbit set?
+ beq mlckslow0 ; Lock owner isn't running
+
+ mftb r10 ; Time stamp us now
+ sub r10,r10,r8 ; Get the elapsed time
+ cmplwi r10,128 ; Have we been spinning for 128 tb ticks?
+ blt++ mlcksniff ; Not yet...
+
+ mtmsr r9 ; Say, any interrupts pending?
+
+; The following instructions force the pipeline to be interlocked so that only one
+; instruction is issued per cycle. This ensures that we stay enabled for a long enough
+; time; if it's too short, pending interruptions will not have a chance to be taken.
-L_mutex_lock_slow:
+ subi r4,r4,128 ; Back off elapsed time from timeout value
+ or r4,r4,r4 ; Do nothing here but force a single cycle delay
+ mr. r4,r4 ; See if we used the whole timeout
+ or r4,r4,r4 ; Do nothing here but force a single cycle delay
+
+ ble-- mlckslow1 ; We failed
+ b mlckspin1 ; Now that we've opened an enable window, keep trying...
+mlckretry:
+ mtmsr r9 ; Restore interrupt state
+ li r8,1 ; Show already through once
+ b mlcktry
+mlckslow0: ; We couldn't get the lock
+ mtmsr r9 ; Restore interrupt state
+
+mlckslow1:
#endif
#if CHECKNMI
mflr r12 ; (TEST/DEBUG)
bne L_mutex_lock_assert_wait_1
lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
+ PROLOG(0)
bl EXT(panic)
+ BREAKPOINT_TRAP ; We die here anyway
.data
L_mutex_lock_assert_wait_panic_str:
lis r3,hi16(mutex_failed1) ; Get the failed mutex message
ori r3,r3,lo16(mutex_failed1) ; Get the failed mutex message
+ PROLOG(0)
bl EXT(panic) ; Call panic
BREAKPOINT_TRAP ; We die here anyway, can not get the lock
mfmsr r11 ; Note: no need to deal with fp or vec here
andc r5,r11,r5
mtmsr r5
- mfsprg r9,0 ; Get the per_proc block
+ mfsprg r9,1 ; Get the current activation
lwz r5,0(r1) ; Get previous save frame
lwz r5,FM_LR_SAVE(r5) ; Get our caller's address
- lwz r8, PP_ACTIVE_THREAD(r9) ; Get the active thread
+ lwz r8, ACT_THREAD(r9) ; Get the active thread
stw r5,MUTEX_PC(r3) ; Save our caller
mr. r8,r8 ; Is there any thread?
stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
lis r3,hi16(mutex_failed2) ; Get the failed mutex message
ori r3,r3,lo16(mutex_failed2) ; Get the failed mutex message
+ PROLOG(0)
bl EXT(panic) ; Call panic
BREAKPOINT_TRAP ; We die here anyway, can not get the lock
andc r5,r11,r5 ; Clear EE as well
mtmsr r5
- mfsprg r9,0 ; Get the per_proc block
+ mfsprg r9,1 ; Get the current activation
lwz r5,0(r1) ; Get previous save frame
lwz r5,FM_LR_SAVE(r5) ; Get our caller's address
- lwz r8, PP_ACTIVE_THREAD(r9) ; Get the active thread
+ lwz r8,ACT_THREAD(r9) ; Get the active thread
stw r5,MUTEX_PC(r3) ; Save our caller
mr. r8,r8 ; Is there any thread?
stw r8,MUTEX_THREAD(r3) ; Set the mutex's holding thread
lis r3,hi16(mutex_failed3) ; Get the failed mutex message
ori r3,r3,lo16(mutex_failed3) ; Get the failed mutex message
+ PROLOG(0)
bl EXT(panic) ; Call panic
BREAKPOINT_TRAP ; We die here anyway, can not get the lock
andc r9,r11,r9 ; Clear EE as well
mtmsr r9
- mfsprg r9,0
- lwz r9,PP_ACTIVE_THREAD(r9)
+ mfsprg r9,1
+ lwz r9,ACT_THREAD(r9)
stw r9,MUTEX_THREAD(r3) ; disown thread
cmpwi r9,0
beq- .L_mu_no_active_thread
EPILOG ; Deal with the stack now, enable_preemption doesn't always want one
b epStart ; Go enable preemption...
+/*
+ * boolean_t mutex_preblock(mutex_t*, thread_t)
+ *
+ * Called before blocking on a mutex.  Tries to take the mutex interlock;
+ * on success either hands the mutex over (interlock was the only bit set)
+ * or records the waiter via mutex_preblock_wait.  Returns in r3:
+ * 0 = caller should not block, 1 = caller should block.
+ * NOTE(review): exact hand-off contract depends on mutex_preblock_wait,
+ * which is defined elsewhere — confirm against its implementation.
+ */
+ .align 5
+ .globl EXT(mutex_preblock)
+
+LEXT(mutex_preblock)
+ mr r6,r3 ; Save the mutex address across the quick test
+ lwz r5,LOCK_DATA(r3) ; Peek at the lock word
+ mr. r3,r5 ; Is the mutex free (zero)?
+ beqlr+ ; Yes, nothing to do — return (r3 = 0 = don't block)
+ mr r3,r6 ; Restore the mutex address for the slow path
+
+ PROLOG(0) ; Build a stack frame — we will make calls below
+ stw r4,(FM_ARG0-4)(r1) ; Stash the thread argument in the frame
+
+ bl EXT(hw_lock_try) ; Try to grab the mutex interlock (no spin)
+ mr. r4,r3 ; Test the hw_lock_try result
+ lwz r3,FM_ARG0(r1) ; Recover the mutex address from the frame
+ bne+ mpbGotInt ; Nonzero result: we hold the interlock
+
+ li r3,0 ; Couldn't get the interlock — tell caller not to block
+
+ EPILOG
+
+ blr
+
+mpbGotInt:
+ lwz r6,LOCK_DATA(r3) ; Re-read the lock word under the interlock
+ rlwinm. r5,r6,0,0,30 ; Clear the low (interlock) bit; anything else set?
+ bne+ mpbInUse ; Yes — a holder and/or waiters exist
+
+ stw r5,LOCK_DATA(r3) ; Only the interlock was set: store 0, freeing the mutex
+
+ bl epStart ; Re-enable preemption (hw_lock_try disabled it)
+
+ li r3,0 ; Mutex became free — caller should retry, not block
+
+ EPILOG
+
+ blr
+
+mpbInUse:
+ lwz r4,(FM_ARG0-4)(r1) ; Reload the thread argument
+ rlwinm r5,r6,0,0,29 ; Mask low two bits: r5 = owning-thread pointer
+ bl EXT(mutex_preblock_wait) ; Register the waiter (r3=mutex, r4=thread, r5=holder)
+ lwz r4,FM_ARG0(r1) ; Recover the mutex address (r3 holds the result)
+ mr. r3,r3 ; cr0 = mutex_preblock_wait result, tested below
+ lwz r5,LOCK_DATA(r4) ; Re-read the lock word
+ rlwinm r5,r5,0,0,30 ; Drop the interlock bit in preparation for release
+ beq- mpbUnlock0 ; Wait setup failed — release without the wait flag
+ ori r5,r5,WAIT_FLAG ; Mark the mutex as having waiters
+
+ eieio ; Order the store that releases the interlock
+ stw r5,LOCK_DATA(r4) ; Release interlock, wait flag set
+
+ bl epStart ; Re-enable preemption
+
+ li r3,1 ; Tell the caller to block
+
+ EPILOG
+
+ blr
+
+mpbUnlock0:
+ eieio ; Order the store that releases the interlock
+ stw r5,LOCK_DATA(r4) ; Release interlock, no wait flag
+
+ bl epStart ; Re-enable preemption
+
+ li r3,0 ; Tell the caller not to block
+
+ EPILOG
+
+ blr
+
/*
* void interlock_unlock(hw_lock_t lock)
*/
mr r4,r5
lis r3,hi16(epTooFarStr) ; First half of panic string
ori r3,r3,lo16(epTooFarStr) ; Second half of panic string
+ PROLOG(0)
bl EXT(panic)
+ BREAKPOINT_TRAP ; We die here anyway
.data
epTooFarStr:
ori r3,r3,lo16(slckpanic_str)
mr r4,r5
mflr r5
+ PROLOG(0)
bl EXT(panic)
+ BREAKPOINT_TRAP ; We die here anyway
.data
slckpanic_str: