/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <ppc/proc_reg.h>
#include <assym.s>
+
+#include <config_dtrace.h>
+#if CONFIG_DTRACE
+ #define LOCKSTAT_LABEL(lab) \
+ .data __ASMNL__ \
+ .globl lab __ASMNL__ \
+ lab: __ASMNL__ \
+ .long 9f __ASMNL__ \
+ .text __ASMNL__ \
+ 9: __ASMNL__
+
+ .globl _dtrace_probe, _lockstat_probemap
+#define LOCKSTAT_RECORD(id) \
+ lis r6,hi16(_lockstat_probemap) __ASMNL__ \
+ ori r6,r6,lo16(_lockstat_probemap) __ASMNL__ \
+ lwz r5,4*id(r6) __ASMNL__ \
+ mr. r5,r5 __ASMNL__ \
+ beqlr-- __ASMNL__ \
+ mr r4,r3 __ASMNL__ \
+ mr r3,r5 __ASMNL__ \
+ li r5,0 __ASMNL__ \
+ li r6,0 __ASMNL__ \
+ li r7,0 __ASMNL__ \
+ li r8,0 __ASMNL__ \
+ PROLOG(0) __ASMNL__ \
+ bl _dtrace_probe __ASMNL__ \
+ EPILOG
+#endif
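+
+; A sketch of how the two macros pair up (the label name below is
+; illustrative; the real uses follow the lck_mtx_unlock path later in this
+; file). LOCKSTAT_LABEL stores, in a data word named lab, the address of the
+; blr that follows its invocation, so lockstat can patch that blr to a nop
+; while the probe is enabled; execution then falls through into
+; LOCKSTAT_RECORD, which fetches the probe id for "id" from
+; _lockstat_probemap, returns early if the probe is disabled, and otherwise
+; calls _dtrace_probe(probe_id, lock, 0, 0, 0, 0).
+;
+;	LOCKSTAT_LABEL(_example_lockstat_patch_point)
+;	blr				; patch point: nop'ed while the probe is enabled
+;	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE)
+;	blr				; return after the probe has fired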
+
+
+
#define STRING ascii
#define ILK_LOCKED 0x01
#define WAIT_FLAG 0x02
#define WANT_UPGRADE 0x04
#define WANT_EXCL 0x08
+#define PRIV_EXCL 0x8000
#define TH_FN_OWNED 0x01
*/
.align 5
.globl EXT(hw_atomic_or)
-
LEXT(hw_atomic_or)
-
+ .globl EXT(hw_atomic_or_noret)
+LEXT(hw_atomic_or_noret)
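+; Note: hw_atomic_or_noret is an alias entry that falls straight into the
+; hw_atomic_or body below; callers of the _noret form presumably just ignore
+; the updated value left in r3. hw_atomic_and_noret below is set up the same way.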
mr r6,r3 ; Save the area
ortry: lwarx r3,0,r6 ; Grab the area value
*/
.align 5
.globl EXT(hw_atomic_and)
-
LEXT(hw_atomic_and)
-
+ .globl EXT(hw_atomic_and_noret)
+LEXT(hw_atomic_and_noret)
mr r6,r3 ; Save the area
andtry: lwarx r3,0,r6 ; Grab the area value
LEXT(mlckPatch_isync)
isync ; stop prefetching
blr
+; TODO: debug making the blr above a lockstat patch point so the acquire
+; can be recorded:
+; LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE)
mlckspin00:
cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
lwz r2,ACT_MACT_SPF(r10) ; Get the special flags
rlwinm. r2,r2,0,OnProcbit,OnProcbit ; Is OnProcbit set?
beq mlckslow0 ; Lock owner isn't running
- lis r2,hi16(TH_OPT_DELAYIDLE) ; Get DelayedIdle Option
- ori r2,r2,lo16(TH_OPT_DELAYIDLE) ; Get DelayedIdle Option
- lwz r10,THREAD_OPTIONS(r10) ; Get the thread options
- and. r10,r10,r2 ; Is DelayedIdle set?
- bne mlckslow0 ; Lock owner is in delay idle
+ lis r2,hi16(TH_IDLE) ; Get thread idle state
+ ori r2,r2,lo16(TH_IDLE) ; Get thread idle state
+ lwz r10,THREAD_STATE(r10) ; Get the thread state
+ and. r10,r10,r2 ; Is idle set?
+ bne mlckslow0 ; Lock owner is idling
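+; Spin-wait heuristic: the owner must be on a processor and must not be the
+; idle thread. The old check keyed off the TH_OPT_DELAYIDLE thread option;
+; it now looks at TH_IDLE in the thread state, presumably because an idle
+; owner is not about to release the mutex, so the slow path is taken instead.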
mftb r10 ; Time stamp us now
sub r10,r10,r8 ; Get the elapsed time
bne-- mluSlowX
stwcx. r5,MUTEX_DATA,r3
bne-- mluLoop
+#if CONFIG_DTRACE
+/* lock released - LS_LCK_MTX_UNLOCK_RELEASE */
+ LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point)
blr
+ LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE)
+#endif
+ blr
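+; The first blr above is the patch point named by LOCKSTAT_LABEL: it is the
+; normal return while LS_LCK_MTX_UNLOCK_RELEASE is disabled, and is
+; presumably nop'ed by lockstat when the probe is armed so the record code
+; runs and returns via the second blr.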
+
+
mluSlow0:
cmpli cr0,r5,MUTEX_IND ; Is it a mutex indirect
bne-- L_mutex_unlock_slow ; No, go handle contention
b epStart ; Go enable preemption...
-/*
- * void enter_funnel_section(funnel_t *)
- *
- */
- .align 5
- .globl EXT(enter_funnel_section)
-
-LEXT(enter_funnel_section)
-
-#if !MACH_LDEBUG
- lis r10,hi16(EXT(kdebug_enable))
- ori r10,r10,lo16(EXT(kdebug_enable))
- lwz r10,0(r10)
- lis r11,hi16(EXT(split_funnel_off))
- ori r11,r11,lo16(EXT(split_funnel_off))
- lwz r11,0(r11)
- or. r10,r11,r10 ; Check kdebug_enable or split_funnel_off
- bne- L_enter_funnel_section_slow ; If set, call the slow path
- mfsprg r6,1 ; Get the current activation
- lwz r7,LOCK_FNL_MUTEX(r3)
-
- lwz r5,0(r7) ; Get lock quickly
- mr. r5,r5 ; Locked?
- bne-- L_enter_funnel_section_slow ; Yup...
-
-L_enter_funnel_section_loop:
- lwarx r5,0,r7 ; Load the mutex lock
- mr. r5,r5
- bne-- L_enter_funnel_section_slowX ; Go to the slow path
- stwcx. r6,0,r7 ; Grab the lock
- bne-- L_enter_funnel_section_loop ; Loop back if failed
- .globl EXT(entfsectPatch_isync)
-LEXT(entfsectPatch_isync)
- isync ; Stop prefeteching
- li r7,TH_FN_OWNED
- stw r3,THREAD_FUNNEL_LOCK(r6) ; Set the funnel lock reference
- stw r7,THREAD_FUNNEL_STATE(r6) ; Set the funnel state
- blr
-
-L_enter_funnel_section_slowX:
- li r4,lgKillResv ; Killing field
- stwcx. r4,0,r4 ; Kill reservation
-
-L_enter_funnel_section_slow:
-#endif
- li r4,TRUE
- b EXT(thread_funnel_set)
-
-/*
- * void exit_funnel_section(void)
- *
- */
- .align 5
- .globl EXT(exit_funnel_section)
-
-LEXT(exit_funnel_section)
-
- mfsprg r6,1 ; Get the current activation
- lwz r3,THREAD_FUNNEL_LOCK(r6) ; Get the funnel lock
- mr. r3,r3 ; Check on funnel held
- beq- L_exit_funnel_section_ret ;
-#if !MACH_LDEBUG
- lis r10,hi16(EXT(kdebug_enable))
- ori r10,r10,lo16(EXT(kdebug_enable))
- lwz r10,0(r10)
- mr. r10,r10
- bne- L_exit_funnel_section_slow ; If set, call the slow path
- lwz r7,LOCK_FNL_MUTEX(r3) ; Get the funnel mutex lock
- .globl EXT(retfsectPatch_isync)
-LEXT(retfsectPatch_isync)
- isync
- .globl EXT(retfsectPatch_eieio)
-LEXT(retfsectPatch_eieio)
- eieio
-
- lwz r5,0(r7) ; Get lock
- rlwinm. r4,r5,0,30,31 ; Quick check for bail if pending waiter or interlock set
- bne-- L_exit_funnel_section_slow ; No can get...
-
-L_exit_funnel_section_loop:
- lwarx r5,0,r7
- rlwinm. r4,r5,0,30,31 ; Bail if pending waiter or interlock set
- li r5,0 ; Clear the mutexlock
- bne-- L_exit_funnel_section_slowX
- stwcx. r5,0,r7 ; Release the funnel mutexlock
- bne-- L_exit_funnel_section_loop
- li r7,0
- stw r7,THREAD_FUNNEL_STATE(r6) ; Clear the funnel state
- stw r7,THREAD_FUNNEL_LOCK(r6) ; Clear the funnel lock reference
- blr ; Return
-
-L_exit_funnel_section_slowX:
- li r4,lgKillResv ; Killing field
- stwcx. r4,0,r4 ; Kill it
-
-L_exit_funnel_section_slow:
-#endif
- li r4,FALSE
- b EXT(thread_funnel_set)
-L_exit_funnel_section_ret:
- blr
-
/*
* void lck_rw_lock_exclusive(lck_rw_t*)
*
.globl EXT(lock_write)
LEXT(lock_write)
#endif
+ lis r7,0xFFFF
+ ori r7,r7,(WANT_EXCL|WANT_UPGRADE|ILK_LOCKED)
rwleloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
- rlwinm. r7,r5,30,1,31 ; Can we have it?
+ and. r8,r5,r7 ; Can we have it?
ori r6,r5,WANT_EXCL ; Mark Exclusive
bne-- rwlespin ; Branch if cannot be held
stwcx. r6,RW_DATA,r3 ; Update lock word
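+; Exclusive acquire now tests the full lock word against an explicit mask
+; built in r7: 0xFFFF0000 (the shared-reader count in the upper halfword)
+; plus WANT_EXCL, WANT_UPGRADE and ILK_LOCKED. The lock is granted only when
+; all of those are zero, replacing the old rlwinm rotate-and-mask test.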
#endif
rwlsloop: lwarx r5,RW_DATA,r3 ; Grab the lock value
andi. r7,r5,WANT_EXCL|WANT_UPGRADE|ILK_LOCKED ; Can we have it?
+ bne-- rwlsopt ; Branch if cannot be held
+rwlsloopres:
addis r6,r5,1 ; Increment read cnt
- bne-- rwlsspin ; Branch if cannot be held
stwcx. r6,RW_DATA,r3 ; Update lock word
bne-- rwlsloop
.globl EXT(rwlsPatch_isync)
LEXT(rwlsPatch_isync)
isync
blr
+rwlsopt:
+ andi. r7,r5,PRIV_EXCL|ILK_LOCKED ; Can we have it?
+ bne-- rwlsspin ; Branch if cannot be held
+ lis r7,0xFFFF ; Get read cnt mask
+ and. r8,r5,r7 ; Is it shared
+ bne rwlsloopres ; Branch if can be held
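+; rwlsopt is taken when WANT_EXCL or WANT_UPGRADE is already set: if the
+; interlock is free and PRIV_EXCL (0x8000) is clear, i.e. exclusive waiters
+; are not privileged over readers, a new shared hold is still granted as
+; long as the read count in the upper halfword is nonzero. The try-lock
+; path below (rwtlsopt) applies the same test, minus ILK_LOCKED, which it
+; has already checked.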
rwlsspin:
li r4,lgKillResv ; Killing field
stwcx. r4,0,r4 ; Kill it
.globl EXT(rwlsePatch_isync)
LEXT(rwlsePatch_isync)
isync
- li r3,0 ; Succeed, return FALSE...
+ li r3,1 ; Succeed, return TRUE...
blr
rwlsespin:
li r4,lgKillResv ; Killing field
andi. r7,r5,ILK_LOCKED ; Test interlock flag
bne-- rwtlsspin ; Branch if interlocked
andi. r7,r5,WANT_EXCL|WANT_UPGRADE ; So, can we have it?
+ bne-- rwtlsopt ; Branch if held exclusive
+rwtlsloopres:
addis r6,r5,1 ; Increment read cnt
- bne-- rwtlsfail ; Branch if held exclusive
stwcx. r6,RW_DATA,r3 ; Update lock word
bne-- rwtlsloop
.globl EXT(rwtlsPatch_isync)
isync
li r3,1 ; Return TRUE
blr
+rwtlsopt:
+ andi. r7,r5,PRIV_EXCL ; Can we have it?
+ bne-- rwtlsfail ; Branch if cannot be held
+ lis r7,0xFFFF ; Get read cnt mask
+ and. r8,r5,r7 ; Is it shared
+ bne rwtlsloopres ; Branch if can be held
rwtlsfail:
li r3,0 ; Return FALSE
blr