+/*
+ * fast_usimple_lock():
+ *
+ * If EE is off, get the simple lock without incrementing the preemption count and
+ * mark the simple lock with SLOCK_FAST.
+ * If EE is on, call usimple_lock().
+ */
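+
+/*
+ * Roughly equivalent C for the fast path (a sketch only, not the build source;
+ * interrupts_enabled(), load_reserved() and store_conditional() are hypothetical
+ * stand-ins for the mfmsr/MSR_EE test, lwarx and stwcx. used below, and lock_data
+ * stands for the word at offset LOCK_DATA):
+ *
+ *	void fast_usimple_lock(usimple_lock_t l)
+ *	{
+ *		if (interrupts_enabled()) {
+ *			usimple_lock(l);		// slow path keeps the preemption count
+ *			return;
+ *		}
+ *		for (;;) {
+ *			if (load_reserved(&l->lock_data) != 0) {
+ *				usimple_lock(l);	// already held, let the C path handle it
+ *				return;
+ *			}
+ *			if (store_conditional(&l->lock_data, ILK_LOCKED|SLOCK_FAST))
+ *				return;			// got it; isync orders later accesses
+ *		}
+ *	}
+ */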
+ .align 5
+ .globl EXT(fast_usimple_lock)
+
+LEXT(fast_usimple_lock)
+
+ mfmsr r9                              ; get the current MSR
+ andi. r7,r9,lo16(MASK(MSR_EE))        ; isolate the EE (interruption enable) bit
+ bne- L_usimple_lock_c                 ; interruptions enabled, take the C path
+L_usimple_lock_loop:
+ lwarx r4,0,r3                         ; load the lock word and set a reservation
+ li r5,ILK_LOCKED|SLOCK_FAST           ; new value: locked and marked fast
+ mr. r4,r4                             ; is the lock already held?
+ bne- L_usimple_lock_c                 ; yes, take the C path
+ stwcx. r5,0,r3                        ; try to grab the lock
+ bne- L_usimple_lock_loop              ; lost the reservation, try again
+ isync                                 ; no speculative execution past the acquire
+ blr
+L_usimple_lock_c:
+ b EXT(usimple_lock)                   ; take the slow (C) path
+
+/*
+ * fast_usimple_lock_try():
+ *
+ * If EE is off, try to get the simple lock without incrementing the preemption count;
+ * if the lock is acquired, it is marked with SLOCK_FAST.
+ * If EE is on, call usimple_lock_try().
+ */
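+
+/*
+ * Roughly equivalent C (same hypothetical helpers as the sketch above);
+ * returns nonzero on success, zero if the lock is already held:
+ *
+ *	unsigned int fast_usimple_lock_try(usimple_lock_t l)
+ *	{
+ *		if (interrupts_enabled())
+ *			return usimple_lock_try(l);
+ *		for (;;) {
+ *			if (load_reserved(&l->lock_data) != 0)
+ *				return 0;		// already held, fail the try
+ *			if (store_conditional(&l->lock_data, ILK_LOCKED|SLOCK_FAST))
+ *				return 1;		// got it; isync orders later accesses
+ *		}
+ *	}
+ */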
+ .align 5
+ .globl EXT(fast_usimple_lock_try)
+
+LEXT(fast_usimple_lock_try)
+
+ mfmsr r9                              ; get the current MSR
+ andi. r7,r9,lo16(MASK(MSR_EE))        ; isolate the EE (interruption enable) bit
+ bne- L_usimple_lock_try_c             ; interruptions enabled, take the C path
+L_usimple_lock_try_loop:
+ lwarx r4,0,r3                         ; load the lock word and set a reservation
+ li r5,ILK_LOCKED|SLOCK_FAST           ; new value: locked and marked fast
+ mr. r4,r4                             ; is the lock already held?
+ bne- L_usimple_lock_try_fail          ; yes, fail the try
+ stwcx. r5,0,r3                        ; try to grab the lock
+ bne- L_usimple_lock_try_loop          ; lost the reservation, try again
+ li r3,1                               ; return success
+ isync                                 ; no speculative execution past the acquire
+ blr
+L_usimple_lock_try_fail:
+ li r3,0                               ; return failure
+ blr
+L_usimple_lock_try_c:
+ b EXT(usimple_lock_try)               ; take the slow (C) path
+
+/*
+ * fast_usimple_unlock():
+ *
+ * If the simple lock is marked SLOCK_FAST, release it without decrementing the preemption count.
+ * Otherwise, call usimple_unlock().
+ */
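+
+/*
+ * Roughly equivalent C (a sketch; lock_data stands for the word at offset
+ * LOCK_DATA):
+ *
+ *	void fast_usimple_unlock(usimple_lock_t l)
+ *	{
+ *		if (l->lock_data != (ILK_LOCKED|SLOCK_FAST)) {
+ *			usimple_unlock(l);	// not taken by the fast path
+ *			return;
+ *		}
+ *		// sync: complete prior stores before the release
+ *		l->lock_data = 0;
+ *	}
+ */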
+ .align 5
+ .globl EXT(fast_usimple_unlock)
+
+LEXT(fast_usimple_unlock)
+
+ lwz r5,LOCK_DATA(r3)                  ; get the current lock value
+ li r0,0                               ; value that marks the lock free
+ cmpi cr0,r5,ILK_LOCKED|SLOCK_FAST     ; was it taken by the fast path?
+ bne- L_usimple_unlock_c               ; no, take the C path
+ sync                                  ; complete prior stores before the release
+#if 0
+ mfmsr r9                              ; (disabled check) get the current MSR
+ andi. r7,r9,lo16(MASK(MSR_EE))        ; isolate the EE bit
+ beq L_usimple_unlock_cont             ; interruptions are off, all is well
+ lis r3,hi16(L_usimple_unlock_panic)   ; point to the panic string
+ ori r3,r3,lo16(L_usimple_unlock_panic)
+ bl EXT(panic)                         ; interruptions came back on, panic
+
+ .data
+L_usimple_unlock_panic:
+ STRINGD "fast_usimple_unlock: interrupts not disabled\n\000"
+ .text
+L_usimple_unlock_cont:
+#endif
+ stw r0, LOCK_DATA(r3)                 ; release the lock
+ blr
+L_usimple_unlock_c:
+ b EXT(usimple_unlock)                 ; take the slow (C) path
+