/*
 * void InvalidatePoU_Icache(void)  — also exported as invalidate_mmu_icache
 *
 * Invalidate the entire instruction cache to the Point of Unification,
 * Inner Shareable domain. Leaf routine: no stack use, returns via lr.
 * (Fix: removed leftover '+' patch markers that would be rejected by the
 * assembler; instructions themselves unchanged.)
 */
	.globl EXT(invalidate_mmu_icache)
LEXT(InvalidatePoU_Icache)
LEXT(invalidate_mmu_icache)
	dsb		sy				// ensure prior stores (e.g. new code) complete first
	ic		ialluis				// invalidate all icache to PoU, Inner Shareable
	dsb		sy				// wait for the ic op to finish
	isb		sy				// context sync so stale prefetched code is discarded
L_imi_done:
	ret
/*
 * void InvalidatePoU_IcacheRegion(vm_offset_t va, unsigned length)
 *
 * Invalidate icache region to the Point of Unification.
 * (NOTE(review): closing delimiter was missing in this chunk; header
 * reconstructed from the function that follows — confirm against upstream.)
 */
// NOTE(review): this region still carries '+'/'-' patch markers and the
// L_ipui_loop body ('ic ivau' loop and its counter setup) is missing from
// this chunk — diff residue, not assemblable as-is. Code left byte-identical;
// comments only.
.align 2
.globl EXT(InvalidatePoU_IcacheRegion)
LEXT(InvalidatePoU_IcacheRegion)
+ ARM64_STACK_PROLOG
+ PUSH_FRAME
// Clean the dcache region first so the icache refill sees the new bytes.
+ bl EXT(CleanPoU_DcacheRegion)
+#if __ARM_IC_NOALIAS_ICACHE__
// x9 = icache-line-size mask; x2 keeps the sub-line offset of va so the
// (missing) line counter can cover the full [va, va+length) span.
mov x9, #((1<<MMU_I_CLINE)-1)
and x2, x0, x9
bic x0, x0, x9 // Cached aligned
// NOTE(review): branch target L_ipui_loop is not visible in this chunk.
b.pl L_ipui_loop // Loop in counter not null
dsb sy
isb sy
- ret
+L_ipui_done:
+#else
// No per-line invalidate available: fall back to whole-icache invalidate.
+ bl EXT(InvalidatePoU_Icache)
+#endif
+ POP_FRAME
+ ARM64_STACK_EPILOG
/*
 * void CleanPoC_Dcache(void)
 *
 * Clean all dcaches to the Point of Coherency by set/way.
 * (NOTE(review): closing delimiter was missing in this chunk; header
 * reconstructed from the set/way loops that follow — confirm upstream.)
 */
// NOTE(review): fragment of a set/way dcache clean loop — diff residue:
// both the '-' (x-register) and '+' (w-register) forms of the way increment
// are present, and the per-set 'add x0, x0, x9' increment visible in sibling
// loops is missing here. Left byte-identical; comments only.
// x9 = set stride, x10 = set-overflow bit, x11 = way stride (DC CSW encoding).
mov x9, #(1 << MMU_I7SET)
mov x10, #(1 << (MMU_NSET + MMU_I7SET))
mov x11, #(1 << MMU_I7WAY)
+ dmb sy
L_cpcd_dcacheway:
L_cpcd_dcacheline:
dc csw, x0 // clean dcache line by way/set
tst x0, #(1 << (MMU_NSET + MMU_I7SET)) // look for overflow
b.eq L_cpcd_dcacheline
bic x0, x0, x10 // clear set overflow
- adds x0, x0, x11 // increment way
// '+' form uses w0 so the carry out of bit 31 (way field overflow)
// terminates the way loop via b.cc below.
+ adds w0, w0, w11 // increment way
b.cc L_cpcd_dcacheway // loop
#if __ARM_L2CACHE__
// Second pass over the L2: x0 = #2 selects cache level 1 in DC CSW.
mov x0, #2
mov x9, #(1 << L2_I7SET)
mov x10, #(1 << (L2_NSET + L2_I7SET))
mov x11, #(1 << L2_I7WAY)
+ dsb sy
L_cpcd_l2dcacheway:
L_cpcd_l2dcacheline:
dc csw, x0 // clean dcache line by way/set
tst x0, #(1 << (L2_NSET + L2_I7SET)) // look for overflow
b.eq L_cpcd_l2dcacheline
bic x0, x0, x10 // clear set overflow
- adds x0, x0, x11 // increment way
+ adds w0, w0, w11 // increment way
b.cc L_cpcd_l2dcacheway // loop
#endif
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
// NOTE(review): body of CleanPoU_Dcache (label/prologue outside this chunk);
// '+'/'-' patch markers still present — left byte-identical, comments only.
// Apple cores are cache-coherent to PoU, so the Apple path is a no-op;
// the generic path cleans the L1 dcache by set/way.
#if defined(APPLE_ARM64_ARCH_FAMILY)
/* "Fully Coherent." */
#else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
-#error CleanPoU_Dcache needs an implementation
// x0 = set/way operand (start at set 0 / way 0), x9 = set stride,
// x10 = set-overflow bit, x11 = way stride.
+ mov x0, #0
+ mov x9, #(1 << MMU_I7SET)
+ mov x10, #(1 << (MMU_NSET + MMU_I7SET))
+ mov x11, #(1 << MMU_I7WAY)
+ dmb sy
+L_cpud_dcacheway:
+L_cpud_dcacheline:
+ dc csw, x0 // clean dcache line by way/set
+ add x0, x0, x9 // increment set index
+ tst x0, #(1 << (MMU_NSET + MMU_I7SET)) // look for overflow
+ b.eq L_cpud_dcacheline
+ bic x0, x0, x10 // clear set overflow
// w-register add: carry out of bit 31 ends the way loop (b.cc not taken).
+ adds w0, w0, w11 // increment way
+ b.cc L_cpud_dcacheway // loop
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
dsb sy
ret
// NOTE(review): tail of CleanPoU_DcacheRegion (label/Apple path outside this
// chunk); diff residue with both '-' and '+' variants present — the '+' form
// moves the working address/counter into x3/x4, presumably to leave the
// caller's x0/x1 (va/length) intact for a subsequent callee — TODO confirm.
// Left byte-identical; comments only.
#else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
// x9 = dcache-line mask; x2 = va's offset within its line, folded into the
// length so the line counter covers the whole [va, va+length) span.
mov x9, #((1<<MMU_CLINE)-1)
and x2, x0, x9
- bic x0, x0, x9 // Cached aligned
- add x1, x1, x2
- sub x1, x1, #1
- lsr x1, x1, #MMU_CLINE // Set cache line counter
+ bic x3, x0, x9 // Cached aligned
+ add x4, x1, x2
+ sub x4, x4, #1
+ lsr x4, x4, #MMU_CLINE // Set cache line counter
+ dmb sy
L_cpudr_loop:
- dc cvau, x0 // Clean dcache line to PoU
- add x0, x0, #(1<<MMU_CLINE) // Get next cache aligned addr
- subs x1, x1, #1 // Decrementer cache line counter
+ dc cvau, x3 // Clean dcache line to PoU
+ add x3, x3, #(1<<MMU_CLINE) // Get next cache aligned addr
+ subs x4, x4, #1 // Decrementer cache line counter
b.pl L_cpudr_loop // Loop in counter not null
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
dsb sy
// NOTE(review): fragment of CleanPoC_DcacheRegion_internal — the loop body
// (L_cpcdr_loop label and its 'dc' clean instruction) is missing from this
// chunk, '-' markers show PUSH_FRAME/POP_FRAME being removed (leaf routine
// needs no frame), and the trailing '/*' opener is unterminated. Left
// byte-identical; comments only.
.text
.align 2
LEXT(CleanPoC_DcacheRegion_internal)
- PUSH_FRAME
// x9 = dcache-line mask; x2 = sub-line offset of va (folded into the count
// elsewhere); x0 aligned down to a line boundary.
mov x9, #((1<<MMU_CLINE)-1)
and x2, x0, x9
bic x0, x0, x9 // Cached aligned
subs x1, x1, #1 // Decrementer cache line counter
// NOTE(review): branch target L_cpcdr_loop is not visible in this chunk.
b.pl L_cpcdr_loop // Loop in counter not null
dsb sy
- POP_FRAME
ret
/*
b EXT(CleanPoC_DcacheRegion_internal)
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
/*
 * void CleanPoC_DcacheRegion_Force_nopreempt(vm_offset_t va, unsigned length)
 *
 * Clean a dcache region to the Point of Coherency, forcing real dc-by-MVA
 * operations even on Apple cores (which normally ignore them) by temporarily
 * clearing HID4/EHID4.DisDcMVAOps. Caller must already have preemption
 * disabled so the HID toggle and the clean run on the same core.
 * (Fix: removed leftover '+' patch markers that would be rejected by the
 * assembler; instructions themselves unchanged.)
 */
	.text
	.align 2
	.globl EXT(CleanPoC_DcacheRegion_Force_nopreempt)
LEXT(CleanPoC_DcacheRegion_Force_nopreempt)
#if defined(APPLE_ARM64_ARCH_FAMILY)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	isb		sy
	ARM64_IS_PCORE x15			// x15 selects E-core vs P-core HID register below
	ARM64_READ_EP_SPR x15, x14, ARM64_REG_EHID4, ARM64_REG_HID4
	and		x14, x14, (~ARM64_REG_HID4_DisDcMVAOps)	// enable dc-by-MVA ops
	ARM64_WRITE_EP_SPR x15, x14, ARM64_REG_EHID4, ARM64_REG_HID4
	isb		sy				// sync before relying on the new HID setting
	bl		EXT(CleanPoC_DcacheRegion_internal)	// x0/x1 still hold va/length
	isb		sy
	orr		x14, x14, ARM64_REG_HID4_DisDcMVAOps	// restore the disable bit
	ARM64_WRITE_EP_SPR x15, x14, ARM64_REG_EHID4, ARM64_REG_HID4
	isb		sy
	POP_FRAME
	ARM64_STACK_EPILOG
#else
	b		EXT(CleanPoC_DcacheRegion_internal)	// non-Apple: plain clean suffices
#endif // APPLE_ARM64_ARCH_FAMILY

/*
* void CleanPoC_DcacheRegion_Force(vm_offset_t va, unsigned length)
*
* Clean d-cache region to Point of Coherency - when you really
* need to flush even on coherent platforms, e.g. panic log
*/
-.text
+ .text
.align 2
.globl EXT(CleanPoC_DcacheRegion_Force)
LEXT(CleanPoC_DcacheRegion_Force)
- b EXT(CleanPoC_DcacheRegion_internal)
+#if defined(APPLE_ARM64_ARCH_FAMILY)
+ ARM64_STACK_PROLOG
+ PUSH_FRAME
+ stp x0, x1, [sp, #-16]!
+ bl EXT(_disable_preemption)
+ ldp x0, x1, [sp], #16
+ bl EXT(CleanPoC_DcacheRegion_Force_nopreempt)
+ bl EXT(_enable_preemption)
+ POP_FRAME
+ ARM64_STACK_EPILOG
+#else
+ b EXT(CleanPoC_DcacheRegion_internal)
+#endif // APPLE_ARM64_ARCH_FAMILY
/*
 * void FlushPoC_Dcache(void)
 *
 * Clean and invalidate dcaches to the Point of Coherency by set/way.
 * (NOTE(review): closing delimiter was missing in this chunk; header
 * reconstructed from the set/way loops that follow — confirm upstream.)
 */
// NOTE(review): body of FlushPoC_Dcache (label/prologue outside this chunk);
// diff residue — '+'/'-' markers remain, and in the L2 pass the way-stride
// setup (mov x11) and the L_fpcd_l2dcacheway/line labels are missing. Left
// byte-identical; comments only.
// x9 = set stride, x10 = set-overflow bit, x11 = way stride (DC CISW encoding).
mov x9, #(1 << MMU_I7SET)
mov x10, #(1 << (MMU_NSET + MMU_I7SET))
mov x11, #(1 << MMU_I7WAY)
+ dmb sy
L_fpcd_dcacheway:
L_fpcd_dcacheline:
dc cisw, x0 // clean invalidate dcache line by way/set
tst x0, #(1 << (MMU_NSET + MMU_I7SET)) // look for overflow
b.eq L_fpcd_dcacheline
bic x0, x0, x10 // clear set overflow
- adds x0, x0, x11 // increment way
// '+' form uses w0 so carry out of bit 31 terminates the way loop.
+ adds w0, w0, w11 // increment way
b.cc L_fpcd_dcacheway // loop
#if __ARM_L2CACHE__
+ dsb sy
// Second pass over the L2: x0 = #2 selects cache level 1 in DC CISW.
mov x0, #2
mov x9, #(1 << L2_I7SET)
mov x10, #(1 << (L2_NSET + L2_I7SET))
// NOTE(review): L_fpcd_l2dcacheline label and x11 setup missing from chunk.
tst x0, #(1 << (L2_NSET + L2_I7SET)) // look for overflow
b.eq L_fpcd_l2dcacheline
bic x0, x0, x10 // clear set overflow
- adds x0, x0, x11 // increment way
+ adds w0, w0, w11 // increment way
b.cc L_fpcd_l2dcacheway // loop
#endif
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
// NOTE(review): body of FlushPoU_Dcache (label/prologue outside this chunk);
// '+'/'-' markers remain and the per-set increment is missing — diff residue.
// Left byte-identical; comments only.
// x9 = set stride, x10 = set-overflow bit, x11 = way stride.
mov x9, #(1 << MMU_I7SET)
mov x10, #(1 << (MMU_NSET + MMU_I7SET))
mov x11, #(1 << MMU_I7WAY)
+ dmb sy
L_fpud_way:
L_fpud_line:
dc cisw, x0 // clean invalidate dcache line by way/set
tst x0, #1 << (MMU_NSET + MMU_I7SET) // look for overflow
b.eq L_fpud_line
bic x0, x0, x10 // clear set overflow
// '+' form uses w0 so carry out of bit 31 terminates the way loop.
- adds x0, x0, x11 // increment way
+ adds w0, w0, w11 // increment way
b.cc L_fpud_way // loop
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
dsb sy
// NOTE(review): fragment of FlushPoC_DcacheRegion (entry, x2 setup, and the
// loop's terminating branch are outside this chunk). Left byte-identical.
// x1 becomes the cache-line count covering [va, va+length), rounded to lines.
add x1, x1, x2
sub x1, x1, #1
lsr x1, x1, #MMU_CLINE // Set cache line counter
+ dmb sy
L_fpcdr_loop:
dc civac, x0 // Clean invalidate dcache line to PoC
add x0, x0, #(1<<MMU_CLINE) // Get next cache aligned addr