-#if __ARM_KERNEL_PROTECT__
-/*
- * __ARM_KERNEL_PROTECT__ adds two complications to TLB management:
- *
- * 1. As each pmap has two ASIDs, every TLB operation that targets an ASID must
- * target both ASIDs for the pmap that owns the target ASID.
- *
- * 2. Any TLB operation targeting the kernel_pmap ASID (ASID 0) must target all
- * ASIDs (as kernel_pmap mappings may be referenced while using an ASID that
- * belongs to another pmap). We expect these routines to be called with the
- * EL0 ASID for the target, not the EL1 ASID.
- */
-#endif /* __ARM_KERNEL_PROTECT__ */
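-
-/*
- * Illustrative sketch (not part of this file): the decisions described above,
- * written out in C.  The FLUSH_MMU_TLB_* macros below make these choices
- * internally; the helper name here is hypothetical and exists only to make
- * the rules concrete.
- *
- *   #include <stdint.h>
- *
- *   extern void flush_mmu_tlb(void);             // flush all ASIDs (defined below)
- *   extern void flush_mmu_tlb_asid(uint64_t);    // flush one pmap's ASIDs (defined below)
- *
- *   static inline void
- *   sketch_flush_pmap_asid(uint64_t el0_asid)
- *   {
- *           if (el0_asid == 0) {
- *                   // Rule 2: kernel mappings may be cached under any ASID,
- *                   // so flush them all.
- *                   flush_mmu_tlb();
- *           } else {
- *                   // Rule 1: pass the EL0 ASID; FLUSH_MMU_TLB_ASID also
- *                   // flushes the pmap's EL1 twin ASID.
- *                   flush_mmu_tlb_asid(el0_asid << TLBI_ASID_SHIFT);
- *           }
- *   }
- */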
-
-.macro SYNC_TLB_FLUSH
- dsb ish // Ensure prior TLB invalidates have completed (inner-shareable domain)
- isb sy // Synchronize the instruction stream with the updated translations
-.endmacro
-
-
-/*
- * void sync_tlb_flush(void)
- *
- * Synchronize one or more prior TLB flush operations
- */
- .text
- .align 2
- .globl EXT(sync_tlb_flush)
-LEXT(sync_tlb_flush)
- SYNC_TLB_FLUSH
- ret
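-
-/*
- * Usage sketch (illustrative, not from this file): the *_async variants below
- * only post the TLBI; a caller issuing several of them can pay for a single
- * set of barriers at the end via sync_tlb_flush().  The helper name is
- * hypothetical.
- *
- *   #include <stdint.h>
- *
- *   extern void flush_mmu_tlb_asid_async(uint64_t);   // defined below
- *   extern void sync_tlb_flush(void);
- *
- *   static inline void
- *   sketch_flush_two_asids(uint64_t op_a, uint64_t op_b)
- *   {
- *           flush_mmu_tlb_asid_async(op_a);   // post both invalidates...
- *           flush_mmu_tlb_asid_async(op_b);
- *           sync_tlb_flush();                 // ...then wait once for completion
- *   }
- */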
-
-
-.macro FLUSH_MMU_TLB
- tlbi vmalle1is // Invalidate all stage 1 EL1&0 TLB entries (all ASIDs), broadcast to the inner-shareable domain
-.endmacro
-/*
- * void flush_mmu_tlb_async(void)
- *
- * Flush all TLBs, don't wait for completion
- */
- .text
- .align 2
- .globl EXT(flush_mmu_tlb_async)
-LEXT(flush_mmu_tlb_async)
- FLUSH_MMU_TLB
- ret
-
-/*
- * void flush_mmu_tlb(void)
- *
- * Flush all TLBs
- */
- .text
- .align 2
- .globl EXT(flush_mmu_tlb)
-LEXT(flush_mmu_tlb)
- FLUSH_MMU_TLB
- SYNC_TLB_FLUSH
- ret
-
-.macro FLUSH_CORE_TLB
- tlbi vmalle1 // As vmalle1is, but local to the executing core (no inner-shareable broadcast)
-.endmacro
-
-/*
- * void flush_core_tlb_async(void)
- *
- * Flush local core TLB, don't wait for completion
- */
- .text
- .align 2
- .globl EXT(flush_core_tlb_async)
-LEXT(flush_core_tlb_async)
- FLUSH_CORE_TLB
- ret
-
-/*
- * void flush_core_tlb(void)
- *
- * Flush local core TLB
- */
- .text
- .align 2
- .globl EXT(flush_core_tlb)
-LEXT(flush_core_tlb)
- FLUSH_CORE_TLB
- SYNC_TLB_FLUSH
- ret
-
-.macro FLUSH_MMU_TLB_ALLENTRIES
-#if __ARM_16K_PG__
- and x0, x0, #~0x3
-
- /*
- * The code below is not necessarily correct. From an overview of
- * the client code, the expected contract for TLB flushes is that
- * we will expand from an "address, length" pair to "start address,
- * end address" in the course of a TLB flush. This suggests that
- * a flush for "X, X+4" is actually only asking for a flush of a
- * single 16KB page. At the same time, we'd like to be prepared
- * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
- * number to a 16KB page boundary. This should deal correctly with
- * unaligned inputs; see the worked example following this macro.
- *
- * If our expectations about client behavior are wrong, however, this
- * will lead to occasional TLB corruption on platforms with 16KB
- * pages.
- */
- add x1, x1, #0x3
- and x1, x1, #~0x3
-#endif
-1: // Lflush_mmu_tlb_allentries_loop:
- tlbi vaae1is, x0
- add x0, x0, #(ARM_PGBYTES / 4096) // Units are 4KB pages, as defined by the ISA
- cmp x0, x1
- b.lt 1b // Lflush_mmu_tlb_allentries_loop
-.endmacro
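-
-/*
- * Worked example of the 16KB rounding above (illustrative, not from the
- * source).  The operands are in units of 4KB pages, so with 16KB pages the
- * loop steps by ARM_PGBYTES / 4096 = 4:
- *
- *   start x0 = 0x105, end x1 = 0x109       (unaligned input)
- *   x0 &= ~0x3            ->  x0 = 0x104   (round start down to a 16KB page)
- *   x1 = (x1 + 3) & ~0x3  ->  x1 = 0x10c   (round end up to a 16KB page)
- *
- * The loop then issues tlbi vaae1is for 0x104 and 0x108, i.e. the two 16KB
- * pages that cover the requested range.
- */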
-
-/*
- * void flush_mmu_tlb_allentries_async(uint64_t, uint64_t)
- *
- * Flush TLB entries, don't wait for completion
- */
- .text
- .align 2
- .globl EXT(flush_mmu_tlb_allentries_async)
-LEXT(flush_mmu_tlb_allentries_async)
- FLUSH_MMU_TLB_ALLENTRIES
- ret
-
-/*
- * void flush_mmu_tlb_allentries(uint64_t, uint64_t)
- *
- * Flush TLB entries
- */
- .text
- .align 2
- .globl EXT(flush_mmu_tlb_allentries)
-LEXT(flush_mmu_tlb_allentries)
- FLUSH_MMU_TLB_ALLENTRIES
- SYNC_TLB_FLUSH
- ret
-
-.macro FLUSH_MMU_TLB_ENTRY
-#if __ARM_KERNEL_PROTECT__
- /*
- * If we are flushing ASID 0, this is a kernel operation. With this
- * ASID scheme, this means we should flush all ASIDs.
- */
- lsr x2, x0, #TLBI_ASID_SHIFT
- cmp x2, #0
- b.eq 1f // Lflush_mmu_tlb_entry_globally
-
- bic x0, x0, #(1 << TLBI_ASID_SHIFT)
- tlbi vae1is, x0
- orr x0, x0, #(1 << TLBI_ASID_SHIFT)
-#endif /* __ARM_KERNEL_PROTECT__ */
- tlbi vae1is, x0
-#if __ARM_KERNEL_PROTECT__
- b 2f // Lflush_mmu_tlb_entry_done
-1: // Lflush_mmu_tlb_entry_globally:
- tlbi vaae1is, x0
-2: // Lflush_mmu_tlb_entry_done
-#endif /* __ARM_KERNEL_PROTECT__ */
-.endmacro
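-
-/*
- * Illustrative sketch (not from the source) of how a caller might build the
- * operand for flush_mmu_tlb_entry(): the TLBI VAE1IS operand carries VA[55:12]
- * in its low bits and the ASID in the field at TLBI_ASID_SHIFT.  The helper
- * name is hypothetical.
- *
- *   #include <stdint.h>
- *
- *   extern void flush_mmu_tlb_entry(uint64_t);   // defined below
- *
- *   static inline void
- *   sketch_flush_page(uint64_t va, uint64_t asid)
- *   {
- *           uint64_t vpn = (va >> 12) & ((1ULL << 44) - 1);   // VA[55:12]
- *           flush_mmu_tlb_entry(vpn | (asid << TLBI_ASID_SHIFT));
- *   }
- */
-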
-/*
- * void flush_mmu_tlb_entry_async(uint64_t)
- *
- * Flush TLB entry, don't wait for completion
- */
- .text
- .align 2
- .globl EXT(flush_mmu_tlb_entry_async)
-LEXT(flush_mmu_tlb_entry_async)
- FLUSH_MMU_TLB_ENTRY
- ret
-
-/*
- * void flush_mmu_tlb_entry(uint64_t)
- *
- * Flush TLB entry
- */
- .text
- .align 2
- .globl EXT(flush_mmu_tlb_entry)
-LEXT(flush_mmu_tlb_entry)
- FLUSH_MMU_TLB_ENTRY
- SYNC_TLB_FLUSH
- ret
-
-.macro FLUSH_MMU_TLB_ENTRIES
-#if __ARM_16K_PG__
- and x0, x0, #~0x3
-
- /*
- * The code below is not necessarily correct. From an overview of
- * the client code, the expected contract for TLB flushes is that
- * we will expand from an "address, length" pair to "start address,
- * end address" in the course of a TLB flush. This suggests that
- * a flush for "X, X+4" is actually only asking for a flush of a
- * single 16KB page. At the same time, we'd like to be prepared
- * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
- * number to a 16KB page boundary. This should deal correctly with
- * unaligned inputs.
- *
- * If our expectations about client behavior are wrong, however, this
- * will lead to occasional TLB corruption on platforms with 16KB
- * pages.
- */
- add x1, x1, #0x3
- and x1, x1, #~0x3
-#endif /* __ARM_16K_PG__ */
-#if __ARM_KERNEL_PROTECT__
- /*
- * If we are flushing ASID 0, this is a kernel operation. With this
- * ASID scheme, this means we should flush all ASIDs.
- */
- lsr x2, x0, #TLBI_ASID_SHIFT
- cmp x2, #0
- b.eq 2f // Lflush_mmu_tlb_entries_globally_loop
-
- bic x0, x0, #(1 << TLBI_ASID_SHIFT)
-#endif /* __ARM_KERNEL_PROTECT__ */
-1: // Lflush_mmu_tlb_entries_loop
- tlbi vae1is, x0
-#if __ARM_KERNEL_PROTECT__
- orr x0, x0, #(1 << TLBI_ASID_SHIFT)
- tlbi vae1is, x0
- bic x0, x0, #(1 << TLBI_ASID_SHIFT)
-#endif /* __ARM_KERNEL_PROTECT__ */
- add x0, x0, #(ARM_PGBYTES / 4096) // Units are 4KB pages, as defined by the ISA
- cmp x0, x1
- b.lt 1b // Lflush_mmu_tlb_entries_loop
-#if __ARM_KERNEL_PROTECT__
- b 3f // Lflush_mmu_tlb_entries_done
-2: // Lflush_mmu_tlb_entries_globally_loop:
- tlbi vaae1is, x0
- add x0, x0, #(ARM_PGBYTES / 4096) // Units are 4KB pages, as defined by the ISA
- cmp x0, x1
- b.lt 2b // Lflush_mmu_tlb_entries_globally_loop
-3: // Lflush_mmu_tlb_entries_done
-#endif /* __ARM_KERNEL_PROTECT__ */
-.endmacro
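-
-/*
- * Illustrative caller sketch (not from the source): the loop above treats x1
- * as an exclusive end and both operands carry the same ASID, so a ranged
- * flush of [va, va + len) could build its arguments roughly as below.  The
- * helper name is hypothetical.
- *
- *   #include <stdint.h>
- *
- *   extern void flush_mmu_tlb_entries(uint64_t, uint64_t);   // defined below
- *
- *   static inline void
- *   sketch_flush_range(uint64_t va, uint64_t len, uint64_t asid)
- *   {
- *           uint64_t asid_bits = asid << TLBI_ASID_SHIFT;
- *           uint64_t mask      = (1ULL << 44) - 1;            // keep VA[55:12]
- *           uint64_t first     = (va >> 12) & mask;           // first 4KB page
- *           uint64_t end       = ((va + len + 4095) >> 12) & mask;   // exclusive end
- *           flush_mmu_tlb_entries(first | asid_bits, end | asid_bits);
- *   }
- */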
-
-/*
- * void flush_mmu_tlb_entries_async(uint64_t, uint64_t)
- *
- * Flush TLB entries, don't wait for completion
- */
- .text
- .align 2
- .globl EXT(flush_mmu_tlb_entries_async)
-LEXT(flush_mmu_tlb_entries_async)
- FLUSH_MMU_TLB_ENTRIES
- ret
-
-/*
- * void flush_mmu_tlb_entries(uint64_t, uint64_t)
- *
- * Flush TLB entries
- */
- .text
- .align 2
- .globl EXT(flush_mmu_tlb_entries)
-LEXT(flush_mmu_tlb_entries)
- FLUSH_MMU_TLB_ENTRIES
- SYNC_TLB_FLUSH
- ret
-
-.macro FLUSH_MMU_TLB_ASID
-#if __ARM_KERNEL_PROTECT__
- /*
- * If we are flushing ASID 0, this is a kernel operation. With this
- * ASID scheme, this means we should flush all ASIDs.
- */
- lsr x1, x0, #TLBI_ASID_SHIFT
- cmp x1, #0
- b.eq 1f // Lflush_mmu_tlb_globally
-
- bic x0, x0, #(1 << TLBI_ASID_SHIFT)
- tlbi aside1is, x0
- orr x0, x0, #(1 << TLBI_ASID_SHIFT)
-#endif /* __ARM_KERNEL_PROTECT__ */
- tlbi aside1is, x0
-#if __ARM_KERNEL_PROTECT__
- b 2f // Lflush_mmu_tlb_asid_done
-1: // Lflush_mmu_tlb_globally:
- tlbi vmalle1is
-2: // Lflush_mmu_tlb_asid_done:
-#endif /* __ARM_KERNEL_PROTECT__ */
-.endmacro
-
-/*
- * void flush_mmu_tlb_asid_async(uint64_t)
- *
- * Flush TLB entries for the requested ASID, don't wait for completion
- */
- .text
- .align 2
- .globl EXT(flush_mmu_tlb_asid_async)
-LEXT(flush_mmu_tlb_asid_async)
- FLUSH_MMU_TLB_ASID
- ret
-
-/*
- * void flush_mmu_tlb_asid(uint64_t)
- *
- * Flush TLB entries for the requested ASID
- */
- .text
- .align 2
- .globl EXT(flush_mmu_tlb_asid)
-LEXT(flush_mmu_tlb_asid)
- FLUSH_MMU_TLB_ASID
- SYNC_TLB_FLUSH
- ret
-
-.macro FLUSH_CORE_TLB_ASID
-#if __ARM_KERNEL_PROTECT__
- /*
- * If we are flushing ASID 0, this is a kernel operation. With this
- * ASID scheme, this means we should flush all ASIDs.
- */
- lsr x1, x0, #TLBI_ASID_SHIFT
- cmp x1, #0
- b.eq 1f // Lflush_core_tlb_asid_globally
-
- bic x0, x0, #(1 << TLBI_ASID_SHIFT)
- tlbi aside1, x0
- orr x0, x0, #(1 << TLBI_ASID_SHIFT)
-#endif /* __ARM_KERNEL_PROTECT__ */
- tlbi aside1, x0
-#if __ARM_KERNEL_PROTECT__
- b 2f // Lflush_core_tlb_asid_done
-1: // Lflush_core_tlb_asid_globally:
- tlbi vmalle1
-2: // Lflush_core_tlb_asid_done:
-#endif /* __ARM_KERNEL_PROTECT__ */
-.endmacro
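-
-/*
- * Illustrative sketch (not from the source): the non-broadcast TLBI above
- * only affects the executing core, so a hypothetical caller would reach for
- * flush_core_tlb_asid() only when it knows the ASID cannot be live on any
- * other CPU, and use the inner-shareable variant otherwise.
- *
- *   #include <stdbool.h>
- *   #include <stdint.h>
- *
- *   extern void flush_core_tlb_asid(uint64_t);   // local core only (defined below)
- *   extern void flush_mmu_tlb_asid(uint64_t);    // inner-shareable broadcast (defined above)
- *
- *   static inline void
- *   sketch_flush_asid_scoped(uint64_t op, bool local_only)
- *   {
- *           if (local_only) {
- *                   flush_core_tlb_asid(op);
- *           } else {
- *                   flush_mmu_tlb_asid(op);
- *           }
- *   }
- */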
-
-/*
- * void flush_core_tlb_asid_async(uint64_t)
- *
- * Flush local core TLB entries for the requested ASID, don't wait for completion
- */
- .text
- .align 2
- .globl EXT(flush_core_tlb_asid_async)
-LEXT(flush_core_tlb_asid_async)
- FLUSH_CORE_TLB_ASID
- ret
-
-/*
- * void flush_core_tlb_asid(uint64_t)
- *
- * Flush local core TLB entries for the requested ASID
- */
- .text
- .align 2
- .globl EXT(flush_core_tlb_asid)
-LEXT(flush_core_tlb_asid)
- FLUSH_CORE_TLB_ASID
- SYNC_TLB_FLUSH
- ret
-