// flush_mmu_tlb_allentries_async: flush entries that map VA range, all ASIDS, all cores
// start and end are in units of 4K pages.
static inline void
- /*
- * The code below is not necessarily correct. From an overview of
- * the client code, the expected contract for TLB flushes is that
- * we will expand from an "address, length" pair to "start address,
- * end address" in the course of a TLB flush. This suggests that
- * a flush for "X, X+4" is actually only asking for a flush of a
- * single 16KB page. At the same time, we'd like to be prepared
- * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
- * number to a 16KB page boundary. This should deal correctly with
- * unaligned inputs.
- *
- * If our expecations about client behavior are wrong however, this
- * will lead to occasional TLB corruption on platforms with 16KB
- * pages.
- */
- end = (end + 0x3ULL) & ~0x3ULL;
+ /*
+ * The code below is not necessarily correct. From an overview of
+ * the client code, the expected contract for TLB flushes is that
+ * we will expand from an "address, length" pair to "start address,
+ * end address" in the course of a TLB flush. This suggests that
+ * a flush for "X, X+4" is actually only asking for a flush of a
+ * single 16KB page. At the same time, we'd like to be prepared
+ * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
+ * number to a 16KB page boundary. This should deal correctly with
+ * unaligned inputs.
+ *
+ * If our expectations about client behavior are wrong, however, this
+ * will lead to occasional TLB corruption on platforms with 16KB
+ * pages.
+ */
+ end = (end + 0x3ULL) & ~0x3ULL;
+ }
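// Worked example (editor's illustration, not part of the diff): with 4K page
// numbers, end = 0x5 rounds up via (0x5 + 0x3) & ~0x3 = 0x8, the next 16KB
// boundary; an unaligned end = 0x7 also rounds to 0x8, while an already
// aligned end = 0x8 is left unchanged.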
// start and end must have the ASID in the high 16 bits, with the VA in units of 4K in the lowest bits
// Will also flush global entries that match the VA range
static inline void
- /*
- * The code below is not necessarily correct. From an overview of
- * the client code, the expected contract for TLB flushes is that
- * we will expand from an "address, length" pair to "start address,
- * end address" in the course of a TLB flush. This suggests that
- * a flush for "X, X+4" is actually only asking for a flush of a
- * single 16KB page. At the same time, we'd like to be prepared
- * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
- * number to a 16KB page boundary. This should deal correctly with
- * unaligned inputs.
- *
- * If our expecations about client behavior are wrong however, this
- * will lead to occasional TLB corruption on platforms with 16KB
- * pages.
- */
- end = (end + 0x3ULL) & ~0x3ULL;
+ /*
+ * The code below is not necessarily correct. From an overview of
+ * the client code, the expected contract for TLB flushes is that
+ * we will expand from an "address, length" pair to "start address,
+ * end address" in the course of a TLB flush. This suggests that
+ * a flush for "X, X+4" is actually only asking for a flush of a
+ * single 16KB page. At the same time, we'd like to be prepared
+ * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
+ * number to a 16KB page boundary. This should deal correctly with
+ * unaligned inputs.
+ *
+ * If our expectations about client behavior are wrong, however, this
+ * will lead to occasional TLB corruption on platforms with 16KB
+ * pages.
+ */
+ end = (end + 0x3ULL) & ~0x3ULL;
+ }
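// Illustrative example (editor's note, not part of the diff): per the operand
// layout described above, a flush of 4K page number 0x1234 under ASID 5 would
// be encoded as (5ULL << TLBI_ASID_SHIFT) | 0x1234ULL, i.e. the ASID in the
// high 16 bits and the 4K page number in the low bits.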
asm volatile ("tlbi vaae1is, %0" : : "r"(start));
}
return;
}
start = start | (1ULL << TLBI_ASID_SHIFT);
end = end | (1ULL << TLBI_ASID_SHIFT);
asm volatile ("tlbi vaae1is, %0" : : "r"(start));
}
return;
}
start = start | (1ULL << TLBI_ASID_SHIFT);
end = end | (1ULL << TLBI_ASID_SHIFT);
start = start & ~(1ULL << TLBI_ASID_SHIFT);
asm volatile ("tlbi vae1is, %0" : : "r"(start));
start = start | (1ULL << TLBI_ASID_SHIFT);
asm volatile ("tlbi vae1is, %0" : : "r"(start));
}
#else
asm volatile ("tlbi vae1is, %0" : : "r"(start));
}
#endif /* __ARM_KERNEL_PROTECT__ */
}
static inline void
asm volatile ("tlbi vae1is, %0" : : "r"(start));
}
#endif /* __ARM_KERNEL_PROTECT__ */
}
static inline void
-#define ARM64_16K_TLB_RANGE_PAGES (1ULL << 21)
-#define rtlbi_addr(x) (((x) >> RTLBI_ADDR_SHIFT) & RTLBI_ADDR_MASK)
+#define ARM64_TLB_RANGE_PAGES (1ULL << 21)
+#define rtlbi_addr(x, shift) (((x) >> (shift)) & RTLBI_ADDR_MASK)
#define rtlbi_scale(x) ((uint64_t)(x) << RTLBI_SCALE_SHIFT)
#define rtlbi_num(x) ((uint64_t)(x) << RTLBI_NUM_SHIFT)
-generate_rtlbi_param(ppnum_t npages, uint32_t asid, vm_offset_t va)
+generate_rtlbi_param(ppnum_t npages, uint32_t asid, vm_offset_t va, uint64_t pmap_page_shift)
{
/**
* Per the armv8.4 RTLBI extension spec, the range encoded in the rtlbi register operand is defined by:
* BaseADDR <= VA < BaseADDR+((NUM+1)*2^(5*SCALE+1) * Translation_Granule_Size)
*/
unsigned scale = ((order ? order : 1) - 1) / 5;
unsigned granule = 1 << ((5 * scale) + 1);
unsigned num = (((npages + granule - 1) & ~(granule - 1)) / granule) - 1;
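// Worked example (editor's illustration, hypothetical values): with scale = 1,
// granule = 1 << ((5 * 1) + 1) = 64 pages; a request for npages = 100 rounds
// up to 128 and yields num = (128 / 64) - 1 = 1, so the spec formula
// (NUM + 1) * 2^(5*SCALE + 1) covers 2 * 64 = 128 pages >= 100.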
+// flush_core_tlb_allrange: flush TLB entries that map a VA range using a single instruction, local core only
+// The argument should be encoded according to generate_rtlbi_param().
+// Follows the same ASID matching behavior as flush_mmu_tlb_allentries()
+static inline void
+flush_core_tlb_allrange_async(uint64_t val)
+{
+ asm volatile ("tlbi rvaae1, %0" : : "r"(val));
+}
+
+static inline void
+flush_core_tlb_allrange(uint64_t val)
+{
+ flush_core_tlb_allrange_async(val);
+ sync_tlb_flush();
+}
+
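/*
 * Illustrative usage sketch (editor's example, not part of this change):
 * how a caller might combine generate_rtlbi_param() with
 * flush_core_tlb_allrange() to flush a VA range on the local core. The helper
 * name, the page-shift argument, and the fallback comment are hypothetical;
 * generate_rtlbi_param() is assumed to return the encoded uint64_t operand.
 */
static inline void
example_flush_local_va_range(vm_offset_t va, ppnum_t npages, uint32_t asid, uint64_t pmap_page_shift)
{
	if (npages > ARM64_TLB_RANGE_PAGES) {
		/* Too many pages for a single range TLBI; real callers would fall back to a per-page loop. */
		return;
	}
	uint64_t rtlbi_val = generate_rtlbi_param(npages, asid, va, pmap_page_shift);
	flush_core_tlb_allrange(rtlbi_val);
}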