/*
- * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kasan_internal.h>
#include <memintrinsics.h>
+#include <pexpert/device_tree.h>
#include <pexpert/arm64/boot.h>
-#include <arm64/proc_reg.h>
+#include <arm64/tlb.h>
#include <libkern/kernel_mach_header.h>
extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;
-#define phystokv(a) ((vm_address_t)(a) - gPhysBase + gVirtBase)
+
+typedef uint64_t pmap_paddr_t;
+extern vm_map_address_t phystokv(pmap_paddr_t pa);
vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;
vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
+#if HIBERNATION
+// If we're building a kernel with hibernation support, hibernate_write_image depends on this symbol.
+vm_offset_t shadow_pnext;
+#else
static vm_offset_t shadow_pnext;
+#endif
static vm_offset_t zero_page_phys;
static vm_offset_t bootstrap_pgtable_phys;
extern vm_offset_t excepstack, excepstack_top;
void kasan_bootstrap(boot_args *, vm_offset_t pgtable);
-void flush_mmu_tlb(void);
-#ifndef __ARM_16K_PG__
-#error "Unsupported HW config: Assuming 16K pages"
-#endif
-#define KASAN_SHIFT_ARM64 0xdffffff800000000ULL /* Defined in makedefs/MakeInc.def */
-#define KASAN_SHADOW_MIN 0xfffffff400000000ULL
-#define KASAN_SHADOW_MAX 0xfffffff680000000ULL
+#define KASAN_OFFSET_ARM64 0xe000000000000000ULL /* Defined in makedefs/MakeInc.def */
+#if defined(ARM_LARGE_MEMORY)
+#define KASAN_SHADOW_MIN (VM_MAX_KERNEL_ADDRESS+1)
+#define KASAN_SHADOW_MAX 0xffffffffffffffffULL
+#else
+#define KASAN_SHADOW_MIN 0xfffffffc00000000ULL
+#define KASAN_SHADOW_MAX 0xffffffff80000000ULL
+#endif
-_Static_assert(KASAN_SHIFT == KASAN_SHIFT_ARM64, "KASan inconsistent shadow shift");
+_Static_assert(KASAN_OFFSET == KASAN_OFFSET_ARM64, "KASan inconsistent shadow offset");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
-_Static_assert((VM_MIN_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
-_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");
+_Static_assert((VM_MIN_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
+_Static_assert((VM_MAX_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");
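+
+/*
+ * Illustrative note (editor sketch, not part of the original change): shadow
+ * addresses are formed as SHADOW_FOR_ADDRESS(x) == (x >> KASAN_SCALE) + KASAN_OFFSET.
+ * Assuming the classic 1:8 KASan mapping (KASAN_SCALE == 3) and the
+ * non-ARM_LARGE_MEMORY bounds above, a kernel address such as 0xfffffff000000000
+ * shadows to (0xfffffff000000000 >> 3) + 0xe000000000000000 == 0xfffffffe00000000,
+ * which lies inside [KASAN_SHADOW_MIN, KASAN_SHADOW_MAX) as the static asserts
+ * require.
+ */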
static uintptr_t
alloc_page(void)
}
static void
-kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
+align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
- size = vm_map_round_page(size, ARM_PGMASK);
- vm_size_t j;
- uint64_t *pte;
-
- /* XXX: this could be more efficient by walking through the shadow pages
- * instead of the source pages */
+ vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
+ *sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
+ *addrp = addr_aligned;
+}
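+/*
+ * Example (illustrative, offsets kept small for clarity): with 16K pages
+ * (ARM_PGBYTES == 0x4000), a request of addr == 0x4010 and size == 0x20 becomes
+ * addr == 0x4000 and size == round(0x20 + 0x10, page) == 0x4000; the bytes
+ * trimmed off the front by the truncation are added back into the size before
+ * rounding, so the aligned range still covers the original one.
+ */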
- for (j = 0; j < size; j += ARM_PGBYTES) {
- vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);
+static void
+kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
+{
+ size = (size + 0x7UL) & ~0x7UL;
+ vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
+ vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);
- assert(virt_shadow_target >= KASAN_SHADOW_MIN);
- assert(virt_shadow_target < KASAN_SHADOW_MAX);
+ assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
+ assert((size & 0x7) == 0);
+ for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
uint64_t *base = cpu_tte;
+ uint64_t *pte;
-#if !__ARM64_TWO_LEVEL_PMAP__
/* lookup L1 entry */
- pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
+ pte = base + ((shadow_base & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
if (*pte & ARM_TTE_VALID) {
assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
} else {
*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
}
base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
-#endif
/* lookup L2 entry */
- pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
+ pte = base + ((shadow_base & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
if (*pte & ARM_TTE_VALID) {
assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
} else {
}
/* lookup L3 entry */
- pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
+ pte = base + ((shadow_base & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
if ((*pte & ARM_PTE_TYPE_VALID) &&
((((*pte) & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RONA)) || is_zero)) {
/* nothing to do - page already mapped and we are not
newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA);
}
newpte |= ARM_PTE_TYPE_VALID
- | ARM_PTE_AF
- | ARM_PTE_SH(SH_OUTER_MEMORY)
- | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
- | ARM_PTE_NX
- | ARM_PTE_PNX;
+ | ARM_PTE_AF
+ | ARM_PTE_SH(SH_OUTER_MEMORY)
+ | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
+ | ARM_PTE_NX
+ | ARM_PTE_PNX;
*pte = newpte;
}
}
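+
+/*
+ * Note (illustrative, assuming the 16K translation granule): the table walks in
+ * this file index each level as (va & ARM_TT_Lx_INDEX_MASK) >> ARM_TT_Lx_SHIFT;
+ * with 16K pages an L3 entry maps 16 KiB, a table holds 2048 entries, and a zero
+ * page installed at a missing L1/L2 slot serves as the next-level table for the
+ * whole span that slot covers.
+ */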
static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
{
- size = vm_map_round_page(size, ARM_PGMASK);
+ align_to_page(&address, &size);
+
vm_size_t j;
uint64_t *pte;
uint64_t *base = (uint64_t *)bootstrap_pgtable_phys;
-#if !__ARM64_TWO_LEVEL_PMAP__
/* lookup L1 entry */
pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
if (*pte & ARM_TTE_VALID) {
*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
}
base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);
-#endif
/* lookup L2 entry */
pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
/* lookup L3 entry */
pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
- if ((*pte & (ARM_PTE_TYPE|ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID|ARM_PTE_AP(AP_RWNA))) {
+ if ((*pte & (ARM_PTE_TYPE | ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID | ARM_PTE_AP(AP_RWNA))) {
/* L3 entry valid and mapped RW - do nothing */
} else {
/* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */
/* add the default attributes */
newpte |= ARM_PTE_TYPE_VALID
- | ARM_PTE_AF
- | ARM_PTE_SH(SH_OUTER_MEMORY)
- | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
- | ARM_PTE_NX
- | ARM_PTE_PNX;
+ | ARM_PTE_AF
+ | ARM_PTE_SH(SH_OUTER_MEMORY)
+ | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
+ | ARM_PTE_NX
+ | ARM_PTE_PNX;
*pte = newpte;
}
void
kasan_arch_init(void)
{
- assert(KASAN_SHADOW_MIN >= VM_MAX_KERNEL_ADDRESS);
-
/* Map the physical aperture */
- kasan_map_shadow(kernel_vtop, physmap_vtop - kernel_vtop, true);
+ kasan_map_shadow(physmap_vbase, physmap_vtop - physmap_vbase, true);
-#if defined(KERNEL_INTEGRITY_KTRR)
+#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, false, false);
#endif
kernel_vbase = args->virtBase;
kernel_vtop = args->virtBase + ptop - pbase;
- /* Steal ~15% of physical memory */
- tosteal = vm_map_trunc_page(args->memSize / 6, ARM_PGMASK);
+ tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
+ tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);
+
args->memSize -= tosteal;
/* Initialize the page allocator */
shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
shadow_ptop = shadow_pbase + tosteal;
shadow_pnext = shadow_pbase;
- shadow_pages_total = (long)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);
+ shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);
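+
+/*
+ * Worked example (illustrative; the STOLEN_MEM_* values are assumptions, defined
+ * elsewhere in this file): with STOLEN_MEM_PERCENT == 25 and STOLEN_MEM_BYTES == 0,
+ * an 8 GiB memSize gives tosteal == 2 GiB (page-truncated). That region is carved
+ * off the top of physical memory as [shadow_pbase, shadow_ptop) and handed to the
+ * bump allocator through shadow_pnext, so shadow_pages_total == 2 GiB / ARM_PGBYTES.
+ */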
/* Set aside a page of zeros we can use for dummy shadow mappings */
zero_page_phys = alloc_page();
kasan_map_shadow_early(intstack_virt, intstack_size, false);
kasan_map_shadow_early(excepstack_virt, excepstack_size, false);
+
+ if ((vm_offset_t)args->deviceTreeP - p2v < (vm_offset_t)&_mh_execute_header) {
+ kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength, false);
+ }
+}
+
+bool
+kasan_is_shadow_mapped(uintptr_t shadowp)
+{
+ uint64_t *pte;
+ uint64_t *base = cpu_tte;
+
+ assert(shadowp >= KASAN_SHADOW_MIN);
+ assert(shadowp < KASAN_SHADOW_MAX);
+
+ /* lookup L1 entry */
+ pte = base + ((shadowp & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
+ if (!(*pte & ARM_TTE_VALID)) {
+ return false;
+ }
+ base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
+
+ /* lookup L2 entry */
+ pte = base + ((shadowp & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
+ if (!(*pte & ARM_TTE_VALID)) {
+ return false;
+ }
+ base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
+
+ /* lookup L3 entry */
+ pte = base + ((shadowp & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
+ if (!(*pte & ARM_PTE_TYPE_VALID)) {
+ return false;
+ }
+
+ return true;
}
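+
+/*
+ * Usage sketch (hypothetical caller, editor illustration only): code that wants
+ * to touch shadow memory outside the usual instrumentation path can check for a
+ * backing page first, e.g.
+ *
+ *	uint8_t *shadow = SHADOW_FOR_ADDRESS(addr);
+ *	if (kasan_is_shadow_mapped((uintptr_t)shadow)) {
+ *		*shadow = 0;	// only write shadow that is actually backed
+ *	}
+ */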