diff --git a/san/kasan-arm64.c b/san/kasan-arm64.c
index 49137763c8f17a5a1c4c00ce656733b0a37f9a22..20a815c6194cd7d10f45d678f92fb410e4788399 100644
--- a/san/kasan-arm64.c
+++ b/san/kasan-arm64.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
 #include <kasan_internal.h>
 #include <memintrinsics.h>
 
+#include <pexpert/device_tree.h>
 #include <pexpert/arm64/boot.h>
-#include <arm64/proc_reg.h>
+#include <arm64/tlb.h>
 
 #include <libkern/kernel_mach_header.h>
 
 extern uint64_t *cpu_tte;
 extern unsigned long gVirtBase, gPhysBase;
-#define phystokv(a) ((vm_address_t)(a) - gPhysBase + gVirtBase)
+
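+/*
+ * phystokv() is now a real function with a local pmap_paddr_t typedef,
+ * rather than an open-coded phys-to-virt offset macro.
+ */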
+typedef uint64_t pmap_paddr_t;
+extern vm_map_address_t phystokv(pmap_paddr_t pa);
 
 vm_offset_t physmap_vbase;
 vm_offset_t physmap_vtop;
 
 vm_offset_t shadow_pbase;
 vm_offset_t shadow_ptop;
+#if HIBERNATION
+// If we're building a kernel with hibernation support, hibernate_write_image() depends on this symbol.
+vm_offset_t shadow_pnext;
+#else
 static vm_offset_t shadow_pnext;
+#endif
 
 static vm_offset_t zero_page_phys;
 static vm_offset_t bootstrap_pgtable_phys;
@@ -67,20 +75,21 @@ extern vm_offset_t intstack, intstack_top;
 extern vm_offset_t excepstack, excepstack_top;
 
 void kasan_bootstrap(boot_args *, vm_offset_t pgtable);
-void flush_mmu_tlb(void);
 
-#ifndef __ARM_16K_PG__
-#error "Unsupported HW config: Assuming 16K pages"
-#endif
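+/*
+ * Shadow math: the shadow byte for a kernel address is
+ * (addr >> KASAN_SCALE) + KASAN_OFFSET, one shadow byte per 8-byte
+ * granule (KASAN_SCALE == 3).  For example,
+ * (0xfffffffc00000000 >> 3) + 0xe000000000000000 == 0xffffffff80000000,
+ * which is KASAN_SHADOW_MAX in the non-ARM_LARGE_MEMORY layout below.
+ */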
+#define KASAN_OFFSET_ARM64 0xe000000000000000ULL /* Defined in makedefs/MakeInc.def */
 
-#define KASAN_SHIFT_ARM64 0xdffffff800000000ULL /* Defined in makedefs/MakeInc.def */
-#define KASAN_SHADOW_MIN  0xfffffff400000000ULL
-#define KASAN_SHADOW_MAX  0xfffffff680000000ULL
+#if defined(ARM_LARGE_MEMORY)
+#define KASAN_SHADOW_MIN  (VM_MAX_KERNEL_ADDRESS+1)
+#define KASAN_SHADOW_MAX  0xffffffffffffffffULL
+#else
+#define KASAN_SHADOW_MIN  0xfffffffc00000000ULL
+#define KASAN_SHADOW_MAX  0xffffffff80000000ULL
+#endif
 
-_Static_assert(KASAN_SHIFT == KASAN_SHIFT_ARM64, "KASan inconsistent shadow shift");
+_Static_assert(KASAN_OFFSET == KASAN_OFFSET_ARM64, "KASan inconsistent shadow offset");
 _Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
-_Static_assert((VM_MIN_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
-_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX,  "KASan shadow does not cover kernel VM");
+_Static_assert((VM_MIN_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
+_Static_assert((VM_MAX_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");
 
 static uintptr_t
 alloc_page(void)
@@ -105,26 +114,29 @@ alloc_zero_page(void)
 }
 
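+/*
+ * Round an (address, size) range outward to whole pages: truncate the
+ * address down to a page boundary and grow the size to cover both the
+ * slop at the start and the partial page at the end.
+ */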
 static void
-kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
+align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
 {
-       size = vm_map_round_page(size, ARM_PGMASK);
-       vm_size_t j;
-       uint64_t *pte;
-
-       /* XXX: this could be more efficient by walking through the shadow pages
-        * instead of the source pages */
+       vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
+       *sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
+       *addrp = addr_aligned;
+}
 
-       for (j = 0; j < size; j += ARM_PGBYTES) {
-               vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);
+static void
+kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
+{
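+       /* round the size up to the 8-byte shadow granule */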
+       size = (size + 0x7UL) & ~0x7UL;
+       vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
+       vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);
 
-               assert(virt_shadow_target >= KASAN_SHADOW_MIN);
-               assert(virt_shadow_target < KASAN_SHADOW_MAX);
+       assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
+       assert((size & 0x7) == 0);
 
+       for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
                uint64_t *base = cpu_tte;
+               uint64_t *pte;
 
-#if !__ARM64_TWO_LEVEL_PMAP__
                /* lookup L1 entry */
-               pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
+               pte = base + ((shadow_base & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
                if (*pte & ARM_TTE_VALID) {
                        assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
                } else {
@@ -132,10 +144,9 @@ kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, boo
                        *pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
                }
                base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
-#endif
 
                /* lookup L2 entry */
-               pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
+               pte = base + ((shadow_base & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
                if (*pte & ARM_TTE_VALID) {
                        assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
                } else {
@@ -149,7 +160,7 @@ kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, boo
                }
 
                /* lookup L3 entry */
-               pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
+               pte = base + ((shadow_base & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
                if ((*pte & ARM_PTE_TYPE_VALID) &&
                    ((((*pte) & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RONA)) || is_zero)) {
                        /* nothing to do - page already mapped and we are not
@@ -165,11 +176,11 @@ kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, boo
                                newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA);
                        }
                        newpte |= ARM_PTE_TYPE_VALID
-                               | ARM_PTE_AF
-                               | ARM_PTE_SH(SH_OUTER_MEMORY)
-                               | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
-                               | ARM_PTE_NX
-                               | ARM_PTE_PNX;
+                           | ARM_PTE_AF
+                           | ARM_PTE_SH(SH_OUTER_MEMORY)
+                           | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
+                           | ARM_PTE_NX
+                           | ARM_PTE_PNX;
                        *pte = newpte;
                }
        }
@@ -189,7 +200,8 @@ kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
 static void
 kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
 {
-       size = vm_map_round_page(size, ARM_PGMASK);
+       align_to_page(&address, &size);
+
        vm_size_t j;
        uint64_t *pte;
 
@@ -201,7 +213,6 @@ kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
 
                uint64_t *base = (uint64_t *)bootstrap_pgtable_phys;
 
-#if !__ARM64_TWO_LEVEL_PMAP__
                /* lookup L1 entry */
                pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
                if (*pte & ARM_TTE_VALID) {
@@ -213,7 +224,6 @@ kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
                        *pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
                }
                base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);
-#endif
 
                /* lookup L2 entry */
                pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
@@ -230,7 +240,7 @@ kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
                /* lookup L3 entry */
                pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
 
-               if ((*pte & (ARM_PTE_TYPE|ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID|ARM_PTE_AP(AP_RWNA))) {
+               if ((*pte & (ARM_PTE_TYPE | ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID | ARM_PTE_AP(AP_RWNA))) {
                        /* L3 entry valid and mapped RW - do nothing */
                } else {
                        /* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */
@@ -248,11 +258,11 @@ kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
 
                        /* add the default attributes */
                        newpte |= ARM_PTE_TYPE_VALID
-                               | ARM_PTE_AF
-                               | ARM_PTE_SH(SH_OUTER_MEMORY)
-                               | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
-                               | ARM_PTE_NX
-                               | ARM_PTE_PNX;
+                           | ARM_PTE_AF
+                           | ARM_PTE_SH(SH_OUTER_MEMORY)
+                           | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
+                           | ARM_PTE_NX
+                           | ARM_PTE_PNX;
 
                        *pte = newpte;
                }
@@ -264,12 +274,10 @@ kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
 void
 kasan_arch_init(void)
 {
-       assert(KASAN_SHADOW_MIN >= VM_MAX_KERNEL_ADDRESS);
-
        /* Map the physical aperture */
-       kasan_map_shadow(kernel_vtop, physmap_vtop - kernel_vtop, true);
+       kasan_map_shadow(physmap_vbase, physmap_vtop - physmap_vbase, true);
 
-#if defined(KERNEL_INTEGRITY_KTRR)
+#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
        /* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
        kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, false, false);
 #endif
@@ -291,15 +299,16 @@ kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
        kernel_vbase = args->virtBase;
        kernel_vtop = args->virtBase + ptop - pbase;
 
-       /* Steal ~15% of physical memory */
-       tosteal = vm_map_trunc_page(args->memSize / 6, ARM_PGMASK);
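+       /* Steal STOLEN_MEM_PERCENT of physical memory, plus a fixed
+        * STOLEN_MEM_BYTES, to back the shadow page allocator. */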
+       tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
+       tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);
+
        args->memSize -= tosteal;
 
        /* Initialize the page allocator */
        shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
        shadow_ptop = shadow_pbase + tosteal;
        shadow_pnext = shadow_pbase;
-       shadow_pages_total = (long)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);
+       shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);
 
        /* Set aside a page of zeros we can use for dummy shadow mappings */
        zero_page_phys = alloc_page();
@@ -319,4 +328,40 @@ kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
 
        kasan_map_shadow_early(intstack_virt, intstack_size, false);
        kasan_map_shadow_early(excepstack_virt, excepstack_size, false);
+
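+       /*
+        * The device tree is consumed early in boot; if it is not already
+        * covered by the shadow regions mapped above, map shadow for it too.
+        */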
+       if ((vm_offset_t)args->deviceTreeP - p2v < (vm_offset_t)&_mh_execute_header) {
+               kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength, false);
+       }
+}
+
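+/*
+ * Report whether a shadow address is backed by a mapping, by walking
+ * the live kernel translation tables one level at a time (L1 -> L2 -> L3).
+ */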
+bool
+kasan_is_shadow_mapped(uintptr_t shadowp)
+{
+       uint64_t *pte;
+       uint64_t *base = cpu_tte;
+
+       assert(shadowp >= KASAN_SHADOW_MIN);
+       assert(shadowp < KASAN_SHADOW_MAX);
+
+       /* lookup L1 entry */
+       pte = base + ((shadowp & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
+       if (!(*pte & ARM_TTE_VALID)) {
+               return false;
+       }
+       base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
+
+       /* lookup L2 entry */
+       pte = base + ((shadowp & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
+       if (!(*pte & ARM_TTE_VALID)) {
+               return false;
+       }
+       base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
+
+       /* lookup L3 entry */
+       pte = base + ((shadowp & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
+       if (!(*pte & ARM_PTE_TYPE_VALID)) {
+               return false;
+       }
+
+       return true;
 }