diff --git a/san/kasan-arm64.c b/san/kasan-arm64.c
index 77ee449a803c5ba6623a6a8085c1b36092d03706..20a815c6194cd7d10f45d678f92fb410e4788399 100644
--- a/san/kasan-arm64.c
+++ b/san/kasan-arm64.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
 #include <kasan_internal.h>
 #include <memintrinsics.h>
 
+#include <pexpert/device_tree.h>
 #include <pexpert/arm64/boot.h>
-#include <arm64/proc_reg.h>
+#include <arm64/tlb.h>
 
 #include <libkern/kernel_mach_header.h>
 
 extern uint64_t *cpu_tte;
 extern unsigned long gVirtBase, gPhysBase;
-#define phystokv(a) ((vm_address_t)(a) - gPhysBase + gVirtBase)
+
+typedef uint64_t pmap_paddr_t;
+extern vm_map_address_t phystokv(pmap_paddr_t pa);
 
 vm_offset_t physmap_vbase;
 vm_offset_t physmap_vtop;
 
 vm_offset_t shadow_pbase;
 vm_offset_t shadow_ptop;
+#if HIBERNATION
+// if we're building a kernel with hibernation support, hibernate_write_image depends on this symbol
+vm_offset_t shadow_pnext;
+#else
 static vm_offset_t shadow_pnext;
+#endif
 
 static vm_offset_t zero_page_phys;
 static vm_offset_t bootstrap_pgtable_phys;
@@ -67,16 +75,21 @@ extern vm_offset_t intstack, intstack_top;
 extern vm_offset_t excepstack, excepstack_top;
 
 void kasan_bootstrap(boot_args *, vm_offset_t pgtable);
-void flush_mmu_tlb(void);
 
-#define KASAN_SHIFT_ARM64 0xdffffff800000000ULL /* Defined in makedefs/MakeInc.def */
-#define KASAN_SHADOW_MIN  0xfffffff400000000ULL
-#define KASAN_SHADOW_MAX  0xfffffff680000000ULL
+#define KASAN_OFFSET_ARM64 0xe000000000000000ULL /* Defined in makedefs/MakeInc.def */
 
-_Static_assert(KASAN_SHIFT == KASAN_SHIFT_ARM64, "KASan inconsistent shadow shift");
+#if defined(ARM_LARGE_MEMORY)
+#define KASAN_SHADOW_MIN  (VM_MAX_KERNEL_ADDRESS+1)
+#define KASAN_SHADOW_MAX  0xffffffffffffffffULL
+#else
+#define KASAN_SHADOW_MIN  0xfffffffc00000000ULL
+#define KASAN_SHADOW_MAX  0xffffffff80000000ULL
+#endif
+
+_Static_assert(KASAN_OFFSET == KASAN_OFFSET_ARM64, "KASan inconsistent shadow offset");
 _Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
-_Static_assert((VM_MIN_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
-_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX,  "KASan shadow does not cover kernel VM");
+_Static_assert((VM_MIN_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
+_Static_assert((VM_MAX_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");
 
 static uintptr_t
 alloc_page(void)
@@ -111,16 +124,17 @@ align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
 static void
 kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
 {
+       size = (size + 0x7UL) & ~0x7UL;
        vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
        vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);
 
        assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
+       assert((size & 0x7) == 0);
 
        for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
                uint64_t *base = cpu_tte;
                uint64_t *pte;
 
-#if !__ARM64_TWO_LEVEL_PMAP__
                /* lookup L1 entry */
                pte = base + ((shadow_base & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
                if (*pte & ARM_TTE_VALID) {
@@ -130,7 +144,6 @@ kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, boo
                        *pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
                }
                base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
-#endif
 
                /* lookup L2 entry */
                pte = base + ((shadow_base & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
@@ -163,11 +176,11 @@ kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, boo
                                newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA);
                        }
                        newpte |= ARM_PTE_TYPE_VALID
-                               | ARM_PTE_AF
-                               | ARM_PTE_SH(SH_OUTER_MEMORY)
-                               | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
-                               | ARM_PTE_NX
-                               | ARM_PTE_PNX;
+                           | ARM_PTE_AF
+                           | ARM_PTE_SH(SH_OUTER_MEMORY)
+                           | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
+                           | ARM_PTE_NX
+                           | ARM_PTE_PNX;
                        *pte = newpte;
                }
        }
@@ -200,7 +213,6 @@ kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
 
                uint64_t *base = (uint64_t *)bootstrap_pgtable_phys;
 
-#if !__ARM64_TWO_LEVEL_PMAP__
                /* lookup L1 entry */
                pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
                if (*pte & ARM_TTE_VALID) {
@@ -212,7 +224,6 @@ kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
                        *pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
                }
                base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);
-#endif
 
                /* lookup L2 entry */
                pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
@@ -229,7 +240,7 @@ kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
                /* lookup L3 entry */
                pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
 
-               if ((*pte & (ARM_PTE_TYPE|ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID|ARM_PTE_AP(AP_RWNA))) {
+               if ((*pte & (ARM_PTE_TYPE | ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID | ARM_PTE_AP(AP_RWNA))) {
                        /* L3 entry valid and mapped RW - do nothing */
                } else {
                        /* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */
@@ -247,11 +258,11 @@ kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
 
                        /* add the default attributes */
                        newpte |= ARM_PTE_TYPE_VALID
-                               | ARM_PTE_AF
-                               | ARM_PTE_SH(SH_OUTER_MEMORY)
-                               | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
-                               | ARM_PTE_NX
-                               | ARM_PTE_PNX;
+                           | ARM_PTE_AF
+                           | ARM_PTE_SH(SH_OUTER_MEMORY)
+                           | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
+                           | ARM_PTE_NX
+                           | ARM_PTE_PNX;
 
                        *pte = newpte;
                }
@@ -264,9 +275,9 @@ void
 kasan_arch_init(void)
 {
        /* Map the physical aperture */
-       kasan_map_shadow(kernel_vtop, physmap_vtop - kernel_vtop, true);
+       kasan_map_shadow(physmap_vbase, physmap_vtop - physmap_vbase, true);
 
-#if defined(KERNEL_INTEGRITY_KTRR)
+#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
        /* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
        kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, false, false);
 #endif
@@ -297,7 +308,7 @@ kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
        shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
        shadow_ptop = shadow_pbase + tosteal;
        shadow_pnext = shadow_pbase;
-       shadow_pages_total = (long)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);
+       shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);
 
        /* Set aside a page of zeros we can use for dummy shadow mappings */
        zero_page_phys = alloc_page();
@@ -317,6 +328,10 @@ kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
 
        kasan_map_shadow_early(intstack_virt, intstack_size, false);
        kasan_map_shadow_early(excepstack_virt, excepstack_size, false);
+
+       if ((vm_offset_t)args->deviceTreeP - p2v < (vm_offset_t)&_mh_execute_header) {
+               kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength, false);
+       }
 }
 
 bool
@@ -328,14 +343,12 @@ kasan_is_shadow_mapped(uintptr_t shadowp)
        assert(shadowp >= KASAN_SHADOW_MIN);
        assert(shadowp < KASAN_SHADOW_MAX);
 
-#if !__ARM64_TWO_LEVEL_PMAP__
        /* lookup L1 entry */
        pte = base + ((shadowp & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
        if (!(*pte & ARM_TTE_VALID)) {
                return false;
        }
        base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
-#endif
 
        /* lookup L2 entry */
        pte = base + ((shadowp & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);