[apple/xnu.git] san/kasan-arm64.c (xnu-7195.101.1)
/*
 * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

#include <pexpert/device_tree.h>
#include <pexpert/arm64/boot.h>
#include <arm64/tlb.h>

#include <libkern/kernel_mach_header.h>

extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;

typedef uint64_t pmap_paddr_t;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
#if HIBERNATION
// if we're building a kernel with hibernation support, hibernate_write_image depends on this symbol
vm_offset_t shadow_pnext;
#else
static vm_offset_t shadow_pnext;
#endif

static vm_offset_t zero_page_phys;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

void kasan_bootstrap(boot_args *, vm_offset_t pgtable);

#define KASAN_OFFSET_ARM64 0xe000000000000000ULL /* Defined in makedefs/MakeInc.def */

#if defined(ARM_LARGE_MEMORY)
#define KASAN_SHADOW_MIN (VM_MAX_KERNEL_ADDRESS+1)
#define KASAN_SHADOW_MAX 0xffffffffffffffffULL
#else
#define KASAN_SHADOW_MIN 0xfffffffc00000000ULL
#define KASAN_SHADOW_MAX 0xffffffff80000000ULL
#endif

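/*
 * Each shadow byte tracks one aligned granule of kernel address space:
 * shadow(addr) = (addr >> KASAN_SCALE) + KASAN_OFFSET, which is what the
 * SHADOW_FOR_ADDRESS() helper used below is expected to compute. Assuming
 * the classic ASan granule of 8 bytes (KASAN_SCALE == 3), the entire kernel
 * VA range lands inside the [KASAN_SHADOW_MIN, KASAN_SHADOW_MAX) window
 * defined above; the static asserts below check exactly that.
 */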
_Static_assert(KASAN_OFFSET == KASAN_OFFSET_ARM64, "KASan inconsistent shadow offset");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");

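/*
 * Trivial bump allocator over the physical range stolen in kasan_bootstrap():
 * hands out one ARM_PGBYTES-sized physical page at a time and never frees.
 * Panics if the stolen range is exhausted.
 */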
static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;
	shadow_pages_used++;

	return mem;
}

static uintptr_t
alloc_zero_page(void)
{
	uintptr_t mem = alloc_page();
	__nosan_bzero((void *)phystokv(mem), ARM_PGBYTES);
	return mem;
}

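/* Expand the range described by *addrp/*sizep outward to page boundaries. */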
static void
align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}

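/*
 * Map the shadow for [address, address + size). For each page of shadow,
 * walk the live translation tables (cpu_tte), allocating zeroed table pages
 * for any missing L1/L2 entries. If back_page is false, only the table
 * levels are populated and the leaf mapping is skipped (used to pre-allocate
 * page tables). Otherwise the leaf is mapped either to the shared zero page
 * (read-only, for regions whose shadow is known to be all zeroes) or to a
 * freshly allocated page (read-write); an existing read-only zero-page
 * mapping is replaced with a private read-write page when is_zero is false.
 */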
static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
{
	size = (size + 0x7UL) & ~0x7UL;
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);

	assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
	assert((size & 0x7) == 0);

	for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
		uint64_t *base = cpu_tte;
		uint64_t *pte;

		/* lookup L1 entry */
		pte = base + ((shadow_base & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L2 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L2 entry */
		pte = base + ((shadow_base & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

		if (!back_page) {
			continue;
		}

		/* lookup L3 entry */
		pte = base + ((shadow_base & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
		if ((*pte & ARM_PTE_TYPE_VALID) &&
		    ((((*pte) & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RONA)) || is_zero)) {
			/* nothing to do - page already mapped and we are not
			 * upgrading */
		} else {
			/* create new L3 entry */
			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA);
			}
			newpte |= ARM_PTE_TYPE_VALID
			    | ARM_PTE_AF
			    | ARM_PTE_SH(SH_OUTER_MEMORY)
			    | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
			    | ARM_PTE_NX
			    | ARM_PTE_PNX;
			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

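/* Map and back the shadow for an address range (see kasan_map_shadow_internal()). */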
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	kasan_map_shadow_internal(address, size, is_zero, true);
}

/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
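/*
 * Early variant of kasan_map_shadow(), used from kasan_bootstrap() while the
 * identity (V=P) map is still active: the table walk starts from the
 * bootstrap page tables and physical addresses are dereferenced directly
 * instead of going through phystokv().
 */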
static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
{
	align_to_page(&address, &size);

	vm_size_t j;
	uint64_t *pte;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		uint64_t *base = (uint64_t *)bootstrap_pgtable_phys;

		/* lookup L1 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L2 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L2 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L3 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);

		if ((*pte & (ARM_PTE_TYPE | ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID | ARM_PTE_AP(AP_RWNA))) {
			/* L3 entry valid and mapped RW - do nothing */
		} else {
			/* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */

			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				vm_address_t pg = alloc_page();
				__nosan_bzero((void *)pg, ARM_PGBYTES);
				newpte = pg | ARM_PTE_AP(AP_RWNA);
			}

			/* add the default attributes */
			newpte |= ARM_PTE_TYPE_VALID
			    | ARM_PTE_AF
			    | ARM_PTE_SH(SH_OUTER_MEMORY)
			    | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
			    | ARM_PTE_NX
			    | ARM_PTE_PNX;

			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

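/*
 * Late (post-bootstrap) arm64 KASan setup. The physical aperture is shadowed
 * with the shared zero page (is_zero == true), so it is treated as unpoisoned
 * without dedicating real shadow pages to it.
 */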
void
kasan_arch_init(void)
{
	/* Map the physical aperture */
	kasan_map_shadow(physmap_vbase, physmap_vtop - physmap_vbase, true);

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, false, false);
#endif
}

/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init(). Called while running with identity (V=P) map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	vm_offset_t extra = (vm_offset_t)&_mh_execute_header - pbase;

	kernel_vbase = args->virtBase;
	kernel_vtop = args->virtBase + ptop - pbase;

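	/*
	 * Carve the shadow pool out of the top of managed physical memory by
	 * shrinking args->memSize before the rest of the kernel sees it.
	 */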
	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);

	args->memSize -= tosteal;

	/* Initialize the page allocator */
	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/* Set aside a page of zeros we can use for dummy shadow mappings */
	zero_page_phys = alloc_page();
	__nosan_bzero((void *)zero_page_phys, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;
	kasan_map_shadow_early(kernel_vbase + extra, args->memSize - extra, true);

	/* Shadow the early stacks */
	vm_offset_t p2v = args->virtBase - args->physBase;

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size, false);
	kasan_map_shadow_early(excepstack_virt, excepstack_size, false);

	if ((vm_offset_t)args->deviceTreeP - p2v < (vm_offset_t)&_mh_execute_header) {
		kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength, false);
	}
}

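/*
 * Return whether the given shadow address is currently backed by a mapping
 * (either the zero page or a real shadow page), by walking the live
 * translation tables down to the L3 entry.
 */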
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	uint64_t *pte;
	uint64_t *base = cpu_tte;

	assert(shadowp >= KASAN_SHADOW_MIN);
	assert(shadowp < KASAN_SHADOW_MAX);

	/* lookup L1 entry */
	pte = base + ((shadowp & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L2 entry */
	pte = base + ((shadowp & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L3 entry */
	pte = base + ((shadowp & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
	if (!(*pte & ARM_PTE_TYPE_VALID)) {
		return false;
	}

	return true;
}