/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

#include <pexpert/arm64/boot.h>
#include <arm64/proc_reg.h>

#include <libkern/kernel_mach_header.h>

extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;

typedef uint64_t pmap_paddr_t;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
static vm_offset_t shadow_pnext;

static vm_offset_t zero_page_phys;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

void kasan_bootstrap(boot_args *, vm_offset_t pgtable);
void flush_mmu_tlb(void);

#define KASAN_SHIFT_ARM64	0xdffffff800000000ULL /* Defined in makedefs/MakeInc.def */
#define KASAN_SHADOW_MIN	0xfffffff400000000ULL
#define KASAN_SHADOW_MAX	0xfffffff680000000ULL

_Static_assert(KASAN_SHIFT == KASAN_SHIFT_ARM64, "KASan inconsistent shadow shift");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");
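
/*
 * Sketch of the address arithmetic, assuming SHADOW_FOR_ADDRESS() follows the
 * usual KASan scheme of shadow(x) = (x >> 3) + KASAN_SHIFT: one shadow byte
 * tracks each 8-byte granule of kernel VA. For a hypothetical kernel address
 * x = 0xfffffff007004000:
 *     x >> 3              = 0x1ffffffe00e00800
 *     + KASAN_SHIFT_ARM64 = 0xfffffff600e00800
 * which falls inside [KASAN_SHADOW_MIN, KASAN_SHADOW_MAX), as the static
 * asserts above guarantee for the whole kernel VA range.
 */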

static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;
	shadow_pages_used++;

	return mem;
}

static uintptr_t
alloc_zero_page(void)
{
	uintptr_t mem = alloc_page();
	__nosan_bzero((void *)phystokv(mem), ARM_PGBYTES);
	return mem;
}

static void
align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}
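
/*
 * Map the shadow bytes covering [address, address + size) into the live
 * kernel page tables (cpu_tte), creating intermediate table levels on demand.
 * With is_zero set, every shadow page is aliased read-only to the shared zero
 * page, so the whole region reads as all-valid; otherwise fresh zero-filled
 * pages are mapped read-write. With back_page false, only the intermediate
 * table levels are populated and no L3 leaf entries are written, which
 * preallocates the table pages without committing shadow memory.
 */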
static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
{
	size = (size + 0x7UL) & ~0x7UL;
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);

	assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
	assert((size & 0x7) == 0);

	for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
		uint64_t *base = cpu_tte;
		uint64_t *pte;

#if !__ARM64_TWO_LEVEL_PMAP__
		/* lookup L1 entry */
		pte = base + ((shadow_base & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create a new L2 table and install it in the L1 entry */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
#endif

		/* lookup L2 entry */
		pte = base + ((shadow_base & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

		if (!back_page) {
			continue;
		}

		/* lookup L3 entry */
		pte = base + ((shadow_base & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
		if ((*pte & ARM_PTE_TYPE_VALID) &&
		    ((((*pte) & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RONA)) || is_zero)) {
			/* nothing to do - page already mapped and we are not upgrading */
		} else {
			/* create new L3 entry */
			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA);
			}
			newpte |= ARM_PTE_TYPE_VALID
			       | ARM_PTE_AF
			       | ARM_PTE_SH(SH_OUTER_MEMORY)
			       | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
			       | ARM_PTE_NX
			       | ARM_PTE_PNX;
			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	kasan_map_shadow_internal(address, size, is_zero, true);
}

/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
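/*
 * Early variant of kasan_map_shadow_internal(): it runs while the identity
 * (V=P) map is still active, so freshly allocated table and shadow pages are
 * addressed by their physical addresses directly rather than through
 * phystokv(), and the walk starts from the bootstrap page table root passed
 * to kasan_bootstrap() instead of cpu_tte.
 */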
static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
{
	align_to_page(&address, &size);

	vm_size_t j;
	uint64_t *pte;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		uint64_t *base = (uint64_t *)bootstrap_pgtable_phys;

#if !__ARM64_TWO_LEVEL_PMAP__
		/* lookup L1 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create a new L2 table and install it in the L1 entry */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);
#endif

		/* lookup L2 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L3 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);

		if ((*pte & (ARM_PTE_TYPE|ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID|ARM_PTE_AP(AP_RWNA))) {
			/* L3 entry valid and mapped RW - do nothing */
		} else {
			/* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */

			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				vm_address_t pg = alloc_page();
				__nosan_bzero((void *)pg, ARM_PGBYTES);
				newpte = pg | ARM_PTE_AP(AP_RWNA);
			}

			/* add the default attributes */
			newpte |= ARM_PTE_TYPE_VALID
			       | ARM_PTE_AF
			       | ARM_PTE_SH(SH_OUTER_MEMORY)
			       | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
			       | ARM_PTE_NX
			       | ARM_PTE_PNX;

			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

void
kasan_arch_init(void)
{
	/* Map the physical aperture */
	kasan_map_shadow(kernel_vtop, physmap_vtop - kernel_vtop, true);

#if defined(KERNEL_INTEGRITY_KTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, false, false);
#endif
}

/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init(). Called while running with identity (V=P) map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	vm_offset_t extra = (vm_offset_t)&_mh_execute_header - pbase;

	kernel_vbase = args->virtBase;
	kernel_vtop = args->virtBase + ptop - pbase;

	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);
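
	/*
	 * For a rough sense of scale (hypothetical numbers; the real
	 * STOLEN_MEM_PERCENT and STOLEN_MEM_BYTES are defined elsewhere):
	 * with 25% and a 4 GB device, about 1 GB of physical memory would
	 * be carved off the top here for shadow pages and page tables.
	 */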

	args->memSize -= tosteal;

	/* Initialize the page allocator */
	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (long)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/* Set aside a page of zeros we can use for dummy shadow mappings */
	zero_page_phys = alloc_page();
	__nosan_bzero((void *)zero_page_phys, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;
	kasan_map_shadow_early(kernel_vbase + extra, args->memSize - extra, true);

	/* Shadow the early stacks */
	vm_offset_t p2v = args->virtBase - args->physBase;

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size, false);
	kasan_map_shadow_early(excepstack_virt, excepstack_size, false);
}
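
/*
 * Walk the live translation tables to check whether the given shadow address
 * is backed by a mapping at every level, so callers can probe a shadow byte
 * without risking a translation fault on an unmapped page.
 */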
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	uint64_t *pte;
	uint64_t *base = cpu_tte;

	assert(shadowp >= KASAN_SHADOW_MIN);
	assert(shadowp < KASAN_SHADOW_MAX);

#if !__ARM64_TWO_LEVEL_PMAP__
	/* lookup L1 entry */
	pte = base + ((shadowp & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
#endif

	/* lookup L2 entry */
	pte = base + ((shadowp & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L3 entry */
	pte = base + ((shadowp & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
	if (!(*pte & ARM_PTE_TYPE_VALID)) {
		return false;
	}

	return true;
}