/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

#include <pexpert/arm64/boot.h>
#include <arm64/tlb.h>

#include <libkern/kernel_mach_header.h>

extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;

typedef uint64_t pmap_paddr_t;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
static vm_offset_t shadow_pnext;

static vm_offset_t zero_page_phys;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

void kasan_bootstrap(boot_args *, vm_offset_t pgtable);

#define KASAN_SHIFT_ARM64 0xe000000000000000ULL /* Defined in makedefs/MakeInc.def */
#define KASAN_SHADOW_MIN  0xfffffffc00000000ULL
#define KASAN_SHADOW_MAX  0xffffffff80000000ULL

_Static_assert(KASAN_SHIFT == KASAN_SHIFT_ARM64, "KASan inconsistent shadow shift");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");
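
/*
 * The shadow follows the usual KASan 8:1 scheme: one shadow byte tracks each
 * 8-byte granule of kernel VA, so SHADOW_FOR_ADDRESS() (defined in the shared
 * KASan headers) presumably reduces to (addr >> 3) + KASAN_SHIFT, and the
 * asserts above check that the resulting window for kernel VA fits inside
 * [KASAN_SHADOW_MIN, KASAN_SHADOW_MAX).
 */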
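/*
 * Trivial bump allocator over the physical range stolen in kasan_bootstrap():
 * hands out one ARM_PGBYTES-sized physical page at a time from shadow_pnext
 * and never frees.
 */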
static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;
	shadow_pages_used++;

	return mem;
}

static uintptr_t
alloc_zero_page(void)
{
	uintptr_t mem = alloc_page();
	__nosan_bzero((void *)phystokv(mem), ARM_PGBYTES);
	return mem;
}

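/*
 * align_to_page() expands (*addrp, *sizep) outward to page boundaries. For
 * example, assuming 4 KiB pages (ARM_PGBYTES == 0x1000): addr 0x1234 with
 * size 0x10 becomes addr 0x1000 with size 0x1000.
 */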
static void
align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}
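/*
 * Map the shadow for [address, address + size) in the live kernel page tables
 * (cpu_tte), allocating any missing translation tables from the KASan page
 * allocator. With is_zero the range is backed read-only by the shared zero
 * page (all-zero shadow, i.e. nothing poisoned) instead of private RW shadow
 * pages; with back_page == false the walk stops after creating the table
 * levels, leaving the final L3 entries to be installed later.
 */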
static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
{
	size = (size + 0x7UL) & ~0x7UL;
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);

	assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
	assert((size & 0x7) == 0);

	for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
		uint64_t *base = cpu_tte;
		uint64_t *pte;

		/* lookup L1 entry */
		pte = base + ((shadow_base & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L1 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L2 entry */
		pte = base + ((shadow_base & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

		if (!back_page) {
			continue;
		}

		/* lookup L3 entry */
		pte = base + ((shadow_base & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
		if ((*pte & ARM_PTE_TYPE_VALID) &&
		    ((((*pte) & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RONA)) || is_zero)) {
			/* nothing to do - page already mapped and we are not
			 * upgrading */
		} else {
			/* create new L3 entry */
			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA);
			}
			newpte |= ARM_PTE_TYPE_VALID
			    | ARM_PTE_AF
			    | ARM_PTE_SH(SH_OUTER_MEMORY)
			    | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
			    | ARM_PTE_NX
			    | ARM_PTE_PNX;
			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	kasan_map_shadow_internal(address, size, is_zero, true);
}

/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
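/*
 * Early variant of kasan_map_shadow(): called from kasan_bootstrap() while
 * the identity (V=P) map is still active, so it walks the bootstrap page
 * tables at bootstrap_pgtable_phys and dereferences physical addresses
 * directly rather than going through phystokv().
 */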
static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
{
	align_to_page(&address, &size);

	vm_size_t j;
	uint64_t *pte;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		uint64_t *base = (uint64_t *)bootstrap_pgtable_phys;

		/* lookup L1 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L1 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L2 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L3 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);

		if ((*pte & (ARM_PTE_TYPE | ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID | ARM_PTE_AP(AP_RWNA))) {
			/* L3 entry valid and mapped RW - do nothing */
		} else {
			/* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */

			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				vm_address_t pg = alloc_page();
				__nosan_bzero((void *)pg, ARM_PGBYTES);
				newpte = pg | ARM_PTE_AP(AP_RWNA);
			}

			/* add the default attributes */
			newpte |= ARM_PTE_TYPE_VALID
			    | ARM_PTE_AF
			    | ARM_PTE_SH(SH_OUTER_MEMORY)
			    | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
			    | ARM_PTE_NX
			    | ARM_PTE_PNX;

			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

void
kasan_arch_init(void)
{
	/* Map the physical aperture */
	kasan_map_shadow(kernel_vtop, physmap_vtop - kernel_vtop, true);

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, false, false);
#endif
}

/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init(). Called while running with identity (V=P) map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	vm_offset_t extra = (vm_offset_t)&_mh_execute_header - pbase;

	kernel_vbase = args->virtBase;
	kernel_vtop = args->virtBase + ptop - pbase;

	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);

	args->memSize -= tosteal;

	/* Initialize the page allocator */
	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (long)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/* Set aside a page of zeros we can use for dummy shadow mappings */
	zero_page_phys = alloc_page();
	__nosan_bzero((void *)zero_page_phys, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;
	kasan_map_shadow_early(kernel_vbase + extra, args->memSize - extra, true);

	/* Shadow the early stacks */
	vm_offset_t p2v = args->virtBase - args->physBase;

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size, false);
	kasan_map_shadow_early(excepstack_virt, excepstack_size, false);
}
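/*
 * Report whether a shadow address is currently backed: walk the live kernel
 * page tables from cpu_tte and return true only if a valid entry exists at
 * every level down to the L3 PTE.
 */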
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	uint64_t *pte;
	uint64_t *base = cpu_tte;

	assert(shadowp >= KASAN_SHADOW_MIN);
	assert(shadowp < KASAN_SHADOW_MAX);

	/* lookup L1 entry */
	pte = base + ((shadowp & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L2 entry */
	pte = base + ((shadowp & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L3 entry */
	pte = base + ((shadowp & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
	if (!(*pte & ARM_PTE_TYPE_VALID)) {
		return false;
	}

	return true;
}