/* san/kasan-arm64.c (from xnu-4570.41.2) */
/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

#include <pexpert/arm64/boot.h>
#include <arm64/proc_reg.h>

#include <libkern/kernel_mach_header.h>

extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;
#define phystokv(a) ((vm_address_t)(a) - gPhysBase + gVirtBase)

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
static vm_offset_t shadow_pnext;

static vm_offset_t zero_page_phys;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

void kasan_bootstrap(boot_args *, vm_offset_t pgtable);
void flush_mmu_tlb(void);

#ifndef __ARM_16K_PG__
#error "Unsupported HW config: Assuming 16K pages"
#endif

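/*
 * Shadow layout (as implied by the asserts below): each 8-byte granule of
 * kernel VA is covered by one shadow byte at (addr >> 3) + KASAN_SHIFT, so
 * the shadow for the whole kernel VM range must land inside
 * [KASAN_SHADOW_MIN, KASAN_SHADOW_MAX) without overlapping kernel VM.
 */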
#define KASAN_SHIFT_ARM64 0xdffffff800000000ULL /* Defined in makedefs/MakeInc.def */
#define KASAN_SHADOW_MIN  0xfffffff400000000ULL
#define KASAN_SHADOW_MAX  0xfffffff680000000ULL

_Static_assert(KASAN_SHIFT == KASAN_SHIFT_ARM64, "KASan inconsistent shadow shift");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");

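/*
 * Bump allocator over the physical pages stolen in kasan_bootstrap().
 * Pages are handed out in order from [shadow_pbase, shadow_ptop) and are
 * never freed; exhaustion is fatal.
 */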
static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;
	shadow_pages_used++;

	return mem;
}

static uintptr_t
alloc_zero_page(void)
{
	uintptr_t mem = alloc_page();
	__nosan_bzero((void *)phystokv(mem), ARM_PGBYTES);
	return mem;
}

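/*
 * Expand (*addrp, *sizep) outward so that the range starts and ends on
 * page boundaries.
 */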
static void
align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}

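/*
 * Map the shadow for [address, address + size) into the live kernel page
 * tables (cpu_tte), creating any missing translation table levels from the
 * shadow page allocator. With is_zero, the shadow is backed read-only by the
 * shared zero page; otherwise each shadow page gets a fresh writable page.
 * With back_page == false, only the table levels are populated and the leaf
 * entries are left untouched.
 */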
static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
{
	align_to_page(&address, &size);

	vm_size_t j;
	uint64_t *pte;

	/* XXX: this could be more efficient by walking through the shadow pages
	 * instead of the source pages */

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		uint64_t *base = cpu_tte;

#if !__ARM64_TWO_LEVEL_PMAP__
		/* lookup L1 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L1 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
#endif

		/* lookup L2 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

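		/* when only pre-allocating the table levels, stop before the leaf entry */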
		if (!back_page) {
			continue;
		}

		/* lookup L3 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
		if ((*pte & ARM_PTE_TYPE_VALID) &&
		    ((((*pte) & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RONA)) || is_zero)) {
			/* nothing to do - page already mapped and we are not
			 * upgrading */
		} else {
			/* create new L3 entry */
			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA);
			}
			newpte |= ARM_PTE_TYPE_VALID
				| ARM_PTE_AF
				| ARM_PTE_SH(SH_OUTER_MEMORY)
				| ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
				| ARM_PTE_NX
				| ARM_PTE_PNX;
			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	kasan_map_shadow_internal(address, size, is_zero, true);
}

/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
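/*
 * Like kasan_map_shadow_internal(), but used before the MMU is switched to
 * the kernel's own page tables: it walks the bootstrap tables (whose root is
 * passed in by physical address) and, since the identity (V=P) map is still
 * active, dereferences table and page addresses directly without phystokv().
 */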
static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
{
	align_to_page(&address, &size);

	vm_size_t j;
	uint64_t *pte;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

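		/* V=P is still active, so the bootstrap table root is usable by its physical address */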
		uint64_t *base = (uint64_t *)bootstrap_pgtable_phys;

#if !__ARM64_TWO_LEVEL_PMAP__
		/* lookup L1 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L1 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);
#endif

		/* lookup L2 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L3 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);

		if ((*pte & (ARM_PTE_TYPE|ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID|ARM_PTE_AP(AP_RWNA))) {
			/* L3 entry valid and mapped RW - do nothing */
		} else {
			/* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */

			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				vm_address_t pg = alloc_page();
				__nosan_bzero((void *)pg, ARM_PGBYTES);
				newpte = pg | ARM_PTE_AP(AP_RWNA);
			}

			/* add the default attributes */
			newpte |= ARM_PTE_TYPE_VALID
				| ARM_PTE_AF
				| ARM_PTE_SH(SH_OUTER_MEMORY)
				| ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
				| ARM_PTE_NX
				| ARM_PTE_PNX;

			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

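/*
 * Called once the kernel's own page tables (cpu_tte) are live: shadow the
 * physical aperture, and on KTRR-enabled kernels pre-allocate the shadow
 * translation tables for the whole kernel VA range so that later shadow
 * mappings do not need to create table pages and trigger KTRR.
 */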
void
kasan_arch_init(void)
{
	assert(KASAN_SHADOW_MIN >= VM_MAX_KERNEL_ADDRESS);

	/* Map the physical aperture */
	kasan_map_shadow(kernel_vtop, physmap_vtop - kernel_vtop, true);

#if defined(KERNEL_INTEGRITY_KTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, false, false);
#endif
}

/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init(). Called while running with identity (V=P) map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	vm_offset_t extra = (vm_offset_t)&_mh_execute_header - pbase;

	kernel_vbase = args->virtBase;
	kernel_vtop = args->virtBase + ptop - pbase;

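	/* carve the shadow pool off the top of physical memory: a fixed
	 * percentage of it plus a fixed byte overhead, truncated to whole pages */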
	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);

	args->memSize -= tosteal;

	/* Initialize the page allocator */
	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (long)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/* Set aside a page of zeros we can use for dummy shadow mappings */
	zero_page_phys = alloc_page();
	__nosan_bzero((void *)zero_page_phys, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;
	kasan_map_shadow_early(kernel_vbase + extra, args->memSize - extra, true);

	/* Shadow the early stacks */
	vm_offset_t p2v = args->virtBase - args->physBase;

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size, false);
	kasan_map_shadow_early(excepstack_virt, excepstack_size, false);
}