/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <i386/proc_reg.h>
#include <i386/machine_routines.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <sys/param.h>
#include <i386/pmap.h>
#include <libkern/libkern.h>
#include <pexpert/i386/efi.h>
#include <pexpert/i386/boot.h>
#include <sys/queue.h>
#include <kasan.h>
#include <kasan_internal.h>
#include <vm/pmap.h>
#include <memintrinsics.h>

extern uint64_t *IdlePML4;
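/* Convert a physical address into its virtual alias in the physical aperture (physmap). */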
#define phys2virt(x) ((uintptr_t)(x) + physmap_base)

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
vm_offset_t shadow_pnext;
unsigned shadow_stolen_idx;

static vm_offset_t zero_superpage_phys;

typedef struct {
	unsigned int pml4 : 9;
	unsigned int pdpt : 9;
	unsigned int pd : 9;
	unsigned int pt : 9;
	unsigned int offset : 12;
} split_addr_t;

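/*
 * Split a kernel virtual address into its four page-table indices
 * (9 bits each for PML4, PDPT, PD and PT) plus a 12-bit page offset.
 */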
static split_addr_t
split_address(vm_offset_t address)
{
	split_addr_t addr;

	addr.pml4 = (address >> 39) & 0x1ff;
	addr.pdpt = (address >> 30) & 0x1ff;
	addr.pd = (address >> 21) & 0x1ff;
	addr.pt = (address >> 12) & 0x1ff;
	// addr.offset = address & PAGE_MASK;

	return addr;
}

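/*
 * Bump-allocate one physical page from the range stolen from EFI by
 * kasan_reserve_memory(). Panics once the stolen range is exhausted.
 */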
static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + I386_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += I386_PGBYTES;
	shadow_pages_used++;

	return mem;
}

#define ROUND_SUPERPAGE(x) ((((uintptr_t)(x)) + I386_LPGBYTES - 1) & ~(I386_LPGMASK))

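/*
 * Bump-allocate one 2 MB superpage, first rounding the allocation cursor
 * up to a superpage boundary.
 */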
static uintptr_t
alloc_superpage(void)
{
	uintptr_t mem;
	shadow_pnext = ROUND_SUPERPAGE(shadow_pnext);
	assert((shadow_pnext & I386_LPGMASK) == 0);
	mem = shadow_pnext;
	shadow_pnext += I386_LPGBYTES;
	shadow_pages_used += I386_LPGBYTES / I386_PGBYTES;
	/* XXX: not accounting for superpage rounding */
	return mem;
}

static uintptr_t
alloc_page_zero(void)
{
	uintptr_t mem = alloc_page();
	bzero_phys(mem, I386_PGBYTES);
	return mem;
}

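/*
 * Map the shadow for [address, address + size) read-only onto the shared
 * zero superpage. With KASAN's 8:1 address-to-shadow scaling, one 2 MB
 * shadow superpage covers 8 * 2 MB of address space, hence the loop stride.
 */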
static void
kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
{
	address = vm_map_trunc_page(address, I386_LPGMASK);
	size = vm_map_round_page(size, I386_LPGMASK);

	vm_size_t j;
	for (j = 0; j < size; j += I386_LPGBYTES * 8) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		split_addr_t addr = split_address(virt_shadow_target);
		assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
		    addr.pml4 <= KERNEL_KASAN_PML4_LAST);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;

		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		L1 = (uint64_t *)(L2[addr.pd] & ~PAGE_MASK);
		if (L1 == NULL) {
			L2[addr.pd] = (uint64_t)zero_superpage_phys
			    | INTEL_PTE_VALID
			    | INTEL_PTE_PS
			    | INTEL_PTE_NX;
		} else {
			panic("Unexpected shadow mapping, addr = %lx, sz = %lu\n",
			    address, size);
		}

		/* adding a new entry, this is not strictly required */
		invlpg(virt_shadow_target);
	}
}

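/*
 * Populate the shadow mapping for [address, address + size), allocating any
 * missing paging structures along the way. When is_zero is set, shadow pages
 * are backed read-only by the shared zero page; otherwise each shadow page
 * gets its own zero-filled writable page.
 */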
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	size = kasan_granule_round(size);

	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), PAGE_MASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), PAGE_MASK);

	assert(kasan_granule_partial(size) == 0);

	for (; shadow_base < shadow_top; shadow_base += I386_PGBYTES) {
		split_addr_t addr = split_address(shadow_base);
		assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
		    addr.pml4 <= KERNEL_KASAN_PML4_LAST);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;
		uint64_t *pte;

		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		uint64_t pde = L2[addr.pd];
		if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
			/* Already mapped as a superpage */
			continue;
		}

		L1 = (uint64_t *)(pde & ~PAGE_MASK);
		if (L1 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L1 = (uint64_t *)phys2virt(pmem);
			L2[addr.pd] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L1 = (uint64_t *)phys2virt(L1);
		}

		pte = (uint64_t *)(L1[addr.pt] & ~PAGE_MASK);
		if (pte == NULL) {
			uint64_t newpte;
			if (is_zero) {
				newpte = (uint64_t)zero_superpage_phys;
			} else {
				newpte = (vm_offset_t)alloc_page_zero()
				    | INTEL_PTE_WRITE;
			}
			L1[addr.pt] = newpte
			    | INTEL_PTE_VALID
			    | INTEL_PTE_NX;

			/* adding a new entry, this is not strictly required */
			invlpg(shadow_base);
		}
	}
}

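/*
 * Zero the shared superpage and map the shadow of the entire physical
 * aperture onto it, so accesses through the physmap see valid (zero) shadow.
 */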
void
kasan_arch_init(void)
{
	__nosan_bzero((void *)phys2virt(zero_superpage_phys), I386_LPGBYTES);

	/* Map the physical aperture */
	kasan_map_shadow_superpage_zero(physmap_base, physmap_max - physmap_base);
}

/*
 * Steal some memory from EFI for the shadow map.
 */
void
kasan_reserve_memory(void *_args)
{
	boot_args *args = (boot_args *)_args;
	vm_address_t pbase = args->kaddr;
	vm_address_t ptop = args->kaddr + args->ksize;

	kernel_vbase = ml_static_ptovirt(pbase);
	kernel_vtop = ml_static_ptovirt(ptop);

	EfiMemoryRange *mptr, *mptr_tmp;
	unsigned int mcount;
	unsigned int msize;
	unsigned int i;
	unsigned long total_pages;
	unsigned long to_steal;

	mptr = (EfiMemoryRange *)ml_static_ptovirt((vm_offset_t)args->MemoryMap);
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

	/* sum total physical memory */
	total_pages = 0;
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		total_pages += mptr_tmp->NumberOfPages;
	}

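	/* Reserve STOLEN_MEM_PERCENT of physical memory, plus a flat STOLEN_MEM_BYTES. */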
	to_steal = (unsigned long)(total_pages * STOLEN_MEM_PERCENT) / 100 + (STOLEN_MEM_BYTES / I386_PGBYTES);

	/* Search for a range large enough to steal from */
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		ppnum_t base, top;
		base = (ppnum_t)(mptr_tmp->PhysicalStart >> I386_PGSHIFT);
		top = (ppnum_t)((mptr_tmp->PhysicalStart >> I386_PGSHIFT) + mptr_tmp->NumberOfPages - 1);

		if ((mptr_tmp->Type == kEfiConventionalMemory) && (mptr_tmp->NumberOfPages > to_steal)) {
			/* Found a region with sufficient space - steal from the end */
			mptr_tmp->NumberOfPages -= to_steal;

			shadow_pbase = mptr_tmp->PhysicalStart + (mptr_tmp->NumberOfPages << I386_PGSHIFT);
			shadow_ptop = shadow_pbase + (to_steal << I386_PGSHIFT);
			shadow_pnext = shadow_pbase;
			shadow_pages_total = (unsigned int)to_steal;
			shadow_stolen_idx = i;

			/* Set aside a superpage of zeros we can use for dummy shadow mappings */
			zero_superpage_phys = alloc_superpage();

			return;
		}
	}

	panic("KASAN: could not reserve memory");
}

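/*
 * Walk the page tables to determine whether the given shadow address is
 * backed by either a 4 KB page or a 2 MB superpage mapping.
 */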
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	split_addr_t addr = split_address(shadowp);
	assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
	    addr.pml4 <= KERNEL_KASAN_PML4_LAST);

	uint64_t *L3;
	uint64_t *L2;
	uint64_t *L1;

	L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
	if (L3 == NULL) {
		return false;
	}
	L3 = (uint64_t *)phys2virt(L3);

	L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
	if (L2 == NULL) {
		return false;
	}
	L2 = (uint64_t *)phys2virt(L2);

	uint64_t pde = L2[addr.pd];
	if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
		/* mapped as superpage */
		return true;
	}
	L1 = (uint64_t *)(pde & ~PAGE_MASK);
	if (L1 == NULL) {
		return false;
	}
	L1 = (uint64_t *)phys2virt(L1);

	if (L1[addr.pt] & INTEL_PTE_VALID) {
		return true;
	}

	return false;
}