/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <i386/proc_reg.h>
#include <i386/machine_routines.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <pexpert/i386/efi.h>
#include <pexpert/i386/boot.h>
#include <sys/queue.h>
#include <kasan.h>
#include <kasan_internal.h>
#include <vm/pmap.h>
#include <memintrinsics.h>

extern uint64_t *IdlePML4;
extern uintptr_t physmap_base;
extern uintptr_t physmap_max;
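/*
 * All of physical memory is aliased into the kernel's physmap window
 * [physmap_base, physmap_max), so adding physmap_base to a physical
 * address yields a mapped virtual address for it.
 */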
#define phys2virt(x) ((uintptr_t)(x) + physmap_base)

#define INTEL_PTE_VALID 0x00000001ULL
#define INTEL_PTE_WRITE 0x00000002ULL
#define INTEL_PTE_RW 0x00000002ULL
#define INTEL_PTE_USER 0x00000004ULL
#define INTEL_PTE_WTHRU 0x00000008ULL
#define INTEL_PTE_NCACHE 0x00000010ULL
#define INTEL_PTE_REF 0x00000020ULL
#define INTEL_PTE_MOD 0x00000040ULL
#define INTEL_PTE_PS 0x00000080ULL
#define INTEL_PTE_PTA 0x00000080ULL
#define INTEL_PTE_GLOBAL 0x00000100ULL
#define INTEL_PTE_WIRED 0x00000200ULL
#define INTEL_PDPTE_NESTED 0x00000400ULL
#define INTEL_PTE_PFN PG_FRAME
#define INTEL_PTE_NX (1ULL << 63)
#define INTEL_PTE_INVALID 0

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
vm_offset_t shadow_pnext;
unsigned shadow_stolen_idx;

static vm_offset_t zero_superpage_phys;

typedef struct {
	unsigned int pml4 : 9;
	unsigned int pdpt : 9;
	unsigned int pd   : 9;
	unsigned int pt   : 9;
	unsigned int offset : 12;
} split_addr_t;

static split_addr_t
split_address(vm_offset_t address)
{
	split_addr_t addr;

	addr.pml4 = (address >> 39) & 0x1ff;
	addr.pdpt = (address >> 30) & 0x1ff;
	addr.pd   = (address >> 21) & 0x1ff;
	addr.pt   = (address >> 12) & 0x1ff;
	// addr.offset = address & PAGE_MASK;

	return addr;
}
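
/*
 * split_address() example: the canonical kernel address 0xfffffd8000000000
 * decodes to pml4 = 507, pdpt = pd = pt = 0; slots 507 and 508 are the two
 * PML4 entries the shadow region occupies (see the asserts below).
 */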

static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + I386_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += I386_PGBYTES;
	shadow_pages_used++;

	return mem;
}

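/* Round up to the next 2 MB large-page boundary (I386_LPGBYTES bytes). */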
#define ROUND_SUPERPAGE(x) ((((uintptr_t)(x)) + I386_LPGBYTES - 1) & ~(I386_LPGMASK))

static uintptr_t
alloc_superpage(void)
{
	uintptr_t mem;
	shadow_pnext = ROUND_SUPERPAGE(shadow_pnext);
	assert((shadow_pnext & I386_LPGMASK) == 0);
	mem = shadow_pnext;
	shadow_pnext += I386_LPGBYTES;
	shadow_pages_used += I386_LPGBYTES / I386_PGBYTES;
	/* XXX: not accounting for superpage rounding */
	return mem;
}

static uintptr_t
alloc_page_zero(void)
{
	uintptr_t mem = alloc_page();
	bzero_phys(mem, I386_PGBYTES);
	return mem;
}

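/*
 * Back the shadow for [address, address + size) entirely with the shared
 * read-only zero superpage: every shadow byte reads as zero, i.e. the
 * whole range is treated as accessible.
 */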
static void
kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
{
	address = vm_map_trunc_page(address, I386_LPGMASK);
	size = vm_map_round_page(size, I386_LPGMASK);

	vm_size_t j;
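	/*
	 * KASAN shadow is scaled 8:1, so one 2 MB shadow superpage covers
	 * 8 * 2 MB = 16 MB of address space; stride the loop accordingly.
	 */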
	for (j = 0; j < size; j += I386_LPGBYTES * 8) {

		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		split_addr_t addr = split_address(virt_shadow_target);
		assert(addr.pml4 == 507 || addr.pml4 == 508);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;

		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		L1 = (uint64_t *)(L2[addr.pd] & ~PAGE_MASK);
		if (L1 == NULL) {
			L2[addr.pd] = (uint64_t)zero_superpage_phys
				| INTEL_PTE_VALID
				| INTEL_PTE_PS
				| INTEL_PTE_NX;
		} else {
			panic("Unexpected shadow mapping, addr = %lx, sz = %lu\n",
			    address, size);
		}

		/* adding a new entry, this is not strictly required */
		invlpg(virt_shadow_target);
	}
}

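/*
 * Map the shadow for [address, address + size) with 4 KB pages, creating
 * any missing intermediate table levels. When is_zero is set, leaf
 * entries are backed read-only by the shared zero page.
 */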
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), PAGE_MASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), PAGE_MASK);

	for (; shadow_base < shadow_top; shadow_base += I386_PGBYTES) {

		split_addr_t addr = split_address(shadow_base);
		assert(addr.pml4 == 507 || addr.pml4 == 508);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;
		uint64_t *pte;

		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		uint64_t pde = L2[addr.pd];
		if ((pde & (INTEL_PTE_VALID|INTEL_PTE_PS)) == (INTEL_PTE_VALID|INTEL_PTE_PS)) {
			/* Already mapped as a superpage */
			continue;
		}

		L1 = (uint64_t *)(pde & ~PAGE_MASK);
		if (L1 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L1 = (uint64_t *)phys2virt(pmem);
			L2[addr.pd] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L1 = (uint64_t *)phys2virt(L1);
		}

		pte = (uint64_t *)(L1[addr.pt] & ~PAGE_MASK);
		if (pte == NULL) {
			uint64_t newpte;
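			/*
			 * Zero ranges are backed read-only by the shared zero
			 * page; others get a fresh, writable zeroed page.
			 */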
			if (is_zero) {
				newpte = (uint64_t)zero_superpage_phys;
			} else {
				newpte = (vm_offset_t)alloc_page_zero()
					| INTEL_PTE_WRITE;
			}
			L1[addr.pt] = newpte
				| INTEL_PTE_VALID
				| INTEL_PTE_NX;

			/* adding a new entry, this is not strictly required */
			invlpg(shadow_base);
		}
	}
}

void
kasan_arch_init(void)
{
	__nosan_bzero((void *)phys2virt(zero_superpage_phys), I386_LPGBYTES);

	/* Map the physical aperture */
	kasan_map_shadow_superpage_zero(physmap_base, physmap_max - physmap_base);
}

/*
 * Steal some memory from EFI for the shadow map.
 */
void
kasan_reserve_memory(void *_args)
{
	boot_args *args = (boot_args *)_args;
	vm_address_t pbase = args->kaddr;
	vm_address_t ptop = args->kaddr + args->ksize;

	kernel_vbase = ml_static_ptovirt(pbase);
	kernel_vtop = ml_static_ptovirt(ptop);

	EfiMemoryRange *mptr, *mptr_tmp;
	unsigned int mcount;
	unsigned int msize;
	unsigned int i;
	unsigned long total_pages;
	unsigned long to_steal;

	mptr = (EfiMemoryRange *)ml_static_ptovirt((vm_offset_t)args->MemoryMap);
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

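	/*
	 * EFI memory map descriptors are strided by the firmware-reported
	 * descriptor size, which may exceed sizeof(EfiMemoryRange); hence
	 * the walks below advance by msize bytes instead of array indexing.
	 */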
	/* sum total physical memory */
	total_pages = 0;
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		total_pages += mptr_tmp->NumberOfPages;
	}

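	/*
	 * Reserve a fixed percentage of physical memory for the shadow, plus
	 * a constant floor (STOLEN_MEM_PERCENT and STOLEN_MEM_BYTES are
	 * defined in the KASAN headers).
	 */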
	to_steal = (total_pages * STOLEN_MEM_PERCENT) / 100 + (STOLEN_MEM_BYTES / I386_PGBYTES);

	/* Search for a range large enough to steal from */
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		ppnum_t base, top;
		base = (ppnum_t)(mptr_tmp->PhysicalStart >> I386_PGSHIFT);
		top = (ppnum_t)((mptr_tmp->PhysicalStart >> I386_PGSHIFT) + mptr_tmp->NumberOfPages - 1);

		if ((mptr_tmp->Type == kEfiConventionalMemory) && (mptr_tmp->NumberOfPages > to_steal)) {
			/* Found a region with sufficient space - steal from the end */
			mptr_tmp->NumberOfPages -= to_steal;

			shadow_pbase = mptr_tmp->PhysicalStart + (mptr_tmp->NumberOfPages << I386_PGSHIFT);
			shadow_ptop = shadow_pbase + (to_steal << I386_PGSHIFT);
			shadow_pnext = shadow_pbase;
			shadow_pages_total = to_steal;
			shadow_stolen_idx = i;

			/* Set aside a superpage of zeros we can use for dummy shadow mappings */
			zero_superpage_phys = alloc_superpage();

			return;
		}
	}

	panic("KASAN: could not reserve memory");
}

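/*
 * Walk the page tables for a shadow address and report whether it is
 * backed by a valid 4 KB page or 2 MB superpage mapping.
 */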
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	split_addr_t addr = split_address(shadowp);
	assert(addr.pml4 == 507 || addr.pml4 == 508);

	uint64_t *L3;
	uint64_t *L2;
	uint64_t *L1;

	L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
	if (L3 == NULL) {
		return false;
	}
	L3 = (uint64_t *)phys2virt(L3);

	L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
	if (L2 == NULL) {
		return false;
	}
	L2 = (uint64_t *)phys2virt(L2);

	uint64_t pde = L2[addr.pd];
	if ((pde & (INTEL_PTE_VALID|INTEL_PTE_PS)) == (INTEL_PTE_VALID|INTEL_PTE_PS)) {
		/* mapped as superpage */
		return true;
	}
	L1 = (uint64_t *)(pde & ~PAGE_MASK);
	if (L1 == NULL) {
		return false;
	}
	L1 = (uint64_t *)phys2virt(L1);

	if (L1[addr.pt] & INTEL_PTE_VALID) {
		return true;
	}

	return false;
}