/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <i386/proc_reg.h>
#include <i386/machine_routines.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <pexpert/i386/efi.h>
#include <pexpert/i386/boot.h>
#include <sys/queue.h>
#include <kasan.h>
#include <kasan_internal.h>
#include <vm/pmap.h>
#include <memintrinsics.h>

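/*
 * Shadow memory is backed by a physically contiguous range stolen from EFI
 * at boot (see kasan_reserve_memory below) and accessed through the kernel
 * physmap: phys2virt() converts a stolen physical address to its physmap
 * virtual alias.
 */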
extern uint64_t *IdlePML4;
extern uintptr_t physmap_base;
extern uintptr_t physmap_max;
#define phys2virt(x) ((uintptr_t)(x) + physmap_base)

#define INTEL_PTE_VALID    0x00000001ULL
#define INTEL_PTE_WRITE    0x00000002ULL
#define INTEL_PTE_RW       0x00000002ULL
#define INTEL_PTE_USER     0x00000004ULL
#define INTEL_PTE_WTHRU    0x00000008ULL
#define INTEL_PTE_NCACHE   0x00000010ULL
#define INTEL_PTE_REF      0x00000020ULL
#define INTEL_PTE_MOD      0x00000040ULL
#define INTEL_PTE_PS       0x00000080ULL
#define INTEL_PTE_PTA      0x00000080ULL
#define INTEL_PTE_GLOBAL   0x00000100ULL
#define INTEL_PTE_WIRED    0x00000200ULL
#define INTEL_PDPTE_NESTED 0x00000400ULL
#define INTEL_PTE_PFN      PG_FRAME
#define INTEL_PTE_NX       (1ULL << 63)
#define INTEL_PTE_INVALID  0

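/* The stolen physical range backing the shadow, and the bump-allocator cursor. */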
vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
vm_offset_t shadow_pnext;
unsigned shadow_stolen_idx;

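/* Physical base of a superpage of zeros, shared by all dummy shadow mappings. */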
static vm_offset_t zero_superpage_phys;

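/*
 * A canonical 48-bit virtual address decomposed into its four page table
 * indices (9 bits per level) and the 12-bit page offset.
 */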
typedef struct {
	unsigned int pml4   : 9;
	unsigned int pdpt   : 9;
	unsigned int pd     : 9;
	unsigned int pt     : 9;
	unsigned int offset : 12;
} split_addr_t;

static split_addr_t
split_address(vm_offset_t address)
{
	split_addr_t addr;

	addr.pml4 = (address >> 39) & 0x1ff;
	addr.pdpt = (address >> 30) & 0x1ff;
	addr.pd   = (address >> 21) & 0x1ff;
	addr.pt   = (address >> 12) & 0x1ff;
	// addr.offset = address & PAGE_MASK;

	return addr;
}

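/*
 * Bump allocator over the stolen physical range. Returns the physical
 * address of a page; there is no free path, shadow memory is never returned.
 */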
static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + I386_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += I386_PGBYTES;
	shadow_pages_used++;

	return mem;
}

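/* Round up to the next superpage (2MB) boundary. */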
#define ROUND_SUPERPAGE(x) ((((uintptr_t)(x)) + I386_LPGBYTES - 1) & ~(I386_LPGMASK))

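/*
 * Allocate a superpage-aligned superpage. Pages skipped to reach alignment
 * are never reclaimed (see the XXX note below).
 */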
static uintptr_t
alloc_superpage(void)
{
	uintptr_t mem;
	shadow_pnext = ROUND_SUPERPAGE(shadow_pnext);
	assert((shadow_pnext & I386_LPGMASK) == 0);
	mem = shadow_pnext;
	shadow_pnext += I386_LPGBYTES;
	shadow_pages_used += I386_LPGBYTES / I386_PGBYTES;
	/* XXX: not accounting for superpage rounding */
	return mem;
}

static uintptr_t
alloc_page_zero(void)
{
	uintptr_t mem = alloc_page();
	bzero_phys(mem, I386_PGBYTES);
	return mem;
}

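/*
 * Map the shadow of [address, address + size) read-only onto the shared zero
 * superpage. The shadow is 1/8 scale (SHADOW_FOR_ADDRESS, from the KASAN
 * headers, maps 8 bytes of address space to 1 shadow byte), so one 2MB
 * superpage of shadow covers 8 superpages of address space, hence the
 * loop stride.
 */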
static void
kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
{
	address = vm_map_trunc_page(address, I386_LPGMASK);
	size = vm_map_round_page(size, I386_LPGMASK);

	vm_size_t j;
	for (j = 0; j < size; j += I386_LPGBYTES * 8) {

		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		split_addr_t addr = split_address(virt_shadow_target);
		assert(addr.pml4 == 507 || addr.pml4 == 508);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;

		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		L1 = (uint64_t *)(L2[addr.pd] & ~PAGE_MASK);
		if (L1 == NULL) {
			L2[addr.pd] = (uint64_t)zero_superpage_phys
				| INTEL_PTE_VALID
				| INTEL_PTE_PS
				| INTEL_PTE_NX;
		} else {
			panic("Unexpected shadow mapping, addr = %lx, sz = %lu\n",
			    address, size);
		}

		/* adding a new entry, this is not strictly required */
		invlpg(virt_shadow_target);
	}
}

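/*
 * Map the shadow of [address, address + size) with 4K pages, allocating
 * intermediate page tables as needed. With is_zero set, leaf entries are
 * pointed read-only at the zero superpage instead of fresh writable pages.
 * Shadow already covered by a superpage entry is left untouched.
 */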
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	size = (size + 0x7UL) & ~0x7UL;
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), PAGE_MASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), PAGE_MASK);

	assert((size & 0x7) == 0);

	for (; shadow_base < shadow_top; shadow_base += I386_PGBYTES) {

		split_addr_t addr = split_address(shadow_base);
		assert(addr.pml4 == 507 || addr.pml4 == 508);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;
		uint64_t *pte;

		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		uint64_t pde = L2[addr.pd];
		if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
			/* Already mapped as a superpage */
			continue;
		}

		L1 = (uint64_t *)(pde & ~PAGE_MASK);
		if (L1 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L1 = (uint64_t *)phys2virt(pmem);
			L2[addr.pd] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L1 = (uint64_t *)phys2virt(L1);
		}

		pte = (uint64_t *)(L1[addr.pt] & ~PAGE_MASK);
		if (pte == NULL) {
			uint64_t newpte;
			if (is_zero) {
				newpte = (uint64_t)zero_superpage_phys;
			} else {
				newpte = (vm_offset_t)alloc_page_zero()
					| INTEL_PTE_WRITE;
			}
			L1[addr.pt] = newpte
				| INTEL_PTE_VALID
				| INTEL_PTE_NX;

			/* adding a new entry, this is not strictly required */
			invlpg(shadow_base);
		}
	}
}

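/*
 * Architecture-specific initialization: zero the shared superpage, then back
 * the entire physical aperture with zero shadow.
 */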
void
kasan_arch_init(void)
{
	__nosan_bzero((void *)phys2virt(zero_superpage_phys), I386_LPGBYTES);

	/* Map the physical aperture */
	kasan_map_shadow_superpage_zero(physmap_base, physmap_max - physmap_base);
}

/*
 * Steal some memory from EFI for the shadow map.
 */
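/*
 * The amount stolen is STOLEN_MEM_PERCENT of physical memory plus a fixed
 * STOLEN_MEM_BYTES slop, taken from the end of the first sufficiently large
 * range of conventional memory.
 */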
void
kasan_reserve_memory(void *_args)
{
	boot_args *args = (boot_args *)_args;
	vm_address_t pbase = args->kaddr;
	vm_address_t ptop = args->kaddr + args->ksize;

	kernel_vbase = ml_static_ptovirt(pbase);
	kernel_vtop = ml_static_ptovirt(ptop);

	EfiMemoryRange *mptr, *mptr_tmp;
	unsigned int mcount;
	unsigned int msize;
	unsigned int i;
	unsigned long total_pages;
	unsigned long to_steal;

	mptr = (EfiMemoryRange *)ml_static_ptovirt((vm_offset_t)args->MemoryMap);
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

	/* sum total physical memory */
	total_pages = 0;
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		total_pages += mptr_tmp->NumberOfPages;
	}

	to_steal = (total_pages * STOLEN_MEM_PERCENT) / 100 + (STOLEN_MEM_BYTES / I386_PGBYTES);

	/* Search for a range large enough to steal from */
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		ppnum_t base, top;
		base = (ppnum_t)(mptr_tmp->PhysicalStart >> I386_PGSHIFT);
		top = (ppnum_t)((mptr_tmp->PhysicalStart >> I386_PGSHIFT) + mptr_tmp->NumberOfPages - 1);

		if ((mptr_tmp->Type == kEfiConventionalMemory) && (mptr_tmp->NumberOfPages > to_steal)) {
			/* Found a region with sufficient space - steal from the end */
			mptr_tmp->NumberOfPages -= to_steal;

			shadow_pbase = mptr_tmp->PhysicalStart + (mptr_tmp->NumberOfPages << I386_PGSHIFT);
			shadow_ptop = shadow_pbase + (to_steal << I386_PGSHIFT);
			shadow_pnext = shadow_pbase;
			shadow_pages_total = to_steal;
			shadow_stolen_idx = i;

			/* Set aside a superpage of zeros we can use for dummy shadow mappings */
			zero_superpage_phys = alloc_superpage();

			return;
		}
	}

	panic("KASAN: could not reserve memory");
}

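/*
 * Walk the live page tables for a shadow address and report whether it is
 * backed by any mapping, superpage or 4K.
 */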
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	split_addr_t addr = split_address(shadowp);
	assert(addr.pml4 == 507 || addr.pml4 == 508);

	uint64_t *L3;
	uint64_t *L2;
	uint64_t *L1;

	L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
	if (L3 == NULL) {
		return false;
	}
	L3 = (uint64_t *)phys2virt(L3);

	L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
	if (L2 == NULL) {
		return false;
	}
	L2 = (uint64_t *)phys2virt(L2);

	uint64_t pde = L2[addr.pd];
	if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
		/* mapped as superpage */
		return true;
	}
	L1 = (uint64_t *)(pde & ~PAGE_MASK);
	if (L1 == NULL) {
		return false;
	}
	L1 = (uint64_t *)phys2virt(L1);

	if (L1[addr.pt] & INTEL_PTE_VALID) {
		return true;
	}

	return false;
}