/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <i386/proc_reg.h>
#include <i386/machine_routines.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <pexpert/i386/efi.h>
#include <pexpert/i386/boot.h>
#include <sys/queue.h>
#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>
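/*
 * KASAN shadow mapping for x86_64. Each 8-byte granule of kernel address
 * space is tracked by one shadow byte, so the shadow for an address is,
 * conceptually, (address >> 3) + shadow_offset; the SHADOW_FOR_ADDRESS()
 * macro (from the KASAN headers) performs that translation. The asserts
 * below require every shadow address to fall in PML4 slots 507 or 508,
 * i.e. the region starting at 0xFFFFFD8000000000 (507 << 39, sign-extended).
 * phys2virt() converts a physical address to its alias in the physmap
 * (the kernel's physical aperture), which is how page tables built here
 * are written before they are reachable any other way.
 */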
extern uint64_t *IdlePML4;
extern uintptr_t physmap_base;
extern uintptr_t physmap_max;

#define phys2virt(x) ((uintptr_t)(x) + physmap_base)
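/*
 * x86 page-table entry bits, following the architectural PTE/PDE flag
 * layout (bit 0 = present, bit 1 = writable, bit 7 = page size, and so
 * on). Only VALID, WRITE, and PS are used in this file.
 */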
#define INTEL_PTE_VALID    0x00000001ULL
#define INTEL_PTE_WRITE    0x00000002ULL
#define INTEL_PTE_RW       0x00000002ULL
#define INTEL_PTE_USER     0x00000004ULL
#define INTEL_PTE_WTHRU    0x00000008ULL
#define INTEL_PTE_NCACHE   0x00000010ULL
#define INTEL_PTE_REF      0x00000020ULL
#define INTEL_PTE_MOD      0x00000040ULL
#define INTEL_PTE_PS       0x00000080ULL
#define INTEL_PTE_PTA      0x00000080ULL
#define INTEL_PTE_GLOBAL   0x00000100ULL
#define INTEL_PTE_WIRED    0x00000200ULL
#define INTEL_PDPTE_NESTED 0x00000400ULL
#define INTEL_PTE_PFN      PG_FRAME
#define INTEL_PTE_NX       (1ULL << 63)
#define INTEL_PTE_INVALID  0
vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
vm_offset_t shadow_pnext;
unsigned shadow_pages_total;
unsigned shadow_pages_used;
unsigned shadow_stolen_idx;

static vm_offset_t zero_superpage_phys;
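/*
 * A 48-bit virtual address decomposed into its 4-level paging components:
 * four 9-bit table indices (PML4, PDPT, PD, PT) plus a 12-bit page offset.
 */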
typedef struct {
	unsigned int pml4   : 9;
	unsigned int pdpt   : 9;
	unsigned int pd     : 9;
	unsigned int pt     : 9;
	unsigned int offset : 12;
} split_addr_t;
static split_addr_t
split_address(vm_offset_t address)
{
	split_addr_t addr;

	addr.pml4 = (address >> 39) & 0x1ff;
	addr.pdpt = (address >> 30) & 0x1ff;
	addr.pd   = (address >> 21) & 0x1ff;
	addr.pt   = (address >> 12) & 0x1ff;
	// addr.offset = address & PAGE_MASK;

	return addr;
}
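/*
 * Illustrative only (not part of the original file): the inverse of
 * split_address(), recombining the paging indices into a canonical
 * virtual address. Included purely to document the bit layout.
 */
static inline vm_offset_t
join_address(split_addr_t addr)
{
	uint64_t va = ((uint64_t)addr.pml4 << 39)
	    | ((uint64_t)addr.pdpt << 30)
	    | ((uint64_t)addr.pd   << 21)
	    | ((uint64_t)addr.pt   << 12)
	    | addr.offset;
	/* sign-extend bit 47 to form a canonical x86_64 address */
	if (va & (1ULL << 47)) {
		va |= 0xffff000000000000ULL;
	}
	return (vm_offset_t)va;
}

/*
 * Bump allocators over the physical range stolen from EFI in
 * kasan_reserve_memory(). Pages are handed out in order and never freed.
 */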
static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + I386_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += I386_PGBYTES;
	shadow_pages_used++;

	return mem;
}
#define ROUND_SUPERPAGE(x) ((((uintptr_t)(x)) + I386_LPGBYTES - 1) & ~(I386_LPGMASK))
static uintptr_t
alloc_superpage(void)
{
	uintptr_t mem;
	shadow_pnext = ROUND_SUPERPAGE(shadow_pnext);
	assert((shadow_pnext & I386_LPGMASK) == 0);
	mem = shadow_pnext;
	shadow_pnext += I386_LPGBYTES;
	shadow_pages_used += I386_LPGBYTES / I386_PGBYTES;
	/* XXX: not accounting for superpage rounding */
	return mem;
}
static uintptr_t
alloc_page_zero(void)
{
	uintptr_t mem = alloc_page();
	bzero_phys(mem, I386_PGBYTES);
	return mem;
}
void
kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
{
	address = vm_map_trunc_page(address, I386_LPGMASK);
	size = vm_map_round_page(size, I386_LPGMASK);

	vm_size_t j;
	for (j = 0; j < size; j += I386_LPGBYTES * 8) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		split_addr_t addr = split_address(virt_shadow_target);
		assert(addr.pml4 == 507 || addr.pml4 == 508);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;

		/* Ensure the L3 (PDPT) table backing this shadow address exists */
		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		/* Ensure the L2 (page directory) exists */
		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		/* Point the PDE at the zero superpage, read-only */
		L1 = (uint64_t *)(L2[addr.pd] & ~PAGE_MASK);
		if (L1 == NULL) {
			L2[addr.pd] = (uint64_t)zero_superpage_phys
			    | INTEL_PTE_VALID
			    | INTEL_PTE_PS;
		} else {
			panic("Unexpected shadow mapping, addr = %lx, sz = %lu\n",
			    address, size);
		}

		/* adding a new entry, this is not strictly required */
		invlpg(virt_shadow_target);
	}
}
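/*
 * Populate the shadow for [address, address + size) one 4KB page at a
 * time, allocating intermediate table levels on demand. With is_zero set,
 * each shadow page is aliased read-only to the shared zero page rather
 * than given fresh writable backing; ranges already covered by a shadow
 * superpage are left alone.
 */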
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), PAGE_MASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), PAGE_MASK);

	for (; shadow_base < shadow_top; shadow_base += I386_PGBYTES) {
		split_addr_t addr = split_address(shadow_base);
		assert(addr.pml4 == 507 || addr.pml4 == 508);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;
		uint64_t *pte;

		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		uint64_t pde = L2[addr.pd];
		if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
			/* Already mapped as a superpage */
			continue;
		}

		L1 = (uint64_t *)(pde & ~PAGE_MASK);
		if (L1 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L1 = (uint64_t *)phys2virt(pmem);
			L2[addr.pd] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L1 = (uint64_t *)phys2virt(L1);
		}

		pte = (uint64_t *)(L1[addr.pt] & ~PAGE_MASK);
		if (pte == NULL) {
			uint64_t newpte;
			if (is_zero) {
				/* read-only alias of the shared zero page */
				newpte = (uint64_t)zero_superpage_phys;
			} else {
				newpte = (vm_offset_t)alloc_page_zero()
				    | INTEL_PTE_WRITE;
			}
			L1[addr.pt] = newpte
			    | INTEL_PTE_VALID;

			/* adding a new entry, this is not strictly required */
			invlpg(shadow_base);
		}
	}
}
void
kasan_arch_init(void)
{
	__nosan_bzero((void *)phys2virt(zero_superpage_phys), I386_LPGBYTES);

	/* Map the physical aperture */
	kasan_map_shadow_superpage_zero(physmap_base, physmap_max - physmap_base);
}
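/*
 * Shadow memory costs roughly one byte per eight bytes of tracked address
 * space, so the reservation below is sized as a fraction of physical
 * memory (STOLEN_MEM_PERCENT) plus a fixed floor (STOLEN_MEM_BYTES), both
 * defined elsewhere in the KASAN sources.
 */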
/*
 * Steal some memory from EFI for the shadow map.
 */
void
kasan_reserve_memory(void *_args)
{
	boot_args *args = (boot_args *)_args;
	vm_address_t pbase = args->kaddr;
	vm_address_t ptop = args->kaddr + args->ksize;

	kernel_vbase = ml_static_ptovirt(pbase);
	kernel_vtop = ml_static_ptovirt(ptop);

	EfiMemoryRange *mptr, *mptr_tmp;
	unsigned int mcount;
	unsigned int msize;
	unsigned int i;
	unsigned long total_pages = 0;
	unsigned long to_steal;

	mptr = (EfiMemoryRange *)ml_static_ptovirt((vm_offset_t)args->MemoryMap);
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

	/* sum total physical memory */
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		total_pages += mptr_tmp->NumberOfPages;
	}

	to_steal = (total_pages * STOLEN_MEM_PERCENT) / 100 + (STOLEN_MEM_BYTES / I386_PGBYTES);

	/* Search for a range large enough to steal from */
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		ppnum_t base, top;
		base = (ppnum_t)(mptr_tmp->PhysicalStart >> I386_PGSHIFT);
		top = (ppnum_t)((mptr_tmp->PhysicalStart >> I386_PGSHIFT) + mptr_tmp->NumberOfPages - 1);

		if ((mptr_tmp->Type == kEfiConventionalMemory) && (mptr_tmp->NumberOfPages > to_steal)) {
			/* Found a region with sufficient space - steal from the end */
			mptr_tmp->NumberOfPages -= to_steal;

			shadow_pbase = mptr_tmp->PhysicalStart + (mptr_tmp->NumberOfPages << I386_PGSHIFT);
			shadow_ptop = shadow_pbase + (to_steal << I386_PGSHIFT);
			shadow_pnext = shadow_pbase;
			shadow_pages_total = (unsigned)to_steal;
			shadow_stolen_idx = i;

			/* Set aside a superpage of zeros for dummy shadow mappings */
			zero_superpage_phys = alloc_superpage();

			return;
		}
	}

	panic("KASAN: could not reserve memory");
}
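/*
 * Walk the paging structures to check whether the given shadow address is
 * backed, either by a superpage or a 4KB mapping, without touching it.
 */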
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	split_addr_t addr = split_address(shadowp);
	assert(addr.pml4 == 507 || addr.pml4 == 508);

	uint64_t *L3;
	uint64_t *L2;
	uint64_t *L1;

	L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
	if (L3 == NULL) {
		return false;
	}
	L3 = (uint64_t *)phys2virt(L3);

	L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
	if (L2 == NULL) {
		return false;
	}
	L2 = (uint64_t *)phys2virt(L2);

	uint64_t pde = L2[addr.pd];
	if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
		/* mapped as superpage */
		return true;
	}

	L1 = (uint64_t *)(pde & ~PAGE_MASK);
	if (L1 == NULL) {
		return false;
	}
	L1 = (uint64_t *)phys2virt(L1);

	if (L1[addr.pt] & INTEL_PTE_VALID) {
		return true;
	}

	return false;
}