/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <i386/proc_reg.h>
#include <i386/machine_routines.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <sys/param.h>
#include <i386/pmap.h>
#include <libkern/libkern.h>
#include <pexpert/i386/efi.h>
#include <pexpert/i386/boot.h>
#include <sys/queue.h>

#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

extern uint64_t *IdlePML4;

#define phys2virt(x) ((uintptr_t)(x) + physmap_base)
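
/*
 * Page-table pages for the shadow map are carved out of a physically
 * contiguous region stolen from the EFI memory map (see
 * kasan_reserve_memory() below) and are accessed through the kernel's
 * physical aperture via phys2virt(). The globals below are the cursor of a
 * simple bump allocator over that stolen range.
 */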

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
vm_offset_t shadow_pnext;
unsigned shadow_stolen_idx;

static vm_offset_t zero_superpage_phys;

typedef struct {
	unsigned int pml4   : 9;
	unsigned int pdpt   : 9;
	unsigned int pd     : 9;
	unsigned int pt     : 9;
	unsigned int offset : 12;
} split_addr_t;
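
/*
 * An x86_64 virtual address decomposes into the four page-table indices and
 * the page offset exactly as split_address() computes them below: bits 39-47
 * select the PML4 entry, bits 30-38 the PDPT entry, bits 21-29 the PD entry,
 * bits 12-20 the PT entry, and bits 0-11 are the offset within the 4KB page.
 */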
static split_addr_t
split_address(vm_offset_t address)
{
	split_addr_t addr;

	addr.pml4   = (address >> 39) & 0x1ff;
	addr.pdpt   = (address >> 30) & 0x1ff;
	addr.pd     = (address >> 21) & 0x1ff;
	addr.pt     = (address >> 12) & 0x1ff;
	// addr.offset = address & PAGE_MASK;

	return addr;
}

static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + I386_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += I386_PGBYTES;
	shadow_pages_used++;

	return mem;
}
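
/*
 * alloc_page()/alloc_superpage() hand out physical pages from the stolen
 * range with a bump allocator; nothing is ever freed. The superpage variant
 * first rounds the cursor up to the next 2MB boundary, so pages between the
 * old and new cursor are skipped (hence the accounting caveat below).
 */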
#define ROUND_SUPERPAGE(x) ((((uintptr_t)(x)) + I386_LPGBYTES - 1) & ~(I386_LPGMASK))

static uintptr_t
alloc_superpage(void)
{
	uintptr_t mem;
	shadow_pnext = ROUND_SUPERPAGE(shadow_pnext);
	assert((shadow_pnext & I386_LPGMASK) == 0);
	mem = shadow_pnext;
	shadow_pnext += I386_LPGBYTES;
	shadow_pages_used += I386_LPGBYTES / I386_PGBYTES;
	/* XXX: not accounting for superpage rounding */
	return mem;
}

static uintptr_t
alloc_page_zero(void)
{
	uintptr_t mem = alloc_page();
	bzero_phys(mem, I386_PGBYTES);
	return mem;
}
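
/*
 * Map the shadow for [address, address + size) entirely onto the shared zero
 * superpage, allocating any missing intermediate page-table levels on the
 * way. With the usual 8-to-1 KASAN shadow scaling, one 2MB shadow superpage
 * covers 16MB of target address space, which is why the loop advances by
 * I386_LPGBYTES * 8 per iteration.
 */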
static void
kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
{
	address = vm_map_trunc_page(address, I386_LPGMASK);
	size = vm_map_round_page(size, I386_LPGMASK);

	vm_size_t j;
	for (j = 0; j < size; j += I386_LPGBYTES * 8) {

		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		split_addr_t addr = split_address(virt_shadow_target);
		assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
		       addr.pml4 <= KERNEL_KASAN_PML4_LAST);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;

		/* walk (or allocate) the PDPT for this shadow address */
		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		/* walk (or allocate) the page directory */
		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_PS;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		/* install the zero superpage as a read-only 2MB mapping */
		L1 = (uint64_t *)(L2[addr.pd] & ~PAGE_MASK);
		if (L1 == NULL) {
			L2[addr.pd] = (uint64_t)zero_superpage_phys
				| INTEL_PTE_VALID
				| INTEL_PTE_PS;
		} else {
			panic("Unexpected shadow mapping, addr = %lx, sz = %lu\n",
			      address, size);
		}

		/* adding a new entry, this is not strictly required */
		invlpg(virt_shadow_target);
	}
}
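
/*
 * Back the shadow for [address, address + size) with real mappings. When
 * is_zero is set, leaf PTEs point read-only at the shared zero superpage;
 * otherwise each missing shadow page gets its own zeroed, writable page.
 * Ranges already covered by a shadow superpage are left untouched.
 */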
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	size = kasan_granule_round(size);
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), PAGE_MASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), PAGE_MASK);

	assert(kasan_granule_partial(size) == 0);

	for (; shadow_base < shadow_top; shadow_base += I386_PGBYTES) {

		split_addr_t addr = split_address(shadow_base);
		assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
		       addr.pml4 <= KERNEL_KASAN_PML4_LAST);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;
		uint64_t *pte;

		/* walk (or allocate) the PDPT for this shadow page */
		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		/* walk (or allocate) the page directory */
		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		uint64_t pde = L2[addr.pd];
		if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
			/* Already mapped as a superpage */
			continue;
		}

		/* walk (or allocate) the page table */
		L1 = (uint64_t *)(pde & ~PAGE_MASK);
		if (L1 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L1 = (uint64_t *)phys2virt(pmem);
			L2[addr.pd] = pmem
				| INTEL_PTE_VALID
				| INTEL_PTE_WRITE;
		} else {
			L1 = (uint64_t *)phys2virt(L1);
		}

		pte = (uint64_t *)(L1[addr.pt] & ~PAGE_MASK);
		if (pte == NULL) {
			uint64_t newpte;
			if (is_zero) {
				newpte = (uint64_t)zero_superpage_phys;
			} else {
				newpte = (vm_offset_t)alloc_page_zero()
					| INTEL_PTE_WRITE;
			}
			L1[addr.pt] = newpte
				| INTEL_PTE_VALID;

			/* adding a new entry, this is not strictly required */
			invlpg(shadow_base);
		}
	}
}
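
/*
 * Arch-specific KASAN bring-up: clear the shared zero superpage and map zero
 * shadow over the entire physical aperture, so accesses that go through the
 * physmap always see valid shadow.
 */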
void
kasan_arch_init(void)
{
	__nosan_bzero((void *)phys2virt(zero_superpage_phys), I386_LPGBYTES);

	/* Map the physical aperture */
	kasan_map_shadow_superpage_zero(physmap_base, physmap_max - physmap_base);
}

/*
 * Steal some memory from EFI for the shadow map.
 */
void
kasan_reserve_memory(void *_args)
{
	boot_args *args = (boot_args *)_args;
	vm_address_t pbase = args->kaddr;
	vm_address_t ptop = args->kaddr + args->ksize;

	kernel_vbase = ml_static_ptovirt(pbase);
	kernel_vtop = ml_static_ptovirt(ptop);

	EfiMemoryRange *mptr, *mptr_tmp;
	unsigned int mcount;
	unsigned int msize;
	unsigned int i;
	unsigned long total_pages = 0;
	unsigned long to_steal;

	mptr = (EfiMemoryRange *)ml_static_ptovirt((vm_offset_t)args->MemoryMap);
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

	/* sum total physical memory */
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		total_pages += mptr_tmp->NumberOfPages;
	}

	to_steal = (unsigned long)(total_pages * STOLEN_MEM_PERCENT) / 100 + (STOLEN_MEM_BYTES / I386_PGBYTES);

	/* Search for a range large enough to steal from */
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		ppnum_t base, top;
		base = (ppnum_t)(mptr_tmp->PhysicalStart >> I386_PGSHIFT);
		top = (ppnum_t)((mptr_tmp->PhysicalStart >> I386_PGSHIFT) + mptr_tmp->NumberOfPages - 1);

		if ((mptr_tmp->Type == kEfiConventionalMemory) && (mptr_tmp->NumberOfPages > to_steal)) {
			/* Found a region with sufficient space - steal from the end */
			mptr_tmp->NumberOfPages -= to_steal;

			shadow_pbase = mptr_tmp->PhysicalStart + (mptr_tmp->NumberOfPages << I386_PGSHIFT);
			shadow_ptop = shadow_pbase + (to_steal << I386_PGSHIFT);
			shadow_pnext = shadow_pbase;
			shadow_pages_total = (unsigned int)to_steal;
			shadow_stolen_idx = i;

			/* Set aside a page of zeros we can use for dummy shadow mappings */
			zero_superpage_phys = alloc_superpage();

			return;
		}
	}

	panic("KASAN: could not reserve memory");
}
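
/*
 * Walk the page tables by hand and report whether the shadow byte at shadowp
 * is backed by anything at all, either a 4KB leaf page or the shared zero
 * superpage. Callers can use this to avoid touching shadow that was never
 * populated.
 */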
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	split_addr_t addr = split_address(shadowp);
	assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
	       addr.pml4 <= KERNEL_KASAN_PML4_LAST);

	uint64_t *L3;
	uint64_t *L2;
	uint64_t *L1;

	L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
	if (L3 == NULL) {
		return false;
	}
	L3 = (uint64_t *)phys2virt(L3);

	L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
	if (L2 == NULL) {
		return false;
	}
	L2 = (uint64_t *)phys2virt(L2);

	uint64_t pde = L2[addr.pd];
	if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
		/* mapped as superpage */
		return true;
	}

	L1 = (uint64_t *)(pde & ~PAGE_MASK);
	if (L1 == NULL) {
		return false;
	}
	L1 = (uint64_t *)phys2virt(L1);

	if (L1[addr.pt] & INTEL_PTE_VALID) {
		return true;
	}

	return false;
}