/*
 * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>

#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

#include <pexpert/device_tree.h>
#include <pexpert/arm64/boot.h>
#include <arm64/tlb.h>
#include <arm64/proc_reg.h>

#include <libkern/kernel_mach_header.h>

extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;

typedef uint64_t pmap_paddr_t;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;

#if HIBERNATION
// if we're building a kernel with hibernation support, hibernate_write_image depends on this symbol
vm_offset_t shadow_pnext;
#else
static vm_offset_t shadow_pnext;
#endif

static vm_offset_t zero_page_phys;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

void kasan_bootstrap(boot_args *, vm_offset_t pgtable);

#define KASAN_OFFSET_ARM64 0xe000000000000000ULL /* Defined in makedefs/MakeInc.def */

#if defined(ARM_LARGE_MEMORY)
#define KASAN_SHADOW_MIN  (VM_MAX_KERNEL_ADDRESS+1)
#define KASAN_SHADOW_MAX  0xffffffffffffffffULL
#else
#define KASAN_SHADOW_MIN  0xfffffffc00000000ULL
#define KASAN_SHADOW_MAX  0xffffffff80000000ULL
#endif

_Static_assert(KASAN_OFFSET == KASAN_OFFSET_ARM64, "KASan inconsistent shadow offset");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");

static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;

	return mem;
}

static uintptr_t
alloc_zero_page(void)
{
	uintptr_t mem = alloc_page();
	__nosan_bzero((void *)phystokv(mem), ARM_PGBYTES);
	return mem;
}

static void
align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}

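/*
 * Worked example (illustrative only, assuming 16KB pages, i.e.
 * ARM_PGBYTES == 0x4000): align_to_page() with *addrp == 0x...0001234 and
 * *sizep == 0x100 truncates the address to 0x...0000000 and rounds the size
 * up to 0x4000, so the (address, size) pair still covers the original range
 * but is now page-aligned at both ends.
 */
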
static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
{
	size = (size + 0x7UL) & ~0x7UL;
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);

	assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
	assert((size & 0x7) == 0);

	for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
		uint64_t *base = cpu_tte;
		uint64_t *pte;

		/* lookup L1 entry */
		pte = base + ((shadow_base & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L1 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L2 entry */
		pte = base + ((shadow_base & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

		if (!back_page) {
			/* table hierarchy is in place - skip mapping an actual shadow data page */
			continue;
		}

		/* lookup L3 entry */
		pte = base + ((shadow_base & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
		if ((*pte & ARM_PTE_TYPE_VALID) &&
		    ((((*pte) & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RONA)) || is_zero)) {
			/* nothing to do - page already mapped and we are not upgrading */
		} else {
			/* create new L3 entry */
			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA);
			}
			newpte |= ARM_PTE_TYPE_VALID
			    | ARM_PTE_AF
			    | ARM_PTE_SH(SH_OUTER_MEMORY)
			    | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
			    | ARM_PTE_NX
			    | ARM_PTE_PNX;
			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	kasan_map_shadow_internal(address, size, is_zero, true);
}

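/*
 * Usage sketch (illustrative only): callers that need real, writable shadow
 * for a region pass is_zero == false, e.g.
 *     kasan_map_shadow(vaddr, size, false);
 * while regions whose shadow only ever needs to read as zero (such as the
 * physical aperture mapped in kasan_arch_init() below) pass is_zero == true
 * and share the single read-only zero page.
 */
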
/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
{
	align_to_page(&address, &size);

	vm_size_t j;
	uint64_t *pte;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		uint64_t *base = (uint64_t *)bootstrap_pgtable_phys;

		/* lookup L1 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L1 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L2 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L3 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);

		if ((*pte & (ARM_PTE_TYPE | ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID | ARM_PTE_AP(AP_RWNA))) {
			/* L3 entry valid and mapped RW - do nothing */
		} else {
			/* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */

			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				vm_address_t pg = alloc_page();
				__nosan_bzero((void *)pg, ARM_PGBYTES);
				newpte = pg | ARM_PTE_AP(AP_RWNA);
			}

			/* add the default attributes */
			newpte |= ARM_PTE_TYPE_VALID
			    | ARM_PTE_AF
			    | ARM_PTE_SH(SH_OUTER_MEMORY)
			    | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
			    | ARM_PTE_NX
			    | ARM_PTE_PNX;

			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

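/*
 * Note on the two mapping paths: kasan_map_shadow_early() runs while the
 * identity (V=P) map is still active, so it walks the bootstrap page tables
 * by physical address and never calls phystokv(); kasan_map_shadow_internal()
 * runs later against cpu_tte and must translate table pointers with
 * phystokv(). The TODO above notes that mappings created through this early
 * path can be reclaimed once kasan_init() has run.
 */
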
void
kasan_arch_init(void)
{
	/* Map the physical aperture */
	kasan_map_shadow(physmap_vbase, physmap_vtop - physmap_vbase, true);

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, false, false);
#endif
}

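/*
 * With back_page == false the pre-allocation call above only builds out the
 * translation table hierarchy (L1/L2 entries plus the L3 table pages they
 * point to); no shadow data pages are mapped. The intent, per the comment
 * above, is presumably that no new table pages ever need to be allocated for
 * the shadow once KTRR/CTRR lockdown is in effect.
 */
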
/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init(). Called while running with identity (V=P) map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	vm_offset_t extra = (vm_offset_t)&_mh_execute_header - pbase;

	kernel_vbase = args->virtBase;
	kernel_vtop = args->virtBase + ptop - pbase;

	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);

	args->memSize -= tosteal;

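	/*
	 * Illustrative arithmetic (hypothetical numbers): if STOLEN_MEM_PERCENT
	 * were 25 and args->memSize were 8GB, tosteal would come to 2GB plus the
	 * fixed STOLEN_MEM_BYTES, truncated down to a page boundary; that amount
	 * is carved off the top of memory and becomes the shadow page pool below.
	 */
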
	/* Initialize the page allocator */
	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/* Set aside a page of zeros we can use for dummy shadow mappings */
	zero_page_phys = alloc_page();
	__nosan_bzero((void *)zero_page_phys, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;
	kasan_map_shadow_early(kernel_vbase + extra, args->memSize - extra, true);

	/* Shadow the early stacks */
	vm_offset_t p2v = args->virtBase - args->physBase;

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size, false);
	kasan_map_shadow_early(excepstack_virt, excepstack_size, false);

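	/*
	 * The stacks are mapped with is_zero == false because instrumented code
	 * poisons and unpoisons their shadow as frames are pushed and popped,
	 * so they need private, writable shadow pages rather than the shared
	 * read-only zero page.
	 */
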
	if ((vm_offset_t)args->deviceTreeP - p2v < (vm_offset_t)&_mh_execute_header) {
		kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength, false);
	}
}

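/*
 * Note on the final check in kasan_bootstrap(): the kasan_map_shadow_early()
 * call for the KVA bootstrap mapping starts shadowing at the kernel Mach-O
 * header (kernel_vbase + extra), so the device tree only needs its own shadow
 * mapping when it lies below that already-covered range.
 */
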
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	uint64_t *pte;
	uint64_t *base = cpu_tte;

	assert(shadowp >= KASAN_SHADOW_MIN);
	assert(shadowp < KASAN_SHADOW_MAX);

	/* lookup L1 entry */
	pte = base + ((shadowp & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L2 entry */
	pte = base + ((shadowp & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L3 entry */
	pte = base + ((shadowp & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
	if (!(*pte & ARM_PTE_TYPE_VALID)) {
		return false;
	}

	return true;
}