/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

#include <pexpert/arm64/boot.h>
#include <arm64/proc_reg.h>

#include <libkern/kernel_mach_header.h>
extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;

typedef uint64_t pmap_paddr_t;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
static vm_offset_t shadow_pnext;

static vm_offset_t zero_page_phys;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

void kasan_bootstrap(boot_args *, vm_offset_t pgtable);
void flush_mmu_tlb(void);
#define KASAN_SHIFT_ARM64 0xdffffff800000000ULL /* Defined in makedefs/MakeInc.def */
#define KASAN_SHADOW_MIN  0xfffffff400000000ULL
#define KASAN_SHADOW_MAX  0xfffffff680000000ULL

_Static_assert(KASAN_SHIFT == KASAN_SHIFT_ARM64, "KASan inconsistent shadow shift");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");
/* Bump allocator over the physical pages stolen at bootstrap */
static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;
	shadow_pages_used++;

	return mem;
}
static uintptr_t
alloc_zero_page(void)
{
	uintptr_t mem = alloc_page();
	__nosan_bzero((void *)phystokv(mem), ARM_PGBYTES);

	return mem;
}
/* Expand (*addrp, *sizep) outward to whole-page boundaries */
static void
align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}
static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
{
	size = (size + 0x7UL) & ~0x7UL;
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);

	assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
	assert((size & 0x7) == 0);

	for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
		uint64_t *base = cpu_tte;
		uint64_t *pte;

#if !__ARM64_TWO_LEVEL_PMAP__
		/* lookup L1 entry */
		pte = base + ((shadow_base & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L1 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
#endif

		/* lookup L2 entry */
		pte = base + ((shadow_base & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

		if (!back_page) {
			/* caller only wanted the L1/L2 tables populated */
			continue;
		}

		/* lookup L3 entry */
		pte = base + ((shadow_base & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
		if ((*pte & ARM_PTE_TYPE_VALID) &&
		    ((((*pte) & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RONA)) || is_zero)) {
			/* nothing to do - page already mapped and we are not
			 * upgrading */
		} else {
			/* create new L3 entry */
			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA);
			}
			newpte |= ARM_PTE_TYPE_VALID
				| ARM_PTE_AF
				| ARM_PTE_SH(SH_OUTER_MEMORY)
				| ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
				| ARM_PTE_NX
				| ARM_PTE_PNX;
			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	kasan_map_shadow_internal(address, size, is_zero, true);
}
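
/*
 * Usage sketch (the range below is hypothetical): to back a wired kernel
 * VA range with fresh writable shadow pages,
 *
 *	kasan_map_shadow(vaddr, size, false);
 *
 * or, for memory whose shadow only ever needs to read as "all clean",
 * alias it to the shared read-only zero page instead:
 *
 *	kasan_map_shadow(vaddr, size, true);
 */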
/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
{
	align_to_page(&address, &size);

	vm_size_t j;
	uint64_t *pte;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		/* runs with the identity (V=P) map active, so the bootstrap
		 * tables are walked by physical address, without phystokv */
		uint64_t *base = (uint64_t *)bootstrap_pgtable_phys;

#if !__ARM64_TWO_LEVEL_PMAP__
		/* lookup L1 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L1 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);
#endif

		/* lookup L2 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L3 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);

		if ((*pte & (ARM_PTE_TYPE|ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID|ARM_PTE_AP(AP_RWNA))) {
			/* L3 entry valid and mapped RW - do nothing */
		} else {
			/* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */
			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				vm_address_t pg = alloc_page();
				__nosan_bzero((void *)pg, ARM_PGBYTES);
				newpte = pg | ARM_PTE_AP(AP_RWNA);
			}

			/* add the default attributes */
			newpte |= ARM_PTE_TYPE_VALID
				| ARM_PTE_AF
				| ARM_PTE_SH(SH_OUTER_MEMORY)
				| ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
				| ARM_PTE_NX
				| ARM_PTE_PNX;

			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}
void
kasan_arch_init(void)
{
	/* Map the physical aperture */
	kasan_map_shadow(kernel_vtop, physmap_vtop - kernel_vtop, true);

#if defined(KERNEL_INTEGRITY_KTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR;
	 * back_page = false leaves the leaf pages unmapped for now */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, false, false);
#endif
}
/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init(). Called while running with identity (V=P) map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	vm_offset_t extra = (vm_offset_t)&_mh_execute_header - pbase;

	kernel_vbase = args->virtBase;
	kernel_vtop = args->virtBase + ptop - pbase;

	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);

	args->memSize -= tosteal;

	/* Initialize the page allocator */
	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (long)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/* Set aside a page of zeros we can use for dummy shadow mappings */
	zero_page_phys = alloc_page();
	__nosan_bzero((void *)zero_page_phys, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;
	kasan_map_shadow_early(kernel_vbase + extra, args->memSize - extra, true);

	/* Shadow the early stacks */
	vm_offset_t p2v = args->virtBase - args->physBase;

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size, false);
	kasan_map_shadow_early(excepstack_virt, excepstack_size, false);
}
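
/*
 * Sizing sketch (illustrative figures only; STOLEN_MEM_PERCENT and
 * STOLEN_MEM_BYTES come from kasan_internal.h): on a device with 2 GB of
 * DRAM, if the percentage were 25, tosteal would come to roughly 512 MB
 * plus the fixed STOLEN_MEM_BYTES, carved off the top of physical memory
 * by shrinking args->memSize before the VM layer ever sees it.
 */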
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	uint64_t *pte;
	uint64_t *base = cpu_tte;

	assert(shadowp >= KASAN_SHADOW_MIN);
	assert(shadowp < KASAN_SHADOW_MAX);

#if !__ARM64_TWO_LEVEL_PMAP__
	/* lookup L1 entry */
	pte = base + ((shadowp & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
#endif

	/* lookup L2 entry */
	pte = base + ((shadowp & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L3 entry */
	pte = base + ((shadowp & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
	if (!(*pte & ARM_PTE_TYPE_VALID)) {
		return false;
	}

	return true;
}
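
/*
 * Usage sketch (hypothetical caller, not from this file): check that the
 * shadow byte for an address is backed before touching it:
 *
 *	uintptr_t shadow = (uintptr_t)SHADOW_FOR_ADDRESS(addr);
 *	if (kasan_is_shadow_mapped(shadow)) {
 *		... safe to read or poison *(uint8_t *)shadow ...
 *	}
 */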