/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

#include <pexpert/arm64/boot.h>
#include <arm64/proc_reg.h>

#include <libkern/kernel_mach_header.h>

extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;
#define phystokv(a) ((vm_address_t)(a) - gPhysBase + gVirtBase)
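/*
 * phystokv relies on the physical aperture being one linear offset from
 * physical memory. Worked example with hypothetical bases (not taken from
 * any real device): if gPhysBase = 0x800000000 and
 * gVirtBase = 0xfffffff000000000, then
 * phystokv(0x800004000) = 0xfffffff000004000.
 */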

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
static vm_offset_t shadow_pnext;

static vm_offset_t zero_page_phys;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

void kasan_bootstrap(boot_args *, vm_offset_t pgtable);
void flush_mmu_tlb(void);

#ifndef __ARM_16K_PG__
#error "Unsupported HW config: Assuming 16K pages"
#endif

#define KASAN_SHIFT_ARM64 0xdffffff800000000ULL /* Defined in makedefs/MakeInc.def */
#define KASAN_SHADOW_MIN  0xfffffff400000000ULL
#define KASAN_SHADOW_MAX  0xfffffff680000000ULL

_Static_assert(KASAN_SHIFT == KASAN_SHIFT_ARM64, "KASan inconsistent shadow shift");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");

static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;
	shadow_pages_used++;

	return mem;
}

static uintptr_t
alloc_zero_page(void)
{
	uintptr_t mem = alloc_page();
	__nosan_bzero((void *)phystokv(mem), ARM_PGBYTES);

	return mem;
}

static void
align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}

static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, bool back_page)
{
	align_to_page(&address, &size);

	vm_size_t j;
	uint64_t *pte;

	/* XXX: this could be more efficient by walking through the shadow pages
	 * instead of the source pages */
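	/* Every 8 bytes of kernel VA map to 1 shadow byte, so 8 consecutive
	 * 16K source pages share a single shadow page; walking the shadow
	 * instead would do roughly 1/8th of the iterations below. */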
	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		uint64_t *base = cpu_tte;

#if !__ARM64_TWO_LEVEL_PMAP__
		/* lookup L1 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L1 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);
#endif

		/* lookup L2 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			*pte = ((uint64_t)alloc_zero_page() & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

		if (!back_page) {
			/* caller only wants the table pages - leave the L3 entry alone */
			continue;
		}

		/* lookup L3 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
		if ((*pte & ARM_PTE_TYPE_VALID) &&
		    ((((*pte) & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RONA)) || is_zero)) {
			/* nothing to do - page already mapped and we are not
			 * upgrading */
		} else {
			/* create new L3 entry */
			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA);
			}
			newpte |= ARM_PTE_TYPE_VALID
				| ARM_PTE_AF
				| ARM_PTE_SH(SH_OUTER_MEMORY)
				| ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
				| ARM_PTE_NX
				| ARM_PTE_PNX;
			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}
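
/*
 * Design note: ranges mapped with is_zero=true share one physical page of
 * zeros, mapped read-only, so large unpoisoned regions cost almost no
 * memory. A later non-zero mapping of the same shadow page upgrades it to
 * a private RW page via the AP_RONA check above.
 */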

void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
{
	kasan_map_shadow_internal(address, size, is_zero, true);
}
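
/*
 * Usage sketch (hypothetical values): backing a freshly wired 64K kernel
 * range at `base` with writable shadow:
 *
 *	kasan_map_shadow(base, 0x10000, false);
 *
 * Passing is_zero=true instead aliases the range's shadow onto the
 * read-only zero page - valid and unpoisoned, at no per-page cost.
 */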

/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero)
{
	align_to_page(&address, &size);

	vm_size_t j;
	uint64_t *pte;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		uint64_t *base = (uint64_t *)bootstrap_pgtable_phys;

#if !__ARM64_TWO_LEVEL_PMAP__
		/* lookup L1 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L1 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);
#endif

		/* lookup L2 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		if (*pte & ARM_TTE_VALID) {
			assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
		} else {
			/* create new L3 table */
			vm_address_t pg = alloc_page();
			__nosan_bzero((void *)pg, ARM_PGBYTES);
			*pte = ((uint64_t)pg & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		}
		base = (uint64_t *)(*pte & ARM_TTE_TABLE_MASK);

		/* lookup L3 entry */
		pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
		if ((*pte & (ARM_PTE_TYPE | ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID | ARM_PTE_AP(AP_RWNA))) {
			/* L3 entry valid and mapped RW - do nothing */
		} else {
			/* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */

			uint64_t newpte;
			if (is_zero) {
				/* map the zero page RO */
				newpte = (uint64_t)zero_page_phys | ARM_PTE_AP(AP_RONA);
			} else {
				/* map a fresh page RW */
				vm_address_t pg = alloc_page();
				__nosan_bzero((void *)pg, ARM_PGBYTES);
				newpte = pg | ARM_PTE_AP(AP_RWNA);
			}

			/* add the default attributes */
			newpte |= ARM_PTE_TYPE_VALID
				| ARM_PTE_AF
				| ARM_PTE_SH(SH_OUTER_MEMORY)
				| ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
				| ARM_PTE_NX
				| ARM_PTE_PNX;

			*pte = newpte;
		}
	}

	flush_mmu_tlb();
}

void
kasan_arch_init(void)
{
	assert(KASAN_SHADOW_MIN >= VM_MAX_KERNEL_ADDRESS);

	/* Map the physical aperture */
	kasan_map_shadow(kernel_vtop, physmap_vtop - kernel_vtop, true);

#if defined(KERNEL_INTEGRITY_KTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, false, false);
#endif
}

/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init(). Called while running with identity (V=P) map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	vm_offset_t extra = (vm_offset_t)&_mh_execute_header - pbase;

	kernel_vbase = args->virtBase;
	kernel_vtop = args->virtBase + ptop - pbase;

	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);
	args->memSize -= tosteal;

	/* Initialize the page allocator */
	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (long)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/* Set aside a page of zeros we can use for dummy shadow mappings */
	zero_page_phys = alloc_page();
	__nosan_bzero((void *)zero_page_phys, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;
	kasan_map_shadow_early(kernel_vbase + extra, args->memSize - extra, true);

	/* Shadow the early stacks */
	vm_offset_t p2v = args->virtBase - args->physBase;

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size, false);
	kasan_map_shadow_early(excepstack_virt, excepstack_size, false);
}