/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_debug.h>

#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/monotonic.h>
#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/lowglobals.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <pexpert/arm64/boot.h>
#include <pexpert/device_tree.h>

#include <libkern/kernel_mach_header.h>
#include <libkern/section_keywords.h>

#include <san/kasan.h>
#if __ARM_KERNEL_PROTECT__
/*
 * If we want to support __ARM_KERNEL_PROTECT__, we need a sufficient amount of
 * mappable space preceding the kernel (as we unmap the kernel by cutting the
 * range covered by TTBR1 in half). This must also cover the exception vectors.
 */
static_assert(KERNEL_PMAP_HEAP_RANGE_START > ARM_KERNEL_PROTECT_EXCEPTION_START);

/* The exception vectors and the kernel cannot share root TTEs. */
static_assert((KERNEL_PMAP_HEAP_RANGE_START & ~ARM_TT_ROOT_OFFMASK) > ARM_KERNEL_PROTECT_EXCEPTION_START);

/*
 * We must have enough space in the TTBR1_EL1 range to create the EL0 mapping of
 * the exception vectors.
 */
static_assert((((~ARM_KERNEL_PROTECT_EXCEPTION_START) + 1) * 2ULL) <= (ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK));
#endif /* __ARM_KERNEL_PROTECT__ */
#if __APRR_SUPPORTED__ && XNU_MONITOR
/*
 * If APRR is supported, setting XN on L1/L2 table entries will shift the effective
 * APRR index of L3 PTEs covering PPL-protected pages in the kernel dynamic region
 * from PPL R/W to kernel R/W. That will effectively remove PPL write protection
 * from those pages. Avoid setting XN at the table level for MONITOR-enabled builds
 * that are backed by APRR.
 */
#define ARM_DYNAMIC_TABLE_XN ARM_TTE_TABLE_PXN
#else
#define ARM_DYNAMIC_TABLE_XN (ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN)
#endif
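
/*
 * Illustrative note (not part of the original source): ARM_TTE_TABLE_PXN and
 * ARM_TTE_TABLE_XN correspond to the hierarchical PXNTable/XNTable controls in
 * the ARMv8 translation table format; setting them on an L1/L2 table entry
 * forces privileged and/or unprivileged execute-never on every mapping beneath
 * that table. That is why the APRR-backed PPL configuration above sets only the
 * PXN table bit, while other configurations set both.
 */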
extern vm_offset_t shadow_pbase;
extern vm_offset_t shadow_ptop;
extern vm_offset_t physmap_vbase;
extern vm_offset_t physmap_vtop;

/*
 * Denotes the end of xnu.
 */
extern void *last_kernel_symbol;

extern void arm64_replace_bootstack(cpu_data_t*);
extern void PE_slide_devicetree(vm_offset_t);
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_base;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_top;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kext_base;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kext_top;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_stext;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_etext;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_slide;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_slid_base;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_slid_top;

SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_stext;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_etext;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_sdata;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_edata;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_sinfo;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_einfo;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_slinkedit;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_elinkedit;

SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_builtinkmod_text;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_builtinkmod_text_end;
/* Used by <mach/arm/vm_param.h> */
SECURITY_READ_ONLY_LATE(unsigned long) gVirtBase;
SECURITY_READ_ONLY_LATE(unsigned long) gPhysBase;
SECURITY_READ_ONLY_LATE(unsigned long) gPhysSize;
SECURITY_READ_ONLY_LATE(unsigned long) gT0Sz = T0SZ_BOOT;
SECURITY_READ_ONLY_LATE(unsigned long) gT1Sz = T1SZ_BOOT;
/* 23543331 - step 1 of kext / kernel __TEXT and __DATA colocation is to move
 * all kexts before the kernel. This is only for arm64 devices and looks
 * something like the following:
 *
 *    0xffffff8004004000 __PRELINK_TEXT
 *    0xffffff8007004000 __TEXT           (xnu)
 *    0xffffff80075ec000 __DATA           (xnu)
 *    0xffffff80076dc000 __KLD            (xnu)
 *    0xffffff80076e0000 __LAST           (xnu)
 *    0xffffff80076e4000 __LINKEDIT       (xnu)
 *    0xffffff80076e4000 __PRELINK_DATA   (not used yet)
 *    0xffffff800782c000 __PRELINK_INFO
 *    0xffffff80078e4000 -- End of kernelcache
 */

/* 24921709 - make XNU ready for KTRR
 *
 * Two possible kernel cache layouts, depending on which kcgen is being used.
 * VAs increasing downwards.
 * ...
 *    __PRELINK_DATA   (expected empty)
 * ...
 *    __PRELINK_TEXT   <--- First KTRR (ReadOnly) segment
 * ...
 *    __LAST           <--- Last KTRR (ReadOnly) segment
 * ...
 *    __BOOTDATA       (if present)
 * ...
 *    __PRELINK_DATA   (expected populated now)
 * ...
 */
vm_offset_t mem_size;       /* Size of actual physical memory present
                             * minus any performance buffer and possibly
                             * limited by mem_limit in bytes */
uint64_t    mem_actual;     /* The "One True" physical memory size
                             * actually, it's the highest physical ... */
uint64_t    max_mem;        /* Size of physical memory (bytes), adjusted ... */
uint64_t    sane_size;      /* Memory size to use for defaults ... */

/* This no longer appears to be used; kill it? */
addr64_t    vm_last_addr = VM_MAX_KERNEL_ADDRESS;  /* Highest kernel
                                                    * virtual address known
                                                    * to the VM system */
SECURITY_READ_ONLY_LATE(vm_offset_t) segEXTRADATA;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeEXTRADATA;

SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTTEXT;
SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWEST;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segTEXTB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeTEXT;

SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLTEXTB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLTEXT;

SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLTRAMPB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLTRAMP;

SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLDATACONSTB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLDATACONST;
SECURITY_READ_ONLY_LATE(void *) pmap_stacks_start = NULL;
SECURITY_READ_ONLY_LATE(void *) pmap_stacks_end = NULL;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segDATACONSTB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeDATACONST;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segTEXTEXECB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeTEXTEXEC;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segDATAB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeDATA;

SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLDATAB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLDATA;

SECURITY_READ_ONLY_LATE(vm_offset_t) segBOOTDATAB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeBOOTDATA;
extern vm_offset_t intstack_low_guard;
extern vm_offset_t intstack_high_guard;
extern vm_offset_t excepstack_high_guard;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segLINKB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeLINK;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segKLDB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeKLD;
SECURITY_READ_ONLY_LATE(vm_offset_t) segLASTB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeLAST;

SECURITY_READ_ONLY_LATE(vm_offset_t) segPRELINKTEXTB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizePRELINKTEXT;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKTEXTEXECB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKTEXTEXEC;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKDATACONSTB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKDATACONST;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPRELINKDATAB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePRELINKDATA;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKLLVMCOVB = 0;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKLLVMCOV = 0;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKLINKEDITB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKLINKEDIT;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPRELINKINFOB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePRELINKINFO;

SECURITY_READ_ONLY_LATE(static boolean_t) use_contiguous_hint = TRUE;

SECURITY_READ_ONLY_LATE(unsigned) PAGE_SHIFT_CONST;

SECURITY_READ_ONLY_LATE(vm_offset_t) end_kern;
SECURITY_READ_ONLY_LATE(vm_offset_t) etext;
SECURITY_READ_ONLY_LATE(vm_offset_t) sdata;
SECURITY_READ_ONLY_LATE(vm_offset_t) edata;

vm_offset_t alloc_ptpage(boolean_t map_static);
SECURITY_READ_ONLY_LATE(vm_offset_t) ropage_next;
/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 * Page_size must already be set.
 *
 *	first_avail: first available physical page -
 *	             after kernel page tables
 *	avail_start: PA of first physical page
 *	avail_end:   PA of last physical page
 */
SECURITY_READ_ONLY_LATE(vm_offset_t) first_avail;
SECURITY_READ_ONLY_LATE(vm_offset_t) static_memory_end;
SECURITY_READ_ONLY_LATE(pmap_paddr_t) avail_start;
SECURITY_READ_ONLY_LATE(pmap_paddr_t) avail_end;
SECURITY_READ_ONLY_LATE(pmap_paddr_t) real_avail_end;
SECURITY_READ_ONLY_LATE(unsigned long) real_phys_size;
#if __ARM_KERNEL_PROTECT__
extern void ExceptionVectorsBase;
extern void ExceptionVectorsEnd;
#endif /* __ARM_KERNEL_PROTECT__ */

#define PTOV_TABLE_SIZE 8
SECURITY_READ_ONLY_LATE(static ptov_table_entry) ptov_table[PTOV_TABLE_SIZE];
SECURITY_READ_ONLY_LATE(static boolean_t) kva_active = FALSE;
vm_offset_t
phystokv(pmap_paddr_t pa)
{
	for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) {
		if ((pa >= ptov_table[i].pa) && (pa < (ptov_table[i].pa + ptov_table[i].len)))
			return (pa - ptov_table[i].pa + ptov_table[i].va);
	}
	assertf((pa - gPhysBase) < real_phys_size, "%s: illegal PA: 0x%llx", __func__, (uint64_t)pa);
	return (pa - gPhysBase + gVirtBase);
}
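
/*
 * Illustrative note (not part of the original source): with hypothetical values
 * gPhysBase = 0x800000000 and gVirtBase = 0xfffffff004000000, a physical address
 * that misses every ptov_table entry, e.g. pa = 0x800123000, falls through to
 * the linear translation and yields
 *     pa - gPhysBase + gVirtBase = 0xfffffff004123000.
 * Entries in ptov_table override this default for regions whose physical
 * aperture mapping was slid away from that simple linear relationship.
 */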
vm_offset_t
phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
{
	vm_size_t len;
	for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) {
		if ((pa >= ptov_table[i].pa) && (pa < (ptov_table[i].pa + ptov_table[i].len))) {
			len = ptov_table[i].len - (pa - ptov_table[i].pa);
			if (*max_len > len)
				*max_len = len;
			return (pa - ptov_table[i].pa + ptov_table[i].va);
		}
	}
	len = PAGE_SIZE - (pa & PAGE_MASK);
	if (*max_len > len)
		*max_len = len;
	assertf((pa - gPhysBase) < real_phys_size, "%s: illegal PA: 0x%llx", __func__, (uint64_t)pa);
	return (pa - gPhysBase + gVirtBase);
}
vm_offset_t
ml_static_vtop(vm_offset_t va)
{
	for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) {
		if ((va >= ptov_table[i].va) && (va < (ptov_table[i].va + ptov_table[i].len)))
			return (va - ptov_table[i].va + ptov_table[i].pa);
	}
	assertf(((vm_address_t)(va) - gVirtBase) < gPhysSize, "%s: illegal VA: %p", __func__, (void*)va);
	return ((vm_address_t)(va) - gVirtBase + gPhysBase);
}
/*
 * This rounds the given address up to the nearest boundary for a PTE contiguous
 * hint.
 */
static vm_offset_t
round_up_pte_hint_address(vm_offset_t address)
{
	vm_offset_t hint_size = ARM_PTE_SIZE << ARM_PTE_HINT_ENTRIES_SHIFT;
	return (address + (hint_size - 1)) & ~(hint_size - 1);
}
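
/*
 * Illustrative note (not part of the original source): the rounding above is
 * plain power-of-two alignment. If, hypothetically, hint_size evaluated to
 * 0x10000 (64KB), then an address of 0x12345 would round up to 0x20000, while
 * an address already on the boundary (0x20000) would be returned unchanged,
 * since (addr + 0xffff) & ~0xffff is the usual align-up idiom.
 */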
/* allocate a page for a page table: we support static and dynamic mappings.
 *
 * returns a virtual address for the allocated page
 *
 * for static mappings, we allocate from the region ropagetable_begin to ropagetable_end-1,
 * which is defined in the DATA_CONST segment and will be protected RNX when vm_prot_finalize runs.
 *
 * for dynamic mappings, we allocate from avail_start, which should remain RWNX.
 */

vm_offset_t alloc_ptpage(boolean_t map_static) {
	vm_offset_t vaddr;

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
	map_static = FALSE;
#endif

	if (!ropage_next) {
		ropage_next = (vm_offset_t)&ropagetable_begin;
	}

	if (map_static) {
		assert(ropage_next < (vm_offset_t)&ropagetable_end);
		vaddr = ropage_next;
		ropage_next += ARM_PGBYTES;
	} else {
		vaddr = phystokv(avail_start);
		avail_start += ARM_PGBYTES;
	}
	return vaddr;
}
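
/*
 * Illustrative usage (mirroring calls made later in this file): callers that
 * need a bootstrap page table page allocate it statically and zero it, e.g.
 *
 *     tt_entry_t *tt = (tt_entry_t *)alloc_ptpage(TRUE);
 *     bzero(tt, ARM_PGBYTES);
 *
 * while dynamically mapped tables pass FALSE so the page is carved from
 * avail_start rather than the read-only page table region.
 */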
406 void dump_kva_l2(vm_offset_t tt_base
, tt_entry_t
*tt
, int indent
, uint64_t *rosz_out
, uint64_t *rwsz_out
);
408 void dump_kva_l2(vm_offset_t tt_base
, tt_entry_t
*tt
, int indent
, uint64_t *rosz_out
, uint64_t *rwsz_out
) {
410 boolean_t cur_ro
, prev_ro
= 0;
411 int start_entry
= -1;
412 tt_entry_t cur
, prev
= 0;
413 pmap_paddr_t robegin
= kvtophys((vm_offset_t
)&ropagetable_begin
);
414 pmap_paddr_t roend
= kvtophys((vm_offset_t
)&ropagetable_end
);
415 boolean_t tt_static
= kvtophys((vm_offset_t
)tt
) >= robegin
&&
416 kvtophys((vm_offset_t
)tt
) < roend
;
418 for(i
=0; i
<TTE_PGENTRIES
; i
++) {
419 int tte_type
= tt
[i
] & ARM_TTE_TYPE_MASK
;
420 cur
= tt
[i
] & ARM_TTE_TABLE_MASK
;
423 /* addresses mapped by this entry are static if it is a block mapping,
424 * or the table was allocated from the RO page table region */
425 cur_ro
= (tte_type
== ARM_TTE_TYPE_BLOCK
) || (cur
>= robegin
&& cur
< roend
);
430 if ((cur
== 0 && prev
!= 0) || (cur_ro
!= prev_ro
&& prev
!= 0)) { // falling edge
431 uintptr_t start
,end
,sz
;
433 start
= (uintptr_t)start_entry
<< ARM_TT_L2_SHIFT
;
435 end
= ((uintptr_t)i
<< ARM_TT_L2_SHIFT
) - 1;
438 sz
= end
- start
+ 1;
439 printf("%*s0x%08x_%08x-0x%08x_%08x %s (%luMB)\n",
441 (uint32_t)(start
>> 32),(uint32_t)start
,
442 (uint32_t)(end
>> 32),(uint32_t)end
,
443 prev_ro
? "Static " : "Dynamic",
453 if ((prev
== 0 && cur
!= 0) || cur_ro
!= prev_ro
) { // rising edge: set start
462 void dump_kva_space() {
463 uint64_t tot_rosz
=0, tot_rwsz
=0;
464 int ro_ptpages
, rw_ptpages
;
465 pmap_paddr_t robegin
= kvtophys((vm_offset_t
)&ropagetable_begin
);
466 pmap_paddr_t roend
= kvtophys((vm_offset_t
)&ropagetable_end
);
467 boolean_t root_static
= kvtophys((vm_offset_t
)cpu_tte
) >= robegin
&&
468 kvtophys((vm_offset_t
)cpu_tte
) < roend
;
469 uint64_t kva_base
= ~((1ULL << (64 - T1SZ_BOOT
)) - 1);
471 printf("Root page table: %s\n", root_static
? "Static" : "Dynamic");
473 for(unsigned int i
=0; i
<TTE_PGENTRIES
; i
++) {
477 uint64_t rosz
= 0, rwsz
= 0;
479 if ((cpu_tte
[i
] & ARM_TTE_VALID
) == 0)
482 cur
= cpu_tte
[i
] & ARM_TTE_TABLE_MASK
;
483 start
= (uint64_t)i
<< ARM_TT_L1_SHIFT
;
484 start
= start
+ kva_base
;
485 end
= start
+ (ARM_TT_L1_SIZE
- 1);
486 cur_ro
= cur
>= robegin
&& cur
< roend
;
488 printf("0x%08x_%08x-0x%08x_%08x %s\n",
489 (uint32_t)(start
>> 32),(uint32_t)start
,
490 (uint32_t)(end
>> 32),(uint32_t)end
,
491 cur_ro
? "Static " : "Dynamic");
493 dump_kva_l2(start
, (tt_entry_t
*)phystokv(cur
), 1, &rosz
, &rwsz
);
498 printf("L2 Address space mapped: Static %lluMB Dynamic %lluMB Total %lluMB\n",
501 (tot_rosz
>> 20) + (tot_rwsz
>> 20));
503 ro_ptpages
= (int)((ropage_next
- (vm_offset_t
)&ropagetable_begin
) >> ARM_PGSHIFT
);
504 rw_ptpages
= (int)(lowGlo
.lgStaticSize
>> ARM_PGSHIFT
);
505 printf("Pages used: static %d dynamic %d\n", ro_ptpages
, rw_ptpages
);
510 #if __ARM_KERNEL_PROTECT__ || XNU_MONITOR
513 * root_ttp: The kernel virtual address for the root of the target page tables
514 * vaddr: The target virtual address
515 * pte: A page table entry value (may be ARM_PTE_EMPTY)
517 * This function installs pte at vaddr in root_ttp. Any page table pages needed
518 * to install pte will be allocated by this function.
521 arm_vm_map(tt_entry_t
* root_ttp
, vm_offset_t vaddr
, pt_entry_t pte
)
523 vm_offset_t ptpage
= 0;
524 tt_entry_t
* ttp
= root_ttp
;
526 tt_entry_t
* l1_ttep
= NULL
;
527 tt_entry_t l1_tte
= 0;
529 tt_entry_t
* l2_ttep
= NULL
;
530 tt_entry_t l2_tte
= 0;
531 pt_entry_t
* ptep
= NULL
;
535 * Walk the target page table to find the PTE for the given virtual
536 * address. Allocate any page table pages needed to do this.
538 l1_ttep
= ttp
+ ((vaddr
& ARM_TT_L1_INDEX_MASK
) >> ARM_TT_L1_SHIFT
);
541 if (l1_tte
== ARM_TTE_EMPTY
) {
542 ptpage
= alloc_ptpage(TRUE
);
543 bzero((void *)ptpage
, ARM_PGBYTES
);
544 l1_tte
= kvtophys(ptpage
);
545 l1_tte
&= ARM_TTE_TABLE_MASK
;
546 l1_tte
|= ARM_TTE_VALID
| ARM_TTE_TYPE_TABLE
;
551 ttp
= (tt_entry_t
*)phystokv(l1_tte
& ARM_TTE_TABLE_MASK
);
553 l2_ttep
= ttp
+ ((vaddr
& ARM_TT_L2_INDEX_MASK
) >> ARM_TT_L2_SHIFT
);
556 if (l2_tte
== ARM_TTE_EMPTY
) {
557 ptpage
= alloc_ptpage(TRUE
);
558 bzero((void *)ptpage
, ARM_PGBYTES
);
559 l2_tte
= kvtophys(ptpage
);
560 l2_tte
&= ARM_TTE_TABLE_MASK
;
561 l2_tte
|= ARM_TTE_VALID
| ARM_TTE_TYPE_TABLE
;
566 ttp
= (tt_entry_t
*)phystokv(l2_tte
& ARM_TTE_TABLE_MASK
);
568 ptep
= ttp
+ ((vaddr
& ARM_TT_L3_INDEX_MASK
) >> ARM_TT_L3_SHIFT
);
572 * If the existing PTE is not empty, then we are replacing a valid
575 if (cpte
!= ARM_PTE_EMPTY
) {
576 panic("%s: cpte=%#llx is not empty, "
577 "vaddr=%#lx, pte=%#llx",
585 #endif // __ARM_KERNEL_PROTECT || XNU_MONITOR
587 #if __ARM_KERNEL_PROTECT__
590 * arm_vm_kernel_el0_map:
591 * vaddr: The target virtual address
592 * pte: A page table entry value (may be ARM_PTE_EMPTY)
594 * This function installs pte at vaddr for the EL0 kernel mappings.
597 arm_vm_kernel_el0_map(vm_offset_t vaddr
, pt_entry_t pte
)
599 /* Calculate where vaddr will be in the EL1 kernel page tables. */
600 vm_offset_t kernel_pmap_vaddr
= vaddr
- ((ARM_TT_ROOT_INDEX_MASK
+ ARM_TT_ROOT_SIZE
) / 2ULL);
601 arm_vm_map(cpu_tte
, kernel_pmap_vaddr
, pte
);
605 * arm_vm_kernel_el1_map:
606 * vaddr: The target virtual address
607 * pte: A page table entry value (may be ARM_PTE_EMPTY)
609 * This function installs pte at vaddr for the EL1 kernel mappings.
612 arm_vm_kernel_el1_map(vm_offset_t vaddr
, pt_entry_t pte
) {
613 arm_vm_map(cpu_tte
, vaddr
, pte
);
618 * vaddr: The target virtual address
620 * This function returns the PTE value for the given vaddr from the kernel page
 * tables. If the region has been block mapped, we return what an
 * equivalent PTE value would be (as regards permissions and flags). We also
 * remove the HINT bit (as we are not necessarily creating contiguous mappings).
626 arm_vm_kernel_pte(vm_offset_t vaddr
)
628 tt_entry_t
* ttp
= cpu_tte
;
629 tt_entry_t
* ttep
= NULL
;
631 pt_entry_t
* ptep
= NULL
;
634 ttep
= ttp
+ ((vaddr
& ARM_TT_L1_INDEX_MASK
) >> ARM_TT_L1_SHIFT
);
637 assert(tte
& ARM_TTE_VALID
);
639 if ((tte
& ARM_TTE_TYPE_MASK
) == ARM_TTE_TYPE_BLOCK
) {
640 /* This is a block mapping; return the equivalent PTE value. */
641 pte
= (pt_entry_t
)(tte
& ~ARM_TTE_TYPE_MASK
);
642 pte
|= ARM_PTE_TYPE_VALID
;
643 pte
|= vaddr
& ((ARM_TT_L1_SIZE
- 1) & ARM_PTE_PAGE_MASK
);
644 pte
&= ~ARM_PTE_HINT_MASK
;
648 ttp
= (tt_entry_t
*)phystokv(tte
& ARM_TTE_TABLE_MASK
);
649 ttep
= ttp
+ ((vaddr
& ARM_TT_L2_INDEX_MASK
) >> ARM_TT_L2_SHIFT
);
652 assert(tte
& ARM_TTE_VALID
);
654 if ((tte
& ARM_TTE_TYPE_MASK
) == ARM_TTE_TYPE_BLOCK
) {
655 /* This is a block mapping; return the equivalent PTE value. */
656 pte
= (pt_entry_t
)(tte
& ~ARM_TTE_TYPE_MASK
);
657 pte
|= ARM_PTE_TYPE_VALID
;
658 pte
|= vaddr
& ((ARM_TT_L2_SIZE
- 1) & ARM_PTE_PAGE_MASK
);
659 pte
&= ~ARM_PTE_HINT_MASK
;
663 ttp
= (tt_entry_t
*)phystokv(tte
& ARM_TTE_TABLE_MASK
);
665 ptep
= ttp
+ ((vaddr
& ARM_TT_L3_INDEX_MASK
) >> ARM_TT_L3_SHIFT
);
667 pte
&= ~ARM_PTE_HINT_MASK
;
672 * arm_vm_prepare_kernel_el0_mappings:
673 * alloc_only: Indicates if PTE values should be copied from the EL1 kernel
676 * This function expands the kernel page tables to support the EL0 kernel
677 * mappings, and conditionally installs the PTE values for the EL0 kernel
678 * mappings (if alloc_only is false).
681 arm_vm_prepare_kernel_el0_mappings(bool alloc_only
)
684 vm_offset_t start
= ((vm_offset_t
)&ExceptionVectorsBase
) & ~PAGE_MASK
;
685 vm_offset_t end
= (((vm_offset_t
)&ExceptionVectorsEnd
) + PAGE_MASK
) & ~PAGE_MASK
;
687 vm_offset_t cur_fixed
= 0;
689 /* Expand for/map the exceptions vectors in the EL0 kernel mappings. */
690 for (cur
= start
, cur_fixed
= ARM_KERNEL_PROTECT_EXCEPTION_START
; cur
< end
; cur
+= ARM_PGBYTES
, cur_fixed
+= ARM_PGBYTES
) {
692 * We map the exception vectors at a different address than that
693 * of the kernelcache to avoid sharing page table pages with the
694 * kernelcache (as this may cause issues with TLB caching of
698 pte
= arm_vm_kernel_pte(cur
);
701 arm_vm_kernel_el1_map(cur_fixed
, pte
);
702 arm_vm_kernel_el0_map(cur_fixed
, pte
);
705 __builtin_arm_dmb(DMB_ISH
);
706 __builtin_arm_isb(ISB_SY
);
710 * If we have created the alternate exception vector mappings,
711 * the boot CPU may now switch over to them.
713 set_vbar_el1(ARM_KERNEL_PROTECT_EXCEPTION_START
);
714 __builtin_arm_isb(ISB_SY
);
719 * arm_vm_populate_kernel_el0_mappings:
721 * This function adds all required mappings to the EL0 kernel mappings.
724 arm_vm_populate_kernel_el0_mappings(void)
726 arm_vm_prepare_kernel_el0_mappings(FALSE
);
730 * arm_vm_expand_kernel_el0_mappings:
 * This function expands the kernel page tables to accommodate the EL0 kernel
736 arm_vm_expand_kernel_el0_mappings(void)
738 arm_vm_prepare_kernel_el0_mappings(TRUE
);
740 #endif /* __ARM_KERNEL_PROTECT__ */
742 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
743 extern void bootstrap_instructions
;
746 * arm_replace_identity_map takes the V=P map that we construct in start.s
747 * and repurposes it in order to have it map only the page we need in order
748 * to turn on the MMU. This prevents us from running into issues where
749 * KTRR will cause us to fault on executable block mappings that cross the
752 static void arm_replace_identity_map(boot_args
* args
)
757 pmap_paddr_t l1_ptp_phys
= 0;
758 tt_entry_t
*l1_ptp_virt
= NULL
;
759 tt_entry_t
*tte1
= NULL
;
760 pmap_paddr_t l2_ptp_phys
= 0;
761 tt_entry_t
*l2_ptp_virt
= NULL
;
762 tt_entry_t
*tte2
= NULL
;
763 pmap_paddr_t l3_ptp_phys
= 0;
764 pt_entry_t
*l3_ptp_virt
= NULL
;
765 pt_entry_t
*ptep
= NULL
;
767 addr
= ((vm_offset_t
)&bootstrap_instructions
) & ~ARM_PGMASK
;
768 paddr
= kvtophys(addr
);
771 * The V=P page tables (at the time this comment was written) start
772 * after the last bit of kernel data, and consist of 1 L1 page and 1 or
774 * Grab references to those pages, and allocate an L3 page.
776 l1_ptp_phys
= args
->topOfKernelData
;
777 l1_ptp_virt
= (tt_entry_t
*)phystokv(l1_ptp_phys
);
778 tte1
= &l1_ptp_virt
[L1_TABLE_INDEX(paddr
)];
780 l2_ptp_virt
= L2_TABLE_VA(tte1
);
781 l2_ptp_phys
= (*tte1
) & ARM_TTE_TABLE_MASK
;
782 tte2
= &l2_ptp_virt
[L2_TABLE_INDEX(paddr
)];
784 l3_ptp_virt
= (pt_entry_t
*)alloc_ptpage(FALSE
);
785 l3_ptp_phys
= kvtophys((vm_offset_t
)l3_ptp_virt
);
786 ptep
= &l3_ptp_virt
[L3_TABLE_INDEX(paddr
)];
789 * Replace the large V=P mapping with a mapping that provides only the
790 * mappings needed to turn on the MMU.
793 bzero(l1_ptp_virt
, ARM_PGBYTES
);
794 *tte1
= ARM_TTE_BOOT_TABLE
| (l2_ptp_phys
& ARM_TTE_TABLE_MASK
);
796 bzero(l2_ptp_virt
, ARM_PGBYTES
);
797 *tte2
= ARM_TTE_BOOT_TABLE
| (l3_ptp_phys
& ARM_TTE_TABLE_MASK
);
799 *ptep
= (paddr
& ARM_PTE_MASK
) |
801 ARM_PTE_SH(SH_OUTER_MEMORY
) |
802 ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK
) |
804 ARM_PTE_AP(AP_RONA
) |
807 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
tt_entry_t *arm_kva_to_tte(vm_offset_t);

tt_entry_t *
arm_kva_to_tte(vm_offset_t va)
{
	tt_entry_t *tte1, *tte2;
	tte1 = cpu_tte + L1_TABLE_INDEX(va);
	tte2 = L2_TABLE_VA(tte1) + L2_TABLE_INDEX(va);
	return tte2;
}

static inline pt_entry_t *
arm_kva_to_pte(vm_offset_t va)
{
	tt_entry_t *tte2 = arm_kva_to_tte(va);
	return L3_TABLE_VA(tte2) + L3_TABLE_INDEX(va);
}
#define ARM64_GRANULE_ALLOW_BLOCK (1 << 0)
#define ARM64_GRANULE_ALLOW_HINT (1 << 1)

/*
 * arm_vm_page_granular_helper updates protections at the L3 level. It will (if
 * necessary) allocate a page for the L3 table and update the corresponding L2
 * entry. Then, it will iterate over the L3 table, updating protections as necessary.
 * This expects to be invoked on a L2 entry or sub L2 entry granularity, so this should
 * not be invoked from a context that does not do L2 iteration separately (basically,
 * don't call this except from arm_vm_page_granular_prot).
 *
 * unsigned granule: 0 => force to page granule, or a combination of
 * ARM64_GRANULE_* flags declared above.
 */
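
/*
 * Illustrative note (not part of the original source): a granule argument of 0
 * forces pure L3 (page) mappings; ARM64_GRANULE_ALLOW_BLOCK permits whole L2
 * block mappings for fully covered, properly aligned L2-sized chunks; and
 * ARM64_GRANULE_ALLOW_HINT additionally permits the contiguous-hint bit on runs
 * of L3 entries. A typical combination used later in this file is
 *     ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT
 * for large, attribute-uniform regions such as the physical aperture.
 */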
848 arm_vm_page_granular_helper(vm_offset_t start
, vm_offset_t _end
, vm_offset_t va
, pmap_paddr_t pa_offset
,
849 int pte_prot_APX
, int pte_prot_XN
, unsigned granule
,
850 pt_entry_t
**deferred_pte
, pt_entry_t
*deferred_ptmp
)
852 if (va
& ARM_TT_L2_OFFMASK
) { /* ragged edge hanging over a ARM_TT_L2_SIZE boundary */
856 pt_entry_t
*ppte
, *recursive_pte
= NULL
, ptmp
, recursive_ptmp
= 0;
860 va
&= ~ARM_TT_L2_OFFMASK
;
861 pa
= va
- gVirtBase
+ gPhysBase
- pa_offset
;
863 if (pa
>= real_avail_end
)
866 tte2
= arm_kva_to_tte(va
);
871 if (ARM_TTE_TYPE_TABLE
== (tmplate
& ARM_TTE_TYPE_MASK
)) {
872 /* pick up the existing page table. */
873 ppte
= (pt_entry_t
*)phystokv((tmplate
& ARM_TTE_TABLE_MASK
));
875 // TTE must be reincarnated with page level mappings.
876 ppte
= (pt_entry_t
*)alloc_ptpage(pa_offset
== 0);
877 bzero(ppte
, ARM_PGBYTES
);
878 ppte_phys
= kvtophys((vm_offset_t
)ppte
);
880 *tte2
= pa_to_tte(ppte_phys
) | ARM_TTE_TYPE_TABLE
| ARM_TTE_VALID
;
883 vm_offset_t len
= _end
- va
;
884 if ((pa
+ len
) > real_avail_end
)
885 _end
-= (pa
+ len
- real_avail_end
);
886 assert((start
- gVirtBase
+ gPhysBase
- pa_offset
) >= gPhysBase
);
888 /* Round up to the nearest PAGE_SIZE boundary when creating mappings:
889 * PAGE_SIZE may be a multiple of ARM_PGBYTES, and we don't want to leave
890 * a ragged non-PAGE_SIZE-aligned edge. */
891 vm_offset_t rounded_end
= round_page(_end
);
892 /* Apply the desired protections to the specified page range */
893 for (i
= 0; i
<= (ARM_TT_L3_INDEX_MASK
>>ARM_TT_L3_SHIFT
); i
++) {
894 if ((start
<= va
) && (va
< rounded_end
)) {
896 ptmp
= pa
| ARM_PTE_AF
| ARM_PTE_SH(SH_OUTER_MEMORY
) | ARM_PTE_TYPE
;
897 ptmp
= ptmp
| ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT
);
898 ptmp
= ptmp
| ARM_PTE_AP(pte_prot_APX
);
899 ptmp
= ptmp
| ARM_PTE_NX
;
900 #if __ARM_KERNEL_PROTECT__
901 ptmp
= ptmp
| ARM_PTE_NG
;
902 #endif /* __ARM_KERNEL_PROTECT__ */
905 ptmp
= ptmp
| ARM_PTE_PNX
;
909 * If we can, apply the contiguous hint to this range. The hint is
910 * applicable if the current address falls within a hint-sized range that will
911 * be fully covered by this mapping request.
913 if ((va
>= round_up_pte_hint_address(start
)) && (round_up_pte_hint_address(va
+ 1) <= _end
) &&
914 (granule
& ARM64_GRANULE_ALLOW_HINT
) && use_contiguous_hint
) {
915 assert((va
& ((1 << ARM_PTE_HINT_ADDR_SHIFT
) - 1)) == ((pa
& ((1 << ARM_PTE_HINT_ADDR_SHIFT
) - 1))));
916 ptmp
|= ARM_PTE_HINT
;
917 /* Do not attempt to reapply the hint bit to an already-active mapping.
918 * This very likely means we're attempting to change attributes on an already-active mapping,
919 * which violates the requirement of the hint bit.*/
920 assert(!kva_active
|| (ppte
[i
] == ARM_PTE_TYPE_FAULT
));
923 * Do not change the contiguous bit on an active mapping. Even in a single-threaded
924 * environment, it's possible for prefetch to produce a TLB conflict by trying to pull in
925 * a hint-sized entry on top of one or more existing page-sized entries. It's also useful
926 * to make sure we're not trying to unhint a sub-range of a larger hinted range, which
927 * could produce a later TLB conflict.
929 assert(!kva_active
|| (ppte
[i
] == ARM_PTE_TYPE_FAULT
) || ((ppte
[i
] & ARM_PTE_HINT
) == (ptmp
& ARM_PTE_HINT
)));
932 * If we reach an entry that maps the current pte page, delay updating it until the very end.
933 * Otherwise we might end up making the PTE page read-only, leading to a fault later on in
934 * this function if we manage to outrun the TLB. This can happen on KTRR-enabled devices when
935 * marking segDATACONST read-only. Mappings for this region may straddle a PT page boundary,
936 * so we must also defer assignment of the following PTE. We will assume that if the region
937 * were to require one or more full L3 pages, it would instead use L2 blocks where possible,
938 * therefore only requiring at most one L3 page at the beginning and one at the end.
940 if (kva_active
&& ((pt_entry_t
*)(phystokv(pa
)) == ppte
)) {
941 assert(recursive_pte
== NULL
);
942 assert(granule
& ARM64_GRANULE_ALLOW_BLOCK
);
943 recursive_pte
= &ppte
[i
];
944 recursive_ptmp
= ptmp
;
945 } else if ((deferred_pte
!= NULL
) && (&ppte
[i
] == &recursive_pte
[1])) {
946 assert(*deferred_pte
== NULL
);
947 assert(deferred_ptmp
!= NULL
);
948 *deferred_pte
= &ppte
[i
];
949 *deferred_ptmp
= ptmp
;
958 if (recursive_pte
!= NULL
)
959 *recursive_pte
= recursive_ptmp
;
964 * arm_vm_page_granular_prot updates protections by iterating over the L2 entries and
965 * changing them. If a particular chunk necessitates L3 entries (for reasons of
966 * alignment or length, or an explicit request that the entry be fully expanded), we
967 * hand off to arm_vm_page_granular_helper to deal with the L3 chunk of the logic.
970 arm_vm_page_granular_prot(vm_offset_t start
, unsigned long size
, pmap_paddr_t pa_offset
,
971 int tte_prot_XN
, int pte_prot_APX
, int pte_prot_XN
,
974 pt_entry_t
*deferred_pte
= NULL
, deferred_ptmp
= 0;
975 vm_offset_t _end
= start
+ size
;
976 vm_offset_t align_start
= (start
+ ARM_TT_L2_OFFMASK
) & ~ARM_TT_L2_OFFMASK
;
981 if (align_start
> _end
) {
982 arm_vm_page_granular_helper(start
, _end
, start
, pa_offset
, pte_prot_APX
, pte_prot_XN
, granule
, NULL
, NULL
);
986 arm_vm_page_granular_helper(start
, align_start
, start
, pa_offset
, pte_prot_APX
, pte_prot_XN
, granule
, &deferred_pte
, &deferred_ptmp
);
988 while ((_end
- align_start
) >= ARM_TT_L2_SIZE
) {
989 if (!(granule
& ARM64_GRANULE_ALLOW_BLOCK
)) {
990 arm_vm_page_granular_helper(align_start
, align_start
+ARM_TT_L2_SIZE
, align_start
+ 1, pa_offset
,
991 pte_prot_APX
, pte_prot_XN
, granule
, NULL
, NULL
);
993 pmap_paddr_t pa
= align_start
- gVirtBase
+ gPhysBase
- pa_offset
;
994 assert((pa
& ARM_TT_L2_OFFMASK
) == 0);
998 tte2
= arm_kva_to_tte(align_start
);
1000 if ((pa
>= gPhysBase
) && (pa
< real_avail_end
)) {
1001 tmplate
= (pa
& ARM_TTE_BLOCK_L2_MASK
) | ARM_TTE_TYPE_BLOCK
1002 | ARM_TTE_VALID
| ARM_TTE_BLOCK_AF
| ARM_TTE_BLOCK_NX
1003 | ARM_TTE_BLOCK_AP(pte_prot_APX
) | ARM_TTE_BLOCK_SH(SH_OUTER_MEMORY
)
1004 | ARM_TTE_BLOCK_ATTRINDX(CACHE_ATTRINDX_WRITEBACK
);
1006 #if __ARM_KERNEL_PROTECT__
1007 tmplate
= tmplate
| ARM_TTE_BLOCK_NG
;
1008 #endif /* __ARM_KERNEL_PROTECT__ */
1010 tmplate
= tmplate
| ARM_TTE_BLOCK_PNX
;
1015 align_start
+= ARM_TT_L2_SIZE
;
1018 if (align_start
< _end
)
1019 arm_vm_page_granular_helper(align_start
, _end
, _end
, pa_offset
, pte_prot_APX
, pte_prot_XN
, granule
, &deferred_pte
, &deferred_ptmp
);
1021 if (deferred_pte
!= NULL
)
1022 *deferred_pte
= deferred_ptmp
;
1026 arm_vm_page_granular_RNX(vm_offset_t start
, unsigned long size
, unsigned granule
)
1028 arm_vm_page_granular_prot(start
, size
, 0, 1, AP_RONA
, 1, granule
);
1032 arm_vm_page_granular_ROX(vm_offset_t start
, unsigned long size
, unsigned granule
)
1034 arm_vm_page_granular_prot(start
, size
, 0, 0, AP_RONA
, 0, granule
);
1038 arm_vm_page_granular_RWNX(vm_offset_t start
, unsigned long size
, unsigned granule
)
1040 arm_vm_page_granular_prot(start
, size
, 0, 1, AP_RWNA
, 1, granule
);
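
/*
 * Illustrative summary (not from the original source text): the three wrappers
 * above differ only in the access-permission / execute-never combination they
 * pass to arm_vm_page_granular_prot():
 *     arm_vm_page_granular_RNX()  -> read-only,  never executable
 *     arm_vm_page_granular_ROX()  -> read-only,  executable
 *     arm_vm_page_granular_RWNX() -> read/write, never executable
 * For example, a hypothetical caller protecting a const data segment would use
 *     arm_vm_page_granular_RNX(segbase, segsize, ARM64_GRANULE_ALLOW_BLOCK);
 */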
1043 /* used in the chosen/memory-map node, populated by iBoot. */
1044 typedef struct MemoryMapFileInfo
{
1047 } MemoryMapFileInfo
;
1050 arm_vm_prot_init(boot_args
* args
)
1053 segLOWESTTEXT
= UINT64_MAX
;
1054 if (segSizePRELINKTEXT
&& (segPRELINKTEXTB
< segLOWESTTEXT
)) segLOWESTTEXT
= segPRELINKTEXTB
;
1055 assert(segSizeTEXT
);
1056 if (segTEXTB
< segLOWESTTEXT
) segLOWESTTEXT
= segTEXTB
;
1057 assert(segLOWESTTEXT
< UINT64_MAX
);
1059 segEXTRADATA
= segLOWESTTEXT
;
1060 segSizeEXTRADATA
= 0;
1062 segLOWEST
= segLOWESTTEXT
;
1065 MemoryMapFileInfo
*trustCacheRange
;
1066 unsigned int trustCacheRangeSize
;
1069 err
= DTLookupEntry(NULL
, "chosen/memory-map", &memory_map
);
1070 assert(err
== kSuccess
);
1072 err
= DTGetProperty(memory_map
, "TrustCache", (void**)&trustCacheRange
, &trustCacheRangeSize
);
1073 if (err
== kSuccess
) {
1074 assert(trustCacheRangeSize
== sizeof(MemoryMapFileInfo
));
1076 segEXTRADATA
= phystokv(trustCacheRange
->paddr
);
1077 segSizeEXTRADATA
= trustCacheRange
->length
;
1079 if (segEXTRADATA
<= segLOWEST
) {
1080 segLOWEST
= segEXTRADATA
;
1082 #if !(DEBUG || DEVELOPMENT)
1086 panic("EXTRADATA is in an unexpected place: %#lx > %#lx", segEXTRADATA
, segLOWEST
);
1088 #endif /* !(DEBUG || DEVELOPMENT) */
1090 arm_vm_page_granular_RNX(segEXTRADATA
, segSizeEXTRADATA
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1094 /* Map coalesced kext TEXT segment RWNX for now */
1095 arm_vm_page_granular_RWNX(segPRELINKTEXTB
, segSizePRELINKTEXT
, ARM64_GRANULE_ALLOW_BLOCK
); // Refined in OSKext::readPrelinkedExtensions
1097 /* Map coalesced kext DATA_CONST segment RWNX (could be empty) */
1098 arm_vm_page_granular_RWNX(segPLKDATACONSTB
, segSizePLKDATACONST
, ARM64_GRANULE_ALLOW_BLOCK
); // Refined in OSKext::readPrelinkedExtensions
1100 /* Map coalesced kext TEXT_EXEC segment RX (could be empty) */
1101 arm_vm_page_granular_ROX(segPLKTEXTEXECB
, segSizePLKTEXTEXEC
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
); // Refined in OSKext::readPrelinkedExtensions
1103 /* if new segments not present, set space between PRELINK_TEXT and xnu TEXT to RWNX
1104 * otherwise we no longer expect any space between the coalesced kext read only segments and xnu rosegments
1106 if (!segSizePLKDATACONST
&& !segSizePLKTEXTEXEC
) {
1107 if (segSizePRELINKTEXT
) {
1108 arm_vm_page_granular_RWNX(segPRELINKTEXTB
+ segSizePRELINKTEXT
, segTEXTB
- (segPRELINKTEXTB
+ segSizePRELINKTEXT
),
1109 ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1113 * If we have the new segments, we should still protect the gap between kext
1114 * read-only pages and kernel read-only pages, in the event that this gap
1117 if ((segPLKDATACONSTB
+ segSizePLKDATACONST
) < segTEXTB
) {
1118 arm_vm_page_granular_RWNX(segPLKDATACONSTB
+ segSizePLKDATACONST
, segTEXTB
- (segPLKDATACONSTB
+ segSizePLKDATACONST
),
1119 ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1124 * Protection on kernel text is loose here to allow shenanigans early on. These
1125 * protections are tightened in arm_vm_prot_finalize(). This is necessary because
1126 * we currently patch LowResetVectorBase in cpu.c.
1128 * TEXT segment contains mach headers and other non-executable data. This will become RONX later.
1130 arm_vm_page_granular_RNX(segTEXTB
, segSizeTEXT
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1132 /* Can DATACONST start out and stay RNX?
1133 * NO, stuff in this segment gets modified during startup (viz. mac_policy_init()/mac_policy_list)
1134 * Make RNX in prot_finalize
1137 /* The ropagetable region will ultimately be owned by the PPL. Set permissions
1138 * on it separately to avoid applying mismatched block settings between this function,
1139 * pmap_static_allocations_done(), and arm_vm_prot_finalize(). */
1140 vm_offset_t segDATACONSTE
= segDATACONSTB
+ segSizeDATACONST
;
1142 arm_vm_page_granular_RWNX(segDATACONSTB
, (vm_offset_t
)&ropagetable_begin
- segDATACONSTB
, ARM64_GRANULE_ALLOW_BLOCK
);
1143 arm_vm_page_granular_RWNX((vm_offset_t
)&ropagetable_begin
,
1144 (vm_offset_t
)&ropagetable_end
- (vm_offset_t
)&ropagetable_begin
, ARM64_GRANULE_ALLOW_BLOCK
);
1145 arm_vm_page_granular_RWNX((vm_offset_t
)&ropagetable_end
,
1146 segDATACONSTE
- (vm_offset_t
)&ropagetable_end
, ARM64_GRANULE_ALLOW_BLOCK
);
1148 arm_vm_page_granular_RWNX(segDATACONSTB
, segSizeDATACONST
, ARM64_GRANULE_ALLOW_BLOCK
);
1151 arm_vm_page_granular_ROX(segTEXTEXECB
, segSizeTEXTEXEC
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1154 arm_vm_page_granular_ROX(segPPLTEXTB
, segSizePPLTEXT
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1155 arm_vm_page_granular_ROX(segPPLTRAMPB
, segSizePPLTRAMP
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1156 arm_vm_page_granular_RNX(segPPLDATACONSTB
, segSizePPLDATACONST
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1159 /* DATA segment will remain RWNX */
1160 arm_vm_page_granular_RWNX(segDATAB
, segSizeDATA
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1162 arm_vm_page_granular_RWNX(segPPLDATAB
, segSizePPLDATA
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1165 arm_vm_page_granular_RWNX(segBOOTDATAB
, segSizeBOOTDATA
, 0);
1166 arm_vm_page_granular_RNX((vm_offset_t
)&intstack_low_guard
, PAGE_MAX_SIZE
, 0);
1167 arm_vm_page_granular_RNX((vm_offset_t
)&intstack_high_guard
, PAGE_MAX_SIZE
, 0);
1168 arm_vm_page_granular_RNX((vm_offset_t
)&excepstack_high_guard
, PAGE_MAX_SIZE
, 0);
1170 arm_vm_page_granular_ROX(segKLDB
, segSizeKLD
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1171 arm_vm_page_granular_RWNX(segLINKB
, segSizeLINK
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1172 arm_vm_page_granular_RWNX(segPLKLINKEDITB
, segSizePLKLINKEDIT
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
); // Coalesced kext LINKEDIT segment
1173 arm_vm_page_granular_ROX(segLASTB
, segSizeLAST
, ARM64_GRANULE_ALLOW_BLOCK
); // __LAST may be empty, but we cannot assume this
1175 arm_vm_page_granular_RWNX(segPRELINKDATAB
, segSizePRELINKDATA
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
); // Prelink __DATA for kexts (RW data)
1177 if (segSizePLKLLVMCOV
> 0)
1178 arm_vm_page_granular_RWNX(segPLKLLVMCOVB
, segSizePLKLLVMCOV
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
); // LLVM code coverage data
1180 arm_vm_page_granular_RWNX(segPRELINKINFOB
, segSizePRELINKINFO
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
); /* PreLinkInfoDictionary */
1182 arm_vm_page_granular_RNX(phystokv(args
->topOfKernelData
), BOOTSTRAP_TABLE_SIZE
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
); // Boot page tables; they should not be mutable.
1186 * return < 0 for a < b
1190 typedef int (*cmpfunc_t
)(const void *a
, const void *b
);
1193 qsort(void *a
, size_t n
, size_t es
, cmpfunc_t cmp
);
1196 cmp_ptov_entries(const void *a
, const void *b
)
1198 const ptov_table_entry
*entry_a
= a
;
1199 const ptov_table_entry
*entry_b
= b
;
1200 // Sort in descending order of segment length
1201 if (entry_a
->len
< entry_b
->len
)
1203 else if (entry_a
->len
> entry_b
->len
)
1209 SECURITY_READ_ONLY_LATE(static unsigned int) ptov_index
= 0;
1211 #define ROUND_TWIG(addr) (((addr) + ARM_TT_TWIG_OFFMASK) & ~(ARM_TT_TWIG_OFFMASK))
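
/*
 * Illustrative note (not part of the original source): ROUND_TWIG() aligns an
 * address up to the next twig (L2) boundary. Assuming, hypothetically, that
 * ARM_TT_TWIG_OFFMASK is 0x1fffff (a 2MB twig), ROUND_TWIG(0x80345000) yields
 * 0x80400000, and an already-aligned address such as 0x80400000 is returned
 * unchanged, because (addr + 0x1fffff) & ~0x1fffff is an align-up rather than
 * a strict round-up-to-next.
 */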
1214 arm_vm_physmap_slide(ptov_table_entry
*temp_ptov_table
, vm_map_address_t physmap_base
, vm_map_address_t orig_va
, vm_size_t len
, int pte_prot_APX
, unsigned granule
)
1216 pmap_paddr_t pa_offset
;
1218 assert(ptov_index
< PTOV_TABLE_SIZE
);
1219 assert((orig_va
& ARM_PGMASK
) == 0);
1220 temp_ptov_table
[ptov_index
].pa
= orig_va
- gVirtBase
+ gPhysBase
;
1221 if (ptov_index
== 0)
1222 temp_ptov_table
[ptov_index
].va
= physmap_base
;
1224 temp_ptov_table
[ptov_index
].va
= temp_ptov_table
[ptov_index
- 1].va
+ temp_ptov_table
[ptov_index
- 1].len
;
1225 if (granule
& ARM64_GRANULE_ALLOW_BLOCK
) {
1226 vm_map_address_t orig_offset
= temp_ptov_table
[ptov_index
].pa
& ARM_TT_TWIG_OFFMASK
;
1227 vm_map_address_t new_offset
= temp_ptov_table
[ptov_index
].va
& ARM_TT_TWIG_OFFMASK
;
1228 if (new_offset
< orig_offset
)
1229 temp_ptov_table
[ptov_index
].va
+= (orig_offset
- new_offset
);
1230 else if (new_offset
> orig_offset
)
1231 temp_ptov_table
[ptov_index
].va
= ROUND_TWIG(temp_ptov_table
[ptov_index
].va
) + orig_offset
;
1233 assert((temp_ptov_table
[ptov_index
].va
& ARM_PGMASK
) == 0);
1234 temp_ptov_table
[ptov_index
].len
= round_page(len
);
1235 pa_offset
= temp_ptov_table
[ptov_index
].va
- orig_va
;
1236 arm_vm_page_granular_prot(temp_ptov_table
[ptov_index
].va
, temp_ptov_table
[ptov_index
].len
, pa_offset
, 1, pte_prot_APX
, 1, granule
);
1242 SECURITY_READ_ONLY_LATE(static boolean_t
) keep_linkedit
= FALSE
;
1245 arm_vm_physmap_init(boot_args
*args
, vm_map_address_t physmap_base
, vm_map_address_t dynamic_memory_begin __unused
)
1247 ptov_table_entry temp_ptov_table
[PTOV_TABLE_SIZE
];
1248 bzero(temp_ptov_table
, sizeof(temp_ptov_table
));
1250 // This is memory that will either be handed back to the VM layer via ml_static_mfree(),
1251 // or will be available for general-purpose use. Physical aperture mappings for this memory
1252 // must be at page granularity, so that PPL ownership or cache attribute changes can be reflected
1253 // in the physical aperture mappings.
1256 // Slid region between gPhysBase and beginning of protected text
1257 arm_vm_physmap_slide(temp_ptov_table
, physmap_base
, gVirtBase
, segLOWEST
- gVirtBase
, AP_RWNA
, 0);
1259 // kext bootstrap segment
1260 arm_vm_physmap_slide(temp_ptov_table
, physmap_base
, segKLDB
, segSizeKLD
, AP_RONA
, 0);
1263 arm_vm_physmap_slide(temp_ptov_table
, physmap_base
, segBOOTDATAB
, segSizeBOOTDATA
, AP_RONA
, 0);
1265 #if KASAN_DYNAMIC_BLACKLIST
1266 /* KASAN's dynamic blacklist needs to query the LINKEDIT segment at runtime. As such, the
1267 * kext bootstrap code will not jettison LINKEDIT on kasan kernels, so don't bother to relocate it. */
1268 keep_linkedit
= TRUE
;
1270 PE_parse_boot_argn("keepsyms", &keep_linkedit
, sizeof(keep_linkedit
));
1272 if (!keep_linkedit
) {
1274 arm_vm_physmap_slide(temp_ptov_table
, physmap_base
, segLINKB
, segSizeLINK
, AP_RWNA
, 0);
1276 // Prelinked kernel LINKEDIT
1277 arm_vm_physmap_slide(temp_ptov_table
, physmap_base
, segPLKLINKEDITB
, segSizePLKLINKEDIT
, AP_RWNA
, 0);
1280 // Prelinked kernel plists
1281 arm_vm_physmap_slide(temp_ptov_table
, physmap_base
, segPRELINKINFOB
, segSizePRELINKINFO
, AP_RWNA
, 0);
1283 // Device tree, ramdisk, boot args
1284 arm_vm_physmap_slide(temp_ptov_table
, physmap_base
, end_kern
, (args
->topOfKernelData
- gPhysBase
+ gVirtBase
) - end_kern
, AP_RWNA
, 0);
1285 PE_slide_devicetree(temp_ptov_table
[ptov_index
- 1].va
- end_kern
);
1287 // Remainder of physical memory
1288 arm_vm_physmap_slide(temp_ptov_table
, physmap_base
, (args
->topOfKernelData
+ BOOTSTRAP_TABLE_SIZE
- gPhysBase
+ gVirtBase
),
1289 real_avail_end
- (args
->topOfKernelData
+ BOOTSTRAP_TABLE_SIZE
), AP_RWNA
, 0);
1291 assert((temp_ptov_table
[ptov_index
- 1].va
+ temp_ptov_table
[ptov_index
- 1].len
) <= dynamic_memory_begin
);
1293 // Sort in descending order of segment length. LUT traversal is linear, so largest (most likely used)
1294 // segments should be placed earliest in the table to optimize lookup performance.
1295 qsort(temp_ptov_table
, PTOV_TABLE_SIZE
, sizeof(temp_ptov_table
[0]), cmp_ptov_entries
);
1297 memcpy(ptov_table
, temp_ptov_table
, sizeof(ptov_table
));
1303 arm_vm_physmap_init(boot_args
*args
, vm_map_address_t physmap_base
, vm_map_address_t dynamic_memory_begin __unused
)
1305 ptov_table_entry temp_ptov_table
[PTOV_TABLE_SIZE
];
1306 bzero(temp_ptov_table
, sizeof(temp_ptov_table
));
1308 // Will be handed back to VM layer through ml_static_mfree() in arm_vm_prot_finalize()
1309 arm_vm_physmap_slide(temp_ptov_table
, physmap_base
, gVirtBase
, segLOWEST
- gVirtBase
, AP_RWNA
,
1310 ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
);
1312 arm_vm_page_granular_RWNX(end_kern
, phystokv(args
->topOfKernelData
) - end_kern
,
1313 ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
); /* Device Tree, RAM Disk (if present), bootArgs */
1315 arm_vm_physmap_slide(temp_ptov_table
, physmap_base
, (args
->topOfKernelData
+ BOOTSTRAP_TABLE_SIZE
- gPhysBase
+ gVirtBase
),
1316 real_avail_end
- (args
->topOfKernelData
+ BOOTSTRAP_TABLE_SIZE
), AP_RWNA
, ARM64_GRANULE_ALLOW_BLOCK
| ARM64_GRANULE_ALLOW_HINT
); // rest of physmem
1318 assert((temp_ptov_table
[ptov_index
- 1].va
+ temp_ptov_table
[ptov_index
- 1].len
) <= dynamic_memory_begin
);
1320 // Sort in descending order of segment length. LUT traversal is linear, so largest (most likely used)
1321 // segments should be placed earliest in the table to optimize lookup performance.
1322 qsort(temp_ptov_table
, PTOV_TABLE_SIZE
, sizeof(temp_ptov_table
[0]), cmp_ptov_entries
);
1324 memcpy(ptov_table
, temp_ptov_table
, sizeof(ptov_table
));
1327 #endif // XNU_MONITOR
1330 arm_vm_prot_finalize(boot_args
* args __unused
)
1333 * At this point, we are far enough along in the boot process that it will be
 * safe to free up all of the memory preceding the kernel. It may in fact
1335 * be safe to do this earlier.
1337 * This keeps the memory in the V-to-P mapping, but advertises it to the VM
1342 * if old style PRELINK segment exists, free memory before it, and after it before XNU text
1343 * otherwise we're dealing with a new style kernel cache, so we should just free the
1344 * memory before PRELINK_TEXT segment, since the rest of the KEXT read only data segments
1345 * should be immediately followed by XNU's TEXT segment
1348 ml_static_mfree(phystokv(gPhysBase
), segLOWEST
- gVirtBase
);
1351 * KTRR support means we will be mucking with these pages and trying to
1352 * protect them; we cannot free the pages to the VM if we do this.
1354 if (!segSizePLKDATACONST
&& !segSizePLKTEXTEXEC
&& segSizePRELINKTEXT
) {
1355 /* If new segments not present, PRELINK_TEXT is not dynamically sized, free DRAM between it and xnu TEXT */
1356 ml_static_mfree(segPRELINKTEXTB
+ segSizePRELINKTEXT
, segTEXTB
- (segPRELINKTEXTB
+ segSizePRELINKTEXT
));
1359 /* tighten permissions on kext read only data and code */
1360 arm_vm_page_granular_RNX(segPRELINKTEXTB
, segSizePRELINKTEXT
, ARM64_GRANULE_ALLOW_BLOCK
);
1361 arm_vm_page_granular_RNX(segPLKDATACONSTB
, segSizePLKDATACONST
, ARM64_GRANULE_ALLOW_BLOCK
);
1363 cpu_stack_alloc(&BootCpuData
);
1364 arm64_replace_bootstack(&BootCpuData
);
1365 ml_static_mfree(phystokv(segBOOTDATAB
- gVirtBase
+ gPhysBase
), segSizeBOOTDATA
);
1367 #if __ARM_KERNEL_PROTECT__
1368 arm_vm_populate_kernel_el0_mappings();
1369 #endif /* __ARM_KERNEL_PROTECT__ */
1372 for (vm_offset_t va
= segKLDB
; va
< (segKLDB
+ segSizeKLD
); va
+= ARM_PGBYTES
) {
1373 pt_entry_t
*pte
= arm_kva_to_pte(va
);
1374 *pte
= ARM_PTE_EMPTY
;
1376 /* Clear the original stack mappings; these pages should be mapped through ptov_table. */
1377 for (vm_offset_t va
= segBOOTDATAB
; va
< (segBOOTDATAB
+ segSizeBOOTDATA
); va
+= ARM_PGBYTES
) {
1378 pt_entry_t
*pte
= arm_kva_to_pte(va
);
1379 *pte
= ARM_PTE_EMPTY
;
1381 /* Clear the original PRELINKINFO mapping. This segment should be jettisoned during I/O Kit
1382 * initialization before we reach this point. */
1383 for (vm_offset_t va
= segPRELINKINFOB
; va
< (segPRELINKINFOB
+ segSizePRELINKINFO
); va
+= ARM_PGBYTES
) {
1384 pt_entry_t
*pte
= arm_kva_to_pte(va
);
1385 *pte
= ARM_PTE_EMPTY
;
1387 if (!keep_linkedit
) {
1388 for (vm_offset_t va
= segLINKB
; va
< (segLINKB
+ segSizeLINK
); va
+= ARM_PGBYTES
) {
1389 pt_entry_t
*pte
= arm_kva_to_pte(va
);
1390 *pte
= ARM_PTE_EMPTY
;
1392 for (vm_offset_t va
= segPLKLINKEDITB
; va
< (segPLKLINKEDITB
+ segSizePLKLINKEDIT
); va
+= ARM_PGBYTES
) {
1393 pt_entry_t
*pte
= arm_kva_to_pte(va
);
1394 *pte
= ARM_PTE_EMPTY
;
1397 #endif /* XNU_MONITOR */
1399 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
1401 * __LAST,__pinst should no longer be executable.
1403 arm_vm_page_granular_RNX(segLASTB
, segSizeLAST
, ARM64_GRANULE_ALLOW_BLOCK
);
1406 * Must wait until all other region permissions are set before locking down DATA_CONST
1407 * as the kernel static page tables live in DATA_CONST on KTRR enabled systems
1408 * and will become immutable.
1413 vm_offset_t segDATACONSTE
= segDATACONSTB
+ segSizeDATACONST
;
1416 * For the moment, the RO pagetable allocation is part of the
1417 * constant data segment, but it is technically owned by the
1418 * PPL. Hence, we should not reprotect it.
1420 arm_vm_page_granular_RNX(segDATACONSTB
, (vm_offset_t
)&ropagetable_begin
- segDATACONSTB
, ARM64_GRANULE_ALLOW_BLOCK
);
1421 arm_vm_page_granular_RNX((vm_offset_t
)&ropagetable_end
,
1422 segDATACONSTE
- (vm_offset_t
)&ropagetable_end
, ARM64_GRANULE_ALLOW_BLOCK
);
1424 arm_vm_page_granular_RNX(segDATACONSTB
, segSizeDATACONST
, ARM64_GRANULE_ALLOW_BLOCK
);
1427 __builtin_arm_dsb(DSB_ISH
);
#define TBI_USER 0x1
#define TBI_KERNEL 0x2

boolean_t user_tbi = TRUE;
/*
 * TBI (top-byte ignore) is an ARMv8 feature for ignoring the top 8 bits of
 * address accesses. It can be enabled separately for TTBR0 (user) and
 * TTBR1 (kernel). We enable it by default for user only, but allow both
 * to be controlled by the 'tbi' boot-arg.
 */
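
/*
 * Illustrative note (not part of the original source): given the bit masks
 * above, a boot-arg of tbi=1 requests top-byte ignore for user (TTBR0) only,
 * tbi=2 for kernel (TTBR1) only, and tbi=3 for both; kernel TBI is additionally
 * suppressed on builds that use pointer authentication, as the code below
 * explains.
 */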
1445 #if !__ARM_KERNEL_PROTECT__
1446 /* If we are not built with __ARM_KERNEL_PROTECT__, TBI can be turned
1447 * off with a boot-arg.
1449 uint64_t old_tcr
, new_tcr
;
1452 if (PE_parse_boot_argn("tbi", &tbi
, sizeof(tbi
)))
1453 user_tbi
= ((tbi
& TBI_USER
) == TBI_USER
);
1454 old_tcr
= new_tcr
= get_tcr();
1455 new_tcr
|= (user_tbi
) ? TCR_TBI0_TOPBYTE_IGNORED
: 0;
1457 #if !defined(HAS_APPLE_PAC)
1459 * arm_vm_init() runs after rebase_threaded_starts(), so enabling TBI1
1460 * at this point will break the computed pointer signatures. TBID1
1461 * could help mitigate this problem, but for now we'll just disable
1462 * kernel TBI if PAC is being used.
1464 new_tcr
|= (tbi
& TBI_KERNEL
) ? TCR_TBI1_TOPBYTE_IGNORED
: 0;
1467 if (old_tcr
!= new_tcr
) {
1469 sysreg_restore
.tcr_el1
= new_tcr
;
1471 #endif /* !__ARM_KERNEL_PROTECT__ */
#define ARM64_PHYSMAP_SLIDE_RANGE (1ULL << 30) // 1 GB
#define ARM64_PHYSMAP_SLIDE_MASK  (ARM64_PHYSMAP_SLIDE_RANGE - 1)
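
/*
 * Illustrative note (not part of the original source): the physical aperture
 * slide computed in arm_vm_init() below is early_random() masked with
 * ARM64_PHYSMAP_SLIDE_MASK and then page-aligned, so it is a random multiple of
 * the page size strictly less than 1GB. For example, a hypothetical
 * early_random() value of 0x7fed3c1234, masked with (1GB - 1) and then with
 * ~PAGE_MASK (16KB pages assumed), gives a slide of 0x2d3c0000.
 */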
1478 arm_vm_init(uint64_t memory_size
, boot_args
* args
)
1480 vm_map_address_t va_l1
, va_l1_end
;
1481 tt_entry_t
*cpu_l1_tte
;
1482 vm_map_address_t va_l2
, va_l2_end
;
1483 tt_entry_t
*cpu_l2_tte
;
1484 pmap_paddr_t boot_ttep
;
1485 tt_entry_t
*boot_tte
;
1486 uint64_t mem_segments
;
1487 vm_offset_t ptpage_vaddr
;
1488 vm_map_address_t dynamic_memory_begin
;
1489 vm_map_address_t physmap_base
;
1493 * Get the virtual and physical memory base from boot_args.
1495 gVirtBase
= args
->virtBase
;
1496 gPhysBase
= args
->physBase
;
1498 real_phys_size
= args
->memSize
+ (shadow_ptop
- shadow_pbase
);
1500 real_phys_size
= args
->memSize
;
1503 * Ensure the physical region we specify for the VM to manage ends on a
1504 * software page boundary. Note that the software page size (PAGE_SIZE)
1505 * may be a multiple of the hardware page size specified in ARM_PGBYTES.
1506 * We must round the reported memory size down to the nearest PAGE_SIZE
1507 * boundary to ensure the VM does not try to manage a page it does not
1508 * completely own. The KASAN shadow region, if present, is managed entirely
1509 * in units of the hardware page size and should not need similar treatment.
1511 gPhysSize
= mem_size
= ((gPhysBase
+ args
->memSize
) & ~PAGE_MASK
) - gPhysBase
;
1513 if ((memory_size
!= 0) && (mem_size
> memory_size
))
1514 mem_size
= memory_size
;
1515 if (mem_size
>= ((VM_MAX_KERNEL_ADDRESS
- VM_MIN_KERNEL_ADDRESS
) / 4))
1516 panic("Unsupported memory configuration %lx\n", mem_size
);
1518 physmap_base
= phystokv(args
->topOfKernelData
) + BOOTSTRAP_TABLE_SIZE
;
1520 // Slide the physical aperture to a random page-aligned location within the slide range
1521 uint64_t physmap_slide
= early_random() & ARM64_PHYSMAP_SLIDE_MASK
& ~((uint64_t)PAGE_MASK
);
1522 assert(physmap_slide
< ARM64_PHYSMAP_SLIDE_RANGE
);
1524 physmap_base
+= physmap_slide
;
1527 physmap_base
= ROUND_TWIG(physmap_base
);
1528 static_memory_end
= physmap_base
+ mem_size
;
1530 static_memory_end
= physmap_base
+ mem_size
+ (PTOV_TABLE_SIZE
* ARM_TT_TWIG_SIZE
); // worst possible case for block alignment
1533 /* add the KASAN stolen memory to the physmap */
1534 dynamic_memory_begin
= static_memory_end
+ (shadow_ptop
- shadow_pbase
);
1536 dynamic_memory_begin
= static_memory_end
;
1539 pmap_stacks_start
= (void*)dynamic_memory_begin
;
1540 dynamic_memory_begin
+= PPL_STACK_REGION_SIZE
;
1541 pmap_stacks_end
= (void*)dynamic_memory_begin
;
1543 if (dynamic_memory_begin
> VM_MAX_KERNEL_ADDRESS
)
1544 panic("Unsupported memory configuration %lx\n", mem_size
);
1546 boot_ttep
= args
->topOfKernelData
;
1547 boot_tte
= (tt_entry_t
*) phystokv(boot_ttep
);
1549 #if DEVELOPMENT || DEBUG
1550 /* Sanity check - assert that BOOTSTRAP_TABLE_SIZE is sufficiently-large to
1551 * hold our bootstrap mappings for any possible slide */
1552 size_t bytes_mapped
= dynamic_memory_begin
- gVirtBase
;
1553 size_t l1_entries
= 1 + ((bytes_mapped
+ ARM_TT_L1_SIZE
- 1) / ARM_TT_L1_SIZE
);
1554 /* 1 L1 each for V=P and KVA, plus 1 page for each L2 */
1555 size_t pages_used
= 2 * (l1_entries
+ 1);
1556 if (pages_used
> BOOTSTRAP_TABLE_SIZE
) {
1557 panic("BOOTSTRAP_TABLE_SIZE too small for memory config\n");
1562 * TTBR0 L1, TTBR0 L2 - 1:1 bootstrap mapping.
1563 * TTBR1 L1, TTBR1 L2 - kernel mapping
1565 avail_start
= boot_ttep
+ BOOTSTRAP_TABLE_SIZE
;
1567 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
1568 arm_replace_identity_map(args
);
1571 /* Initialize invalid tte page */
1572 invalid_tte
= (tt_entry_t
*)alloc_ptpage(TRUE
);
1573 invalid_ttep
= kvtophys((vm_offset_t
)invalid_tte
);
1574 bzero(invalid_tte
, ARM_PGBYTES
);
1577 * Initialize l1 page table page
1579 cpu_tte
= (tt_entry_t
*)alloc_ptpage(TRUE
);
1580 cpu_ttep
= kvtophys((vm_offset_t
)cpu_tte
);
1581 bzero(cpu_tte
, ARM_PGBYTES
);
1582 avail_end
= gPhysBase
+ mem_size
;
1583 assert(!(avail_end
& PAGE_MASK
));
1586 real_avail_end
= gPhysBase
+ real_phys_size
;
1588 real_avail_end
= avail_end
;
1592 * Initialize l1 and l2 page table pages :
1593 * map physical memory at the kernel base virtual address
1594 * cover the kernel dynamic address range section
1596 * the so called physical aperture should be statically mapped
1599 va_l1_end
= dynamic_memory_begin
;
1600 cpu_l1_tte
= cpu_tte
+ ((va_l1
& ARM_TT_L1_INDEX_MASK
) >> ARM_TT_L1_SHIFT
);
1602 while (va_l1
< va_l1_end
) {
1603 if (*cpu_l1_tte
== ARM_TTE_EMPTY
) {
1604 /* Allocate a page and setup L1 Table TTE in L1 */
1605 ptpage_vaddr
= alloc_ptpage(TRUE
);
1606 *cpu_l1_tte
= (kvtophys(ptpage_vaddr
) & ARM_TTE_TABLE_MASK
) | ARM_TTE_TYPE_TABLE
| ARM_TTE_VALID
;
1607 bzero((void *)ptpage_vaddr
, ARM_PGBYTES
);
1610 if ((va_l1
+ ARM_TT_L1_SIZE
) < va_l1
) {
1611 /* If this is the last L1 entry, it must cover the last mapping. */
1615 va_l1
+= ARM_TT_L1_SIZE
;
1619 #if __ARM_KERNEL_PROTECT__
1620 /* Expand the page tables to prepare for the EL0 mappings. */
1621 arm_vm_expand_kernel_el0_mappings();
1622 #endif /* __ARM_KERNEL_PROTECT__ */

	/*
	 * Now retrieve addresses for end, edata, and etext from MACH-O headers
	 */
	segPRELINKTEXTB  = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT);
	segPLKDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_DATA_CONST", &segSizePLKDATACONST);
	segPLKTEXTEXECB  = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_TEXT_EXEC", &segSizePLKTEXTEXEC);
	segTEXTB         = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT);
	segDATACONSTB    = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA_CONST", &segSizeDATACONST);
	segTEXTEXECB     = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT_EXEC", &segSizeTEXTEXEC);
#if XNU_MONITOR
	segPPLTEXTB      = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLTEXT", &segSizePPLTEXT);
	segPPLTRAMPB     = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLTRAMP", &segSizePPLTRAMP);
	segPPLDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLDATA_CONST", &segSizePPLDATACONST);
#endif
	segDATAB         = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA);
#if XNU_MONITOR
	segPPLDATAB      = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLDATA", &segSizePPLDATA);
#endif

	segBOOTDATAB     = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA);
	segLINKB         = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK);
	segKLDB          = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD);
	segPRELINKDATAB  = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_DATA", &segSizePRELINKDATA);
	segPRELINKINFOB  = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO);
	segPLKLLVMCOVB   = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_LLVM_COV", &segSizePLKLLVMCOV);
	segPLKLINKEDITB  = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_LINKEDIT", &segSizePLKLINKEDIT);
	segLASTB         = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST);

	(void) PE_parse_boot_argn("use_contiguous_hint", &use_contiguous_hint, sizeof(use_contiguous_hint));
	assert(segSizePRELINKTEXT < 0x03000000); /* 23355738 */

	/* if one of the new segments is present, the other one better be as well */
	if (segSizePLKDATACONST || segSizePLKTEXTEXEC) {
		assert(segSizePLKDATACONST && segSizePLKTEXTEXEC);
	}

	etext = (vm_offset_t) segTEXTB + segSizeTEXT;
	sdata = (vm_offset_t) segDATAB;
	edata = (vm_offset_t) segDATAB + segSizeDATA;
	end_kern = round_page(getlastaddr()); /* Force end to next page */

	vm_kernel_base = segTEXTB;
	vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
	vm_kext_base = segPRELINKTEXTB;
	vm_kext_top = vm_kext_base + segSizePRELINKTEXT;

	vm_prelink_stext = segPRELINKTEXTB;
	if (!segSizePLKTEXTEXEC && !segSizePLKDATACONST) {
		vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
	} else {
		vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT + segSizePLKDATACONST + segSizePLKTEXTEXEC;
	}
	vm_prelink_sinfo = segPRELINKINFOB;
	vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
	vm_slinkedit = segLINKB;
	vm_elinkedit = segLINKB + segSizeLINK;

	vm_prelink_sdata = segPRELINKDATAB;
	vm_prelink_edata = segPRELINKDATAB + segSizePRELINKDATA;

	arm_vm_prot_init(args);

	/*
	 * Initialize the page tables for the low globals:
	 *   cover this address range:
	 *     LOW_GLOBAL_BASE_ADDRESS + 2MB
	 */
	va_l1 = va_l2 = LOW_GLOBAL_BASE_ADDRESS;
	cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
	cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
	ptpage_vaddr = alloc_ptpage(TRUE);
	*cpu_l2_tte = (kvtophys(ptpage_vaddr) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN;
	bzero((void *)ptpage_vaddr, ARM_PGBYTES);

	/*
	 * Initialize l2 page table pages :
	 *   cover this address range:
	 *    KERNEL_DYNAMIC_ADDR - VM_MAX_KERNEL_ADDRESS
	 */
	va_l1 = dynamic_memory_begin;
	va_l1_end = VM_MAX_KERNEL_ADDRESS;
	cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);

	while (va_l1 < va_l1_end) {
		if (*cpu_l1_tte == ARM_TTE_EMPTY) {
			/* Allocate a page and setup L1 Table TTE in L1 */
			ptpage_vaddr = alloc_ptpage(TRUE);
			*cpu_l1_tte = (kvtophys(ptpage_vaddr) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN;
			bzero((void *)ptpage_vaddr, ARM_PGBYTES);
		}

		if ((va_l1 + ARM_TT_L1_SIZE) < va_l1) {
			/* If this is the last L1 entry, it must cover the last mapping. */
			break;
		}

		va_l1 += ARM_TT_L1_SIZE;
		cpu_l1_tte++;
	}

#if KASAN
	/* record the extent of the physmap */
	physmap_vbase = physmap_base;
	physmap_vtop = static_memory_end;
	kasan_init();
#endif /* KASAN */

#if MONOTONIC
	mt_early_init();
#endif /* MONOTONIC */

	arm_vm_physmap_init(args, physmap_base, dynamic_memory_begin);
	set_mmu_ttb_alternate(cpu_ttep & TTBR_BADDR_MASK);

	set_mmu_ttb(invalid_ttep & TTBR_BADDR_MASK);

	flush_mmu_tlb();
#if defined(HAS_VMSA_LOCK)
	vmsa_lock();
#endif
	kva_active = TRUE;
	// global table pointers may need to be different due to physical aperture remapping
	cpu_tte = (tt_entry_t *)(phystokv(cpu_ttep));
	invalid_tte = (tt_entry_t *)(phystokv(invalid_ttep));

	sane_size = mem_size - (avail_start - gPhysBase);
	max_mem = mem_size;
	vm_kernel_slid_base = segLOWESTTEXT;
	vm_kernel_slid_top = vm_prelink_einfo;
	vm_kernel_slide = segTEXTB - VM_KERNEL_LINK_ADDRESS;
	vm_kernel_stext = segTEXTB;
	assert(segDATACONSTB == segTEXTB + segSizeTEXT);
	assert(segTEXTEXECB == segDATACONSTB + segSizeDATACONST);
	vm_kernel_etext = segTEXTB + segSizeTEXT + segSizeDATACONST + segSizeTEXTEXEC;

	dynamic_memory_begin = ROUND_TWIG(dynamic_memory_begin);
#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
	// reserve a 32MB region without permission overrides to use later for a CTRR unit test
	{
		extern vm_offset_t ctrr_test_page;
		tt_entry_t *new_tte;

		ctrr_test_page = dynamic_memory_begin;
		dynamic_memory_begin += ARM_TT_L2_SIZE;
		cpu_l1_tte = cpu_tte + ((ctrr_test_page & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
		assert((*cpu_l1_tte) & ARM_TTE_VALID);
		cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((ctrr_test_page & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
		assert((*cpu_l2_tte) == ARM_TTE_EMPTY);
		new_tte = (tt_entry_t *)alloc_ptpage(FALSE);
		bzero(new_tte, ARM_PGBYTES);
		*cpu_l2_tte = (kvtophys((vm_offset_t)new_tte) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID;
	}
#endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */
#if XNU_MONITOR
	for (vm_offset_t cur = (vm_offset_t)pmap_stacks_start; cur < (vm_offset_t)pmap_stacks_end; cur += ARM_PGBYTES) {
		arm_vm_map(cpu_tte, cur, ARM_PTE_EMPTY);
	}
#endif
	pmap_bootstrap(dynamic_memory_begin);

	disable_preemption();

	/*
	 * Initialize l3 page table pages :
	 *   cover this address range:
	 *     2MB + FrameBuffer size + 10MB for each 256MB segment
	 */

	mem_segments = (mem_size + 0x0FFFFFFF) >> 28;

	va_l1 = dynamic_memory_begin;
	va_l1_end = va_l1 + ((2 + (mem_segments * 10)) << 20);
	va_l1_end += round_page(args->Video.v_height * args->Video.v_rowBytes);
	va_l1_end = (va_l1_end + 0x00000000007FFFFFULL) & 0xFFFFFFFFFF800000ULL;
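
	/*
	 * Sizing example (hypothetical values, for illustration only): with
	 * mem_size = 4GB, mem_segments = (0x100000000 + 0x0FFFFFFF) >> 28 = 16,
	 * so the window above is (2 + 16 * 10)MB = 162MB plus the rounded
	 * framebuffer size; the final statement then rounds va_l1_end up to the
	 * next 8MB (0x800000) boundary.
	 */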

	cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);

	while (va_l1 < va_l1_end) {
		va_l2 = va_l1;

		if (((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE) < va_l1) {
			/* If this is the last L1 entry, it must cover the last mapping. */
			va_l2_end = va_l1_end;
		} else {
			va_l2_end = MIN((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE, va_l1_end);
		}

		cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);

		while (va_l2 < va_l2_end) {
			pt_entry_t *ptp;
			pmap_paddr_t ptp_phys;

			/* Allocate a page and setup L3 Table TTE in L2 */
			ptp = (pt_entry_t *) alloc_ptpage(FALSE);
			ptp_phys = (pmap_paddr_t)kvtophys((vm_offset_t)ptp);

			pmap_init_pte_page(kernel_pmap, ptp, va_l2, 3, TRUE, TRUE);

			*cpu_l2_tte = (pa_to_tte(ptp_phys)) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN;

			va_l2 += ARM_TT_L2_SIZE;
			cpu_l2_tte++;
		}

		va_l1 = va_l2_end;
		cpu_l1_tte++;
	}

	/*
	 * Initialize l3 page table pages :
	 *   cover this address range:
	 *   (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - VM_MAX_KERNEL_ADDRESS
	 */
	va_l1 = VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK;
	va_l1_end = VM_MAX_KERNEL_ADDRESS;

	cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);

	while (va_l1 < va_l1_end) {
		va_l2 = va_l1;

		if (((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE) < va_l1) {
			/* If this is the last L1 entry, it must cover the last mapping. */
			va_l2_end = va_l1_end;
		} else {
			va_l2_end = MIN((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE, va_l1_end);
		}

		cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);

		while (va_l2 < va_l2_end) {
			pt_entry_t *ptp;
			pmap_paddr_t ptp_phys;

			/* Allocate a page and setup L3 Table TTE in L2 */
			ptp = (pt_entry_t *) alloc_ptpage(FALSE);
			ptp_phys = (pmap_paddr_t)kvtophys((vm_offset_t)ptp);

			pmap_init_pte_page(kernel_pmap, ptp, va_l2, 3, TRUE, TRUE);

			*cpu_l2_tte = (pa_to_tte(ptp_phys)) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN;

			va_l2 += ARM_TT_L2_SIZE;
			cpu_l2_tte++;
		}

		va_l1 = va_l2_end;
		cpu_l1_tte++;
	}

#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__
	/*
	 * In this configuration, the bootstrap mappings (arm_vm_init) and
	 * the heap mappings occupy separate L1 regions.  Explicitly set up
	 * the heap L1 allocations here.
	 */
	va_l1 = VM_MIN_KERNEL_ADDRESS & ~ARM_TT_L1_OFFMASK;
	cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);

	while ((va_l1 >= (VM_MIN_KERNEL_ADDRESS & ~ARM_TT_L1_OFFMASK)) && (va_l1 < VM_MAX_KERNEL_ADDRESS)) {
		/*
		 * If the L1 entry has not yet been allocated, allocate it
		 * now and treat it as a heap table.
		 */
		if (*cpu_l1_tte == ARM_TTE_EMPTY) {
			tt_entry_t *new_tte = (tt_entry_t *)alloc_ptpage(FALSE);
			bzero(new_tte, ARM_PGBYTES);
			*cpu_l1_tte = (kvtophys((vm_offset_t)new_tte) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN;
		}

		cpu_l1_tte++;
		va_l1 += ARM_TT_L1_SIZE;
	}
#endif /* __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__ */

	/*
	 * Adjust avail_start so that the range that the VM owns
	 * starts on a PAGE_SIZE aligned boundary.
	 */
	avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK;
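
	/*
	 * Round-up example (hypothetical numbers): with 16KB pages
	 * (PAGE_MASK == 0x3FFF), avail_start = 0x800004001 becomes
	 * (0x800004001 + 0x3FFF) & ~0x3FFF = 0x800008000, the next
	 * PAGE_SIZE-aligned boundary; an already-aligned value is left unchanged.
	 */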

#if XNU_MONITOR
	pmap_static_allocations_done();
#endif
	first_avail = avail_start;
	patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData);
	enable_preemption();