apple/xnu (xnu-6153.81.5): osfmk/arm64/arm_vm_init.c
1 /*
2 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_debug.h>
30 #include <mach_kdp.h>
31 #include <debug.h>
32
33 #include <kern/assert.h>
34 #include <kern/misc_protos.h>
35 #include <kern/monotonic.h>
36 #include <mach/vm_types.h>
37 #include <mach/vm_param.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_page.h>
40 #include <vm/pmap.h>
41
42 #include <machine/atomic.h>
43 #include <arm64/proc_reg.h>
44 #include <arm64/lowglobals.h>
45 #include <arm/cpu_data_internal.h>
46 #include <arm/misc_protos.h>
47 #include <pexpert/arm64/boot.h>
48 #include <pexpert/device_tree.h>
49
50 #include <libkern/kernel_mach_header.h>
51 #include <libkern/section_keywords.h>
52
53 #include <san/kasan.h>
54
55 #if __ARM_KERNEL_PROTECT__
56 /*
57 * If we want to support __ARM_KERNEL_PROTECT__, we need a sufficient amount of
57 * mappable space preceding the kernel (as we unmap the kernel by cutting the
59 * range covered by TTBR1 in half). This must also cover the exception vectors.
60 */
61 static_assert(KERNEL_PMAP_HEAP_RANGE_START > ARM_KERNEL_PROTECT_EXCEPTION_START);
62
63 /* The exception vectors and the kernel cannot share root TTEs. */
64 static_assert((KERNEL_PMAP_HEAP_RANGE_START & ~ARM_TT_ROOT_OFFMASK) > ARM_KERNEL_PROTECT_EXCEPTION_START);
65
66 /*
67 * We must have enough space in the TTBR1_EL1 range to create the EL0 mapping of
68 * the exception vectors.
69 */
70 static_assert((((~ARM_KERNEL_PROTECT_EXCEPTION_START) + 1) * 2ULL) <= (ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK));
71 #endif /* __ARM_KERNEL_PROTECT__ */
72
73 #if __APRR_SUPPORTED__ && XNU_MONITOR
74 /*
75 * If APRR is supported, setting XN on L1/L2 table entries will shift the effective
76 * APRR index of L3 PTEs covering PPL-protected pages in the kernel dynamic region
77 * from PPL R/W to kernel R/W. That will effectively remove PPL write protection
78 * from those pages. Avoid setting XN at the table level for MONITOR-enabled builds
79 * that are backed by APRR.
80 */
81 #define ARM_DYNAMIC_TABLE_XN ARM_TTE_TABLE_PXN
82 #else
83 #define ARM_DYNAMIC_TABLE_XN (ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN)
84 #endif
85
86 #if KASAN
87 extern vm_offset_t shadow_pbase;
88 extern vm_offset_t shadow_ptop;
89 extern vm_offset_t physmap_vbase;
90 extern vm_offset_t physmap_vtop;
91 #endif
92
93 /*
94 * Denotes the end of xnu.
95 */
96 extern void *last_kernel_symbol;
97
98 extern void arm64_replace_bootstack(cpu_data_t*);
99 extern void PE_slide_devicetree(vm_offset_t);
100
101 /*
102 * KASLR parameters
103 */
104 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_base;
105 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_top;
106 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kext_base;
107 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kext_top;
108 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_stext;
109 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_etext;
110 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_slide;
111 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_slid_base;
112 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_slid_top;
113
114 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_stext;
115 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_etext;
116 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_sdata;
117 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_edata;
118 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_sinfo;
119 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_einfo;
120 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_slinkedit;
121 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_elinkedit;
122
123 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_builtinkmod_text;
124 SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_builtinkmod_text_end;
125
126 /* Used by <mach/arm/vm_param.h> */
127 SECURITY_READ_ONLY_LATE(unsigned long) gVirtBase;
128 SECURITY_READ_ONLY_LATE(unsigned long) gPhysBase;
129 SECURITY_READ_ONLY_LATE(unsigned long) gPhysSize;
130 SECURITY_READ_ONLY_LATE(unsigned long) gT0Sz = T0SZ_BOOT;
131 SECURITY_READ_ONLY_LATE(unsigned long) gT1Sz = T1SZ_BOOT;
132
133 /* 23543331 - step 1 of kext / kernel __TEXT and __DATA colocation is to move
134 * all kexts before the kernel. This is only for arm64 devices and looks
135 * something like the following:
136 * -- vmaddr order --
137 * 0xffffff8004004000 __PRELINK_TEXT
138 * 0xffffff8007004000 __TEXT (xnu)
139 * 0xffffff80075ec000 __DATA (xnu)
140 * 0xffffff80076dc000 __KLD (xnu)
141 * 0xffffff80076e0000 __LAST (xnu)
142 * 0xffffff80076e4000 __LINKEDIT (xnu)
143 * 0xffffff80076e4000 __PRELINK_DATA (not used yet)
144 * 0xffffff800782c000 __PRELINK_INFO
145 * 0xffffff80078e4000 -- End of kernelcache
146 */
147
148 /* 24921709 - make XNU ready for KTRR
149 *
150 * Two possible kernel cache layouts, depending on which kcgen is being used.
151 * VAs increasing downwards.
152 * Old KCGEN:
153 *
154 * __PRELINK_TEXT
155 * __TEXT
156 * __DATA_CONST
157 * __TEXT_EXEC
158 * __KLD
159 * __LAST
160 * __DATA
161 * __PRELINK_DATA (expected empty)
162 * __LINKEDIT
163 * __PRELINK_INFO
164 *
165 * New kcgen:
166 *
167 * __PRELINK_TEXT <--- First KTRR (ReadOnly) segment
168 * __PLK_DATA_CONST
169 * __PLK_TEXT_EXEC
170 * __TEXT
171 * __DATA_CONST
172 * __TEXT_EXEC
173 * __KLD
174 * __LAST <--- Last KTRR (ReadOnly) segment
175 * __DATA
176 * __BOOTDATA (if present)
177 * __LINKEDIT
178 * __PRELINK_DATA (expected populated now)
179 * __PLK_LINKEDIT
180 * __PRELINK_INFO
181 *
182 */
183
184 vm_offset_t mem_size; /* Size of actual physical memory present
185 * minus any performance buffer and possibly
186 * limited by mem_limit in bytes */
187 uint64_t mem_actual; /* The "One True" physical memory size;
188 * actually, it's the highest physical
189 * address + 1 */
190 uint64_t max_mem; /* Size of physical memory (bytes), adjusted
191 * by maxmem */
192 uint64_t sane_size; /* Memory size to use for defaults
193 * calculations */
194 /* This no longer appears to be used; kill it? */
195 addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Highest kernel
196 * virtual address known
197 * to the VM system */
198
199 SECURITY_READ_ONLY_LATE(vm_offset_t) segEXTRADATA;
200 SECURITY_READ_ONLY_LATE(unsigned long) segSizeEXTRADATA;
201
202 SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTTEXT;
203 SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWEST;
204
205 SECURITY_READ_ONLY_LATE(static vm_offset_t) segTEXTB;
206 SECURITY_READ_ONLY_LATE(static unsigned long) segSizeTEXT;
207
208 #if XNU_MONITOR
209 SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLTEXTB;
210 SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLTEXT;
211
212 SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLTRAMPB;
213 SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLTRAMP;
214
215 SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLDATACONSTB;
216 SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLDATACONST;
217 SECURITY_READ_ONLY_LATE(void *) pmap_stacks_start = NULL;
218 SECURITY_READ_ONLY_LATE(void *) pmap_stacks_end = NULL;
219 #endif
220
221 SECURITY_READ_ONLY_LATE(static vm_offset_t) segDATACONSTB;
222 SECURITY_READ_ONLY_LATE(static unsigned long) segSizeDATACONST;
223
224 SECURITY_READ_ONLY_LATE(static vm_offset_t) segTEXTEXECB;
225 SECURITY_READ_ONLY_LATE(static unsigned long) segSizeTEXTEXEC;
226
227 SECURITY_READ_ONLY_LATE(static vm_offset_t) segDATAB;
228 SECURITY_READ_ONLY_LATE(static unsigned long) segSizeDATA;
229
230 #if XNU_MONITOR
231 SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLDATAB;
232 SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLDATA;
233 #endif
234
235 SECURITY_READ_ONLY_LATE(vm_offset_t) segBOOTDATAB;
236 SECURITY_READ_ONLY_LATE(unsigned long) segSizeBOOTDATA;
237 extern vm_offset_t intstack_low_guard;
238 extern vm_offset_t intstack_high_guard;
239 extern vm_offset_t excepstack_high_guard;
240
241 SECURITY_READ_ONLY_LATE(static vm_offset_t) segLINKB;
242 SECURITY_READ_ONLY_LATE(static unsigned long) segSizeLINK;
243
244 SECURITY_READ_ONLY_LATE(static vm_offset_t) segKLDB;
245 SECURITY_READ_ONLY_LATE(static unsigned long) segSizeKLD;
246 SECURITY_READ_ONLY_LATE(vm_offset_t) segLASTB;
247 SECURITY_READ_ONLY_LATE(unsigned long) segSizeLAST;
248
249 SECURITY_READ_ONLY_LATE(vm_offset_t) segPRELINKTEXTB;
250 SECURITY_READ_ONLY_LATE(unsigned long) segSizePRELINKTEXT;
251
252 SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKTEXTEXECB;
253 SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKTEXTEXEC;
254
255 SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKDATACONSTB;
256 SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKDATACONST;
257
258 SECURITY_READ_ONLY_LATE(static vm_offset_t) segPRELINKDATAB;
259 SECURITY_READ_ONLY_LATE(static unsigned long) segSizePRELINKDATA;
260
261 SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKLLVMCOVB = 0;
262 SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKLLVMCOV = 0;
263
264 SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKLINKEDITB;
265 SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKLINKEDIT;
266
267 SECURITY_READ_ONLY_LATE(static vm_offset_t) segPRELINKINFOB;
268 SECURITY_READ_ONLY_LATE(static unsigned long) segSizePRELINKINFO;
269
270 SECURITY_READ_ONLY_LATE(static boolean_t) use_contiguous_hint = TRUE;
271
272 SECURITY_READ_ONLY_LATE(unsigned) PAGE_SHIFT_CONST;
273
274 SECURITY_READ_ONLY_LATE(vm_offset_t) end_kern;
275 SECURITY_READ_ONLY_LATE(vm_offset_t) etext;
276 SECURITY_READ_ONLY_LATE(vm_offset_t) sdata;
277 SECURITY_READ_ONLY_LATE(vm_offset_t) edata;
278
279 vm_offset_t alloc_ptpage(boolean_t map_static);
280 SECURITY_READ_ONLY_LATE(vm_offset_t) ropage_next;
281
282 /*
283 * Bootstrap the system enough to run with virtual memory.
284 * Map the kernel's code and data, and allocate the system page table.
285 * Page_size must already be set.
286 *
287 * Parameters:
288 * first_avail: first available physical page -
289 * after kernel page tables
290 * avail_start: PA of first physical page
291 * avail_end: PA of last physical page
292 */
293 SECURITY_READ_ONLY_LATE(vm_offset_t) first_avail;
294 SECURITY_READ_ONLY_LATE(vm_offset_t) static_memory_end;
295 SECURITY_READ_ONLY_LATE(pmap_paddr_t) avail_start;
296 SECURITY_READ_ONLY_LATE(pmap_paddr_t) avail_end;
297 SECURITY_READ_ONLY_LATE(pmap_paddr_t) real_avail_end;
298 SECURITY_READ_ONLY_LATE(unsigned long) real_phys_size;
299
300 #if __ARM_KERNEL_PROTECT__
301 extern void ExceptionVectorsBase;
302 extern void ExceptionVectorsEnd;
303 #endif /* __ARM_KERNEL_PROTECT__ */
304
305 typedef struct {
306 pmap_paddr_t pa;
307 vm_map_address_t va;
308 vm_size_t len;
309 } ptov_table_entry;
310
311 #define PTOV_TABLE_SIZE 8
312 SECURITY_READ_ONLY_LATE(static ptov_table_entry) ptov_table[PTOV_TABLE_SIZE];
313 SECURITY_READ_ONLY_LATE(static boolean_t) kva_active = FALSE;
314
315
316 vm_map_address_t
317 phystokv(pmap_paddr_t pa)
318 {
319 for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) {
320 if ((pa >= ptov_table[i].pa) && (pa < (ptov_table[i].pa + ptov_table[i].len)))
321 return (pa - ptov_table[i].pa + ptov_table[i].va);
322 }
323 assertf((pa - gPhysBase) < real_phys_size, "%s: illegal PA: 0x%llx", __func__, (uint64_t)pa);
324 return (pa - gPhysBase + gVirtBase);
325 }
326
327 vm_map_address_t
328 phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
329 {
330 vm_size_t len;
331 for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) {
332 if ((pa >= ptov_table[i].pa) && (pa < (ptov_table[i].pa + ptov_table[i].len))) {
333 len = ptov_table[i].len - (pa - ptov_table[i].pa);
334 if (*max_len > len)
335 *max_len = len;
336 return (pa - ptov_table[i].pa + ptov_table[i].va);
337 }
338 }
339 len = PAGE_SIZE - (pa & PAGE_MASK);
340 if (*max_len > len)
341 *max_len = len;
342 assertf((pa - gPhysBase) < real_phys_size, "%s: illegal PA: 0x%llx", __func__, (uint64_t)pa);
343 return (pa - gPhysBase + gVirtBase);
344 }
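
/*
 * Illustrative usage sketch, not part of xnu: phystokv_range() treats
 * *max_len as in/out.  The caller passes in how many bytes it wants, and the
 * routine clamps that to the number of bytes that are virtually contiguous
 * starting at pa.  A hypothetical helper that copies out of physical memory
 * chunk by chunk could therefore look like this.
 */
static inline void
copy_from_phys_sketch(void *dst, pmap_paddr_t pa, vm_size_t len)
{
	while (len > 0) {
		vm_size_t chunk = len;
		vm_map_address_t va = phystokv_range(pa, &chunk);

		/* chunk now holds the virtually contiguous run length at pa */
		memcpy(dst, (const void *)va, chunk);
		dst = (char *)dst + chunk;
		pa += chunk;
		len -= chunk;
	}
}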
345
346 vm_offset_t
347 ml_static_vtop(vm_offset_t va)
348 {
349 for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) {
350 if ((va >= ptov_table[i].va) && (va < (ptov_table[i].va + ptov_table[i].len)))
351 return (va - ptov_table[i].va + ptov_table[i].pa);
352 }
353 assertf(((vm_address_t)(va) - gVirtBase) < gPhysSize, "%s: illegal VA: %p", __func__, (void*)va);
354 return ((vm_address_t)(va) - gVirtBase + gPhysBase);
355 }
356
357 /*
358 * This rounds the given address up to the nearest boundary for a PTE contiguous
359 * hint.
360 */
361 static vm_offset_t
362 round_up_pte_hint_address(vm_offset_t address)
363 {
364 vm_offset_t hint_size = ARM_PTE_SIZE << ARM_PTE_HINT_ENTRIES_SHIFT;
365 return ((address + (hint_size - 1)) & ~(hint_size - 1));
366 }
367
368 /* allocate a page for a page table: we support static and dynamic mappings.
369 *
370 * returns a virtual address for the allocated page
371 *
372 * for static mappings, we allocate from the region ropagetable_begin to ropagetable_end-1,
373 * which is defined in the DATA_CONST segment and will be protected RNX when vm_prot_finalize runs.
374 *
375 * for dynamic mappings, we allocate from avail_start, which should remain RWNX.
376 */
377
378 vm_offset_t alloc_ptpage(boolean_t map_static) {
379 vm_offset_t vaddr;
380
381 #if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
382 map_static = FALSE;
383 #endif
384
385 if (!ropage_next) {
386 ropage_next = (vm_offset_t)&ropagetable_begin;
387 }
388
389 if (map_static) {
390 assert(ropage_next < (vm_offset_t)&ropagetable_end);
391
392 vaddr = ropage_next;
393 ropage_next += ARM_PGBYTES;
394
395 return vaddr;
396 } else {
397 vaddr = phystokv(avail_start);
398 avail_start += ARM_PGBYTES;
399
400 return vaddr;
401 }
402 }
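
/*
 * Sketch of the allocation idiom used throughout the rest of this file (see
 * arm_vm_map() and arm_vm_init()): grab a page table page, zero it, and point
 * a table entry at its physical address.  Illustrative only; 'ttep' is a
 * hypothetical table-entry pointer supplied by the caller.
 */
static inline void
alloc_ptpage_usage_sketch(tt_entry_t *ttep, boolean_t map_static)
{
	vm_offset_t ptpage = alloc_ptpage(map_static);

	bzero((void *)ptpage, ARM_PGBYTES);
	*ttep = (kvtophys(ptpage) & ARM_TTE_TABLE_MASK) |
	    ARM_TTE_TYPE_TABLE | ARM_TTE_VALID;
}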
403
404 #if DEBUG
405
406 void dump_kva_l2(vm_offset_t tt_base, tt_entry_t *tt, int indent, uint64_t *rosz_out, uint64_t *rwsz_out);
407
408 void dump_kva_l2(vm_offset_t tt_base, tt_entry_t *tt, int indent, uint64_t *rosz_out, uint64_t *rwsz_out) {
409 unsigned int i;
410 boolean_t cur_ro, prev_ro = 0;
411 int start_entry = -1;
412 tt_entry_t cur, prev = 0;
413 pmap_paddr_t robegin = kvtophys((vm_offset_t)&ropagetable_begin);
414 pmap_paddr_t roend = kvtophys((vm_offset_t)&ropagetable_end);
415 boolean_t tt_static = kvtophys((vm_offset_t)tt) >= robegin &&
416 kvtophys((vm_offset_t)tt) < roend;
417
418 for(i=0; i<TTE_PGENTRIES; i++) {
419 int tte_type = tt[i] & ARM_TTE_TYPE_MASK;
420 cur = tt[i] & ARM_TTE_TABLE_MASK;
421
422 if (tt_static) {
423 /* addresses mapped by this entry are static if it is a block mapping,
424 * or the table was allocated from the RO page table region */
425 cur_ro = (tte_type == ARM_TTE_TYPE_BLOCK) || (cur >= robegin && cur < roend);
426 } else {
427 cur_ro = 0;
428 }
429
430 if ((cur == 0 && prev != 0) || (cur_ro != prev_ro && prev != 0)) { // falling edge
431 uintptr_t start,end,sz;
432
433 start = (uintptr_t)start_entry << ARM_TT_L2_SHIFT;
434 start += tt_base;
435 end = ((uintptr_t)i << ARM_TT_L2_SHIFT) - 1;
436 end += tt_base;
437
438 sz = end - start + 1;
439 printf("%*s0x%08x_%08x-0x%08x_%08x %s (%luMB)\n",
440 indent*4, "",
441 (uint32_t)(start >> 32),(uint32_t)start,
442 (uint32_t)(end >> 32),(uint32_t)end,
443 prev_ro ? "Static " : "Dynamic",
444 (sz >> 20));
445
446 if (prev_ro) {
447 *rosz_out += sz;
448 } else {
449 *rwsz_out += sz;
450 }
451 }
452
453 if ((prev == 0 && cur != 0) || cur_ro != prev_ro) { // rising edge: set start
454 start_entry = i;
455 }
456
457 prev = cur;
458 prev_ro = cur_ro;
459 }
460 }
461
462 void dump_kva_space() {
463 uint64_t tot_rosz=0, tot_rwsz=0;
464 int ro_ptpages, rw_ptpages;
465 pmap_paddr_t robegin = kvtophys((vm_offset_t)&ropagetable_begin);
466 pmap_paddr_t roend = kvtophys((vm_offset_t)&ropagetable_end);
467 boolean_t root_static = kvtophys((vm_offset_t)cpu_tte) >= robegin &&
468 kvtophys((vm_offset_t)cpu_tte) < roend;
469 uint64_t kva_base = ~((1ULL << (64 - T1SZ_BOOT)) - 1);
470
471 printf("Root page table: %s\n", root_static ? "Static" : "Dynamic");
472
473 for(unsigned int i=0; i<TTE_PGENTRIES; i++) {
474 pmap_paddr_t cur;
475 boolean_t cur_ro;
476 uintptr_t start,end;
477 uint64_t rosz = 0, rwsz = 0;
478
479 if ((cpu_tte[i] & ARM_TTE_VALID) == 0)
480 continue;
481
482 cur = cpu_tte[i] & ARM_TTE_TABLE_MASK;
483 start = (uint64_t)i << ARM_TT_L1_SHIFT;
484 start = start + kva_base;
485 end = start + (ARM_TT_L1_SIZE - 1);
486 cur_ro = cur >= robegin && cur < roend;
487
488 printf("0x%08x_%08x-0x%08x_%08x %s\n",
489 (uint32_t)(start >> 32),(uint32_t)start,
490 (uint32_t)(end >> 32),(uint32_t)end,
491 cur_ro ? "Static " : "Dynamic");
492
493 dump_kva_l2(start, (tt_entry_t*)phystokv(cur), 1, &rosz, &rwsz);
494 tot_rosz += rosz;
495 tot_rwsz += rwsz;
496 }
497
498 printf("L2 Address space mapped: Static %lluMB Dynamic %lluMB Total %lluMB\n",
499 tot_rosz >> 20,
500 tot_rwsz >> 20,
501 (tot_rosz >> 20) + (tot_rwsz >> 20));
502
503 ro_ptpages = (int)((ropage_next - (vm_offset_t)&ropagetable_begin) >> ARM_PGSHIFT);
504 rw_ptpages = (int)(lowGlo.lgStaticSize >> ARM_PGSHIFT);
505 printf("Pages used: static %d dynamic %d\n", ro_ptpages, rw_ptpages);
506 }
507
508 #endif /* DEBUG */
509
510 #if __ARM_KERNEL_PROTECT__ || XNU_MONITOR
511 /*
512 * arm_vm_map:
513 * root_ttp: The kernel virtual address for the root of the target page tables
514 * vaddr: The target virtual address
515 * pte: A page table entry value (may be ARM_PTE_EMPTY)
516 *
517 * This function installs pte at vaddr in root_ttp. Any page table pages needed
518 * to install pte will be allocated by this function.
519 */
520 static void
521 arm_vm_map(tt_entry_t * root_ttp, vm_offset_t vaddr, pt_entry_t pte)
522 {
523 vm_offset_t ptpage = 0;
524 tt_entry_t * ttp = root_ttp;
525
526 tt_entry_t * l1_ttep = NULL;
527 tt_entry_t l1_tte = 0;
528
529 tt_entry_t * l2_ttep = NULL;
530 tt_entry_t l2_tte = 0;
531 pt_entry_t * ptep = NULL;
532 pt_entry_t cpte = 0;
533
534 /*
535 * Walk the target page table to find the PTE for the given virtual
536 * address. Allocate any page table pages needed to do this.
537 */
538 l1_ttep = ttp + ((vaddr & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
539 l1_tte = *l1_ttep;
540
541 if (l1_tte == ARM_TTE_EMPTY) {
542 ptpage = alloc_ptpage(TRUE);
543 bzero((void *)ptpage, ARM_PGBYTES);
544 l1_tte = kvtophys(ptpage);
545 l1_tte &= ARM_TTE_TABLE_MASK;
546 l1_tte |= ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
547 *l1_ttep = l1_tte;
548 ptpage = 0;
549 }
550
551 ttp = (tt_entry_t *)phystokv(l1_tte & ARM_TTE_TABLE_MASK);
552
553 l2_ttep = ttp + ((vaddr & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
554 l2_tte = *l2_ttep;
555
556 if (l2_tte == ARM_TTE_EMPTY) {
557 ptpage = alloc_ptpage(TRUE);
558 bzero((void *)ptpage, ARM_PGBYTES);
559 l2_tte = kvtophys(ptpage);
560 l2_tte &= ARM_TTE_TABLE_MASK;
561 l2_tte |= ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
562 *l2_ttep = l2_tte;
563 ptpage = 0;
564 }
565
566 ttp = (tt_entry_t *)phystokv(l2_tte & ARM_TTE_TABLE_MASK);
567
568 ptep = ttp + ((vaddr & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
569 cpte = *ptep;
570
571 /*
572 * If the existing PTE is not empty, then we are replacing a valid
573 * mapping.
574 */
575 if (cpte != ARM_PTE_EMPTY) {
576 panic("%s: cpte=%#llx is not empty, "
577 "vaddr=%#lx, pte=%#llx",
578 __FUNCTION__, cpte,
579 vaddr, pte);
580 }
581
582 *ptep = pte;
583 }
584
585 #endif // __ARM_KERNEL_PROTECT || XNU_MONITOR
586
587 #if __ARM_KERNEL_PROTECT__
588
589 /*
590 * arm_vm_kernel_el0_map:
591 * vaddr: The target virtual address
592 * pte: A page table entry value (may be ARM_PTE_EMPTY)
593 *
594 * This function installs pte at vaddr for the EL0 kernel mappings.
595 */
596 static void
597 arm_vm_kernel_el0_map(vm_offset_t vaddr, pt_entry_t pte)
598 {
599 /* Calculate where vaddr will be in the EL1 kernel page tables. */
600 vm_offset_t kernel_pmap_vaddr = vaddr - ((ARM_TT_ROOT_INDEX_MASK + ARM_TT_ROOT_SIZE) / 2ULL);
601 arm_vm_map(cpu_tte, kernel_pmap_vaddr, pte);
602 }
603
604 /*
605 * arm_vm_kernel_el1_map:
606 * vaddr: The target virtual address
607 * pte: A page table entry value (may be ARM_PTE_EMPTY)
608 *
609 * This function installs pte at vaddr for the EL1 kernel mappings.
610 */
611 static void
612 arm_vm_kernel_el1_map(vm_offset_t vaddr, pt_entry_t pte) {
613 arm_vm_map(cpu_tte, vaddr, pte);
614 }
615
616 /*
617 * arm_vm_kernel_pte:
618 * vaddr: The target virtual address
619 *
620 * This function returns the PTE value for the given vaddr from the kernel page
621 * tables. If the region has been block mapped, we return what an
622 * equivalent PTE value would be (as regards permissions and flags). We also
623 * remove the HINT bit (as we are not necessarily creating contiguous mappings).
624 */
625 static pt_entry_t
626 arm_vm_kernel_pte(vm_offset_t vaddr)
627 {
628 tt_entry_t * ttp = cpu_tte;
629 tt_entry_t * ttep = NULL;
630 tt_entry_t tte = 0;
631 pt_entry_t * ptep = NULL;
632 pt_entry_t pte = 0;
633
634 ttep = ttp + ((vaddr & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
635 tte = *ttep;
636
637 assert(tte & ARM_TTE_VALID);
638
639 if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) {
640 /* This is a block mapping; return the equivalent PTE value. */
641 pte = (pt_entry_t)(tte & ~ARM_TTE_TYPE_MASK);
642 pte |= ARM_PTE_TYPE_VALID;
643 pte |= vaddr & ((ARM_TT_L1_SIZE - 1) & ARM_PTE_PAGE_MASK);
644 pte &= ~ARM_PTE_HINT_MASK;
645 return pte;
646 }
647
648 ttp = (tt_entry_t *)phystokv(tte & ARM_TTE_TABLE_MASK);
649 ttep = ttp + ((vaddr & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
650 tte = *ttep;
651
652 assert(tte & ARM_TTE_VALID);
653
654 if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) {
655 /* This is a block mapping; return the equivalent PTE value. */
656 pte = (pt_entry_t)(tte & ~ARM_TTE_TYPE_MASK);
657 pte |= ARM_PTE_TYPE_VALID;
658 pte |= vaddr & ((ARM_TT_L2_SIZE - 1) & ARM_PTE_PAGE_MASK);
659 pte &= ~ARM_PTE_HINT_MASK;
660 return pte;
661 }
662
663 ttp = (tt_entry_t *)phystokv(tte & ARM_TTE_TABLE_MASK);
664
665 ptep = ttp + ((vaddr & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
666 pte = *ptep;
667 pte &= ~ARM_PTE_HINT_MASK;
668 return pte;
669 }
670
671 /*
672 * arm_vm_prepare_kernel_el0_mappings:
673 * alloc_only: If true, only allocate the page table pages; if false, also
674 * install PTE values copied from the EL1 kernel mappings.
675 *
676 * This function expands the kernel page tables to support the EL0 kernel
677 * mappings, and conditionally installs the PTE values for the EL0 kernel
678 * mappings (if alloc_only is false).
679 */
680 static void
681 arm_vm_prepare_kernel_el0_mappings(bool alloc_only)
682 {
683 pt_entry_t pte = 0;
684 vm_offset_t start = ((vm_offset_t)&ExceptionVectorsBase) & ~PAGE_MASK;
685 vm_offset_t end = (((vm_offset_t)&ExceptionVectorsEnd) + PAGE_MASK) & ~PAGE_MASK;
686 vm_offset_t cur = 0;
687 vm_offset_t cur_fixed = 0;
688
689 /* Expand for/map the exception vectors in the EL0 kernel mappings. */
690 for (cur = start, cur_fixed = ARM_KERNEL_PROTECT_EXCEPTION_START; cur < end; cur += ARM_PGBYTES, cur_fixed += ARM_PGBYTES) {
691 /*
692 * We map the exception vectors at a different address than that
693 * of the kernelcache to avoid sharing page table pages with the
694 * kernelcache (as this may cause issues with TLB caching of
695 * page table pages).
696 */
697 if (!alloc_only) {
698 pte = arm_vm_kernel_pte(cur);
699 }
700
701 arm_vm_kernel_el1_map(cur_fixed, pte);
702 arm_vm_kernel_el0_map(cur_fixed, pte);
703 }
704
705 __builtin_arm_dmb(DMB_ISH);
706 __builtin_arm_isb(ISB_SY);
707
708 if (!alloc_only) {
709 /*
710 * If we have created the alternate exception vector mappings,
711 * the boot CPU may now switch over to them.
712 */
713 set_vbar_el1(ARM_KERNEL_PROTECT_EXCEPTION_START);
714 __builtin_arm_isb(ISB_SY);
715 }
716 }
717
718 /*
719 * arm_vm_populate_kernel_el0_mappings:
720 *
721 * This function adds all required mappings to the EL0 kernel mappings.
722 */
723 static void
724 arm_vm_populate_kernel_el0_mappings(void)
725 {
726 arm_vm_prepare_kernel_el0_mappings(FALSE);
727 }
728
729 /*
730 * arm_vm_expand_kernel_el0_mappings:
731 *
732 * This function expands the kernel page tables to accommodate the EL0 kernel
733 * mappings.
734 */
735 static void
736 arm_vm_expand_kernel_el0_mappings(void)
737 {
738 arm_vm_prepare_kernel_el0_mappings(TRUE);
739 }
740 #endif /* __ARM_KERNEL_PROTECT__ */
741
742 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
743 extern void bootstrap_instructions;
744
745 /*
746 * arm_replace_identity_map takes the V=P map that we construct in start.s
747 * and repurposes it in order to have it map only the page we need in order
748 * to turn on the MMU. This prevents us from running into issues where
749 * KTRR will cause us to fault on executable block mappings that cross the
750 * KTRR boundary.
751 */
752 static void arm_replace_identity_map(boot_args * args)
753 {
754 vm_offset_t addr;
755 pmap_paddr_t paddr;
756
757 pmap_paddr_t l1_ptp_phys = 0;
758 tt_entry_t *l1_ptp_virt = NULL;
759 tt_entry_t *tte1 = NULL;
760 pmap_paddr_t l2_ptp_phys = 0;
761 tt_entry_t *l2_ptp_virt = NULL;
762 tt_entry_t *tte2 = NULL;
763 pmap_paddr_t l3_ptp_phys = 0;
764 pt_entry_t *l3_ptp_virt = NULL;
765 pt_entry_t *ptep = NULL;
766
767 addr = ((vm_offset_t)&bootstrap_instructions) & ~ARM_PGMASK;
768 paddr = kvtophys(addr);
769
770 /*
771 * The V=P page tables (at the time this comment was written) start
772 * after the last bit of kernel data, and consist of 1 L1 page and 1 or
773 * more L2 pages.
774 * Grab references to those pages, and allocate an L3 page.
775 */
776 l1_ptp_phys = args->topOfKernelData;
777 l1_ptp_virt = (tt_entry_t *)phystokv(l1_ptp_phys);
778 tte1 = &l1_ptp_virt[L1_TABLE_INDEX(paddr)];
779
780 l2_ptp_virt = L2_TABLE_VA(tte1);
781 l2_ptp_phys = (*tte1) & ARM_TTE_TABLE_MASK;
782 tte2 = &l2_ptp_virt[L2_TABLE_INDEX(paddr)];
783
784 l3_ptp_virt = (pt_entry_t *)alloc_ptpage(FALSE);
785 l3_ptp_phys = kvtophys((vm_offset_t)l3_ptp_virt);
786 ptep = &l3_ptp_virt[L3_TABLE_INDEX(paddr)];
787
788 /*
789 * Replace the large V=P mapping with a mapping that provides only the
790 * mappings needed to turn on the MMU.
791 */
792
793 bzero(l1_ptp_virt, ARM_PGBYTES);
794 *tte1 = ARM_TTE_BOOT_TABLE | (l2_ptp_phys & ARM_TTE_TABLE_MASK);
795
796 bzero(l2_ptp_virt, ARM_PGBYTES);
797 *tte2 = ARM_TTE_BOOT_TABLE | (l3_ptp_phys & ARM_TTE_TABLE_MASK);
798
799 *ptep = (paddr & ARM_PTE_MASK) |
800 ARM_PTE_TYPE_VALID |
801 ARM_PTE_SH(SH_OUTER_MEMORY) |
802 ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) |
803 ARM_PTE_AF |
804 ARM_PTE_AP(AP_RONA) |
805 ARM_PTE_NX;
806 }
807 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
808
809 tt_entry_t *arm_kva_to_tte(vm_offset_t);
810
811 tt_entry_t *
812 arm_kva_to_tte(vm_offset_t va)
813 {
814 tt_entry_t *tte1, *tte2;
815 tte1 = cpu_tte + L1_TABLE_INDEX(va);
816 tte2 = L2_TABLE_VA(tte1) + L2_TABLE_INDEX(va);
817
818 return tte2;
819 }
820
821 #if XNU_MONITOR
822
823 static inline pt_entry_t *
824 arm_kva_to_pte(vm_offset_t va)
825 {
826 tt_entry_t *tte2 = arm_kva_to_tte(va);
827 return L3_TABLE_VA(tte2) + L3_TABLE_INDEX(va);
828 }
829
830 #endif
831
832 #define ARM64_GRANULE_ALLOW_BLOCK (1 << 0)
833 #define ARM64_GRANULE_ALLOW_HINT (1 << 1)
834
835 /*
836 * arm_vm_page_granular_helper updates protections at the L3 level. It will (if
837 * necessary) allocate a page for the L3 table and update the corresponding L2
838 * entry. Then, it will iterate over the L3 table, updating protections as necessary.
839 * This expects to be invoked at L2-entry or sub-L2-entry granularity, so this should
840 * not be invoked from a context that does not do L2 iteration separately (basically,
841 * don't call this except from arm_vm_page_granular_prot).
842 *
843 * unsigned granule: 0 => force to page granule, or a combination of
844 * ARM64_GRANULE_* flags declared above.
845 */
846
847 static void
848 arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, pmap_paddr_t pa_offset,
849 int pte_prot_APX, int pte_prot_XN, unsigned granule,
850 pt_entry_t **deferred_pte, pt_entry_t *deferred_ptmp)
851 {
852 if (va & ARM_TT_L2_OFFMASK) { /* ragged edge hanging over a ARM_TT_L2_SIZE boundary */
853 tt_entry_t *tte2;
854 tt_entry_t tmplate;
855 pmap_paddr_t pa;
856 pt_entry_t *ppte, *recursive_pte = NULL, ptmp, recursive_ptmp = 0;
857 addr64_t ppte_phys;
858 unsigned i;
859
860 va &= ~ARM_TT_L2_OFFMASK;
861 pa = va - gVirtBase + gPhysBase - pa_offset;
862
863 if (pa >= real_avail_end)
864 return;
865
866 tte2 = arm_kva_to_tte(va);
867
868 assert(_end >= va);
869 tmplate = *tte2;
870
871 if (ARM_TTE_TYPE_TABLE == (tmplate & ARM_TTE_TYPE_MASK)) {
872 /* pick up the existing page table. */
873 ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK));
874 } else {
875 // TTE must be reincarnated with page level mappings.
876 ppte = (pt_entry_t*)alloc_ptpage(pa_offset == 0);
877 bzero(ppte, ARM_PGBYTES);
878 ppte_phys = kvtophys((vm_offset_t)ppte);
879
880 *tte2 = pa_to_tte(ppte_phys) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID;
881 }
882
883 vm_offset_t len = _end - va;
884 if ((pa + len) > real_avail_end)
885 _end -= (pa + len - real_avail_end);
886 assert((start - gVirtBase + gPhysBase - pa_offset) >= gPhysBase);
887
888 /* Round up to the nearest PAGE_SIZE boundary when creating mappings:
889 * PAGE_SIZE may be a multiple of ARM_PGBYTES, and we don't want to leave
890 * a ragged non-PAGE_SIZE-aligned edge. */
891 vm_offset_t rounded_end = round_page(_end);
892 /* Apply the desired protections to the specified page range */
893 for (i = 0; i <= (ARM_TT_L3_INDEX_MASK>>ARM_TT_L3_SHIFT); i++) {
894 if ((start <= va) && (va < rounded_end)) {
895
896 ptmp = pa | ARM_PTE_AF | ARM_PTE_SH(SH_OUTER_MEMORY) | ARM_PTE_TYPE;
897 ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
898 ptmp = ptmp | ARM_PTE_AP(pte_prot_APX);
899 ptmp = ptmp | ARM_PTE_NX;
900 #if __ARM_KERNEL_PROTECT__
901 ptmp = ptmp | ARM_PTE_NG;
902 #endif /* __ARM_KERNEL_PROTECT__ */
903
904 if (pte_prot_XN) {
905 ptmp = ptmp | ARM_PTE_PNX;
906 }
907
908 /*
909 * If we can, apply the contiguous hint to this range. The hint is
910 * applicable if the current address falls within a hint-sized range that will
911 * be fully covered by this mapping request.
912 */
913 if ((va >= round_up_pte_hint_address(start)) && (round_up_pte_hint_address(va + 1) <= _end) &&
914 (granule & ARM64_GRANULE_ALLOW_HINT) && use_contiguous_hint) {
915 assert((va & ((1 << ARM_PTE_HINT_ADDR_SHIFT) - 1)) == ((pa & ((1 << ARM_PTE_HINT_ADDR_SHIFT) - 1))));
916 ptmp |= ARM_PTE_HINT;
917 /* Do not attempt to reapply the hint bit to an already-active mapping.
918 * This very likely means we're attempting to change attributes on an already-active mapping,
919 * which violates the requirement of the hint bit.*/
920 assert(!kva_active || (ppte[i] == ARM_PTE_TYPE_FAULT));
921 }
922 /*
923 * Do not change the contiguous bit on an active mapping. Even in a single-threaded
924 * environment, it's possible for prefetch to produce a TLB conflict by trying to pull in
925 * a hint-sized entry on top of one or more existing page-sized entries. It's also useful
926 * to make sure we're not trying to unhint a sub-range of a larger hinted range, which
927 * could produce a later TLB conflict.
928 */
929 assert(!kva_active || (ppte[i] == ARM_PTE_TYPE_FAULT) || ((ppte[i] & ARM_PTE_HINT) == (ptmp & ARM_PTE_HINT)));
930
931 /*
932 * If we reach an entry that maps the current pte page, delay updating it until the very end.
933 * Otherwise we might end up making the PTE page read-only, leading to a fault later on in
934 * this function if we manage to outrun the TLB. This can happen on KTRR-enabled devices when
935 * marking segDATACONST read-only. Mappings for this region may straddle a PT page boundary,
936 * so we must also defer assignment of the following PTE. We will assume that if the region
937 * were to require one or more full L3 pages, it would instead use L2 blocks where possible,
938 * therefore only requiring at most one L3 page at the beginning and one at the end.
939 */
940 if (kva_active && ((pt_entry_t*)(phystokv(pa)) == ppte)) {
941 assert(recursive_pte == NULL);
942 assert(granule & ARM64_GRANULE_ALLOW_BLOCK);
943 recursive_pte = &ppte[i];
944 recursive_ptmp = ptmp;
945 } else if ((deferred_pte != NULL) && (&ppte[i] == &recursive_pte[1])) {
946 assert(*deferred_pte == NULL);
947 assert(deferred_ptmp != NULL);
948 *deferred_pte = &ppte[i];
949 *deferred_ptmp = ptmp;
950 } else {
951 ppte[i] = ptmp;
952 }
953 }
954
955 va += ARM_PGBYTES;
956 pa += ARM_PGBYTES;
957 }
958 if (recursive_pte != NULL)
959 *recursive_pte = recursive_ptmp;
960 }
961 }
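
/*
 * Sketch, not part of xnu: the window check arm_vm_page_granular_helper()
 * uses above to decide whether the page at 'va' may carry the contiguous
 * hint for a request that maps [start, _end).  The page qualifies only if
 * the whole hint-sized window containing it is covered by the request (the
 * granule-flag and use_contiguous_hint gates are omitted here).
 */
static inline boolean_t
pte_hint_applies_sketch(vm_offset_t start, vm_offset_t _end, vm_offset_t va)
{
	return (va >= round_up_pte_hint_address(start)) &&
	       (round_up_pte_hint_address(va + 1) <= _end);
}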
962
963 /*
964 * arm_vm_page_granular_prot updates protections by iterating over the L2 entries and
965 * changing them. If a particular chunk necessitates L3 entries (for reasons of
966 * alignment or length, or an explicit request that the entry be fully expanded), we
967 * hand off to arm_vm_page_granular_helper to deal with the L3 chunk of the logic.
968 */
969 static void
970 arm_vm_page_granular_prot(vm_offset_t start, unsigned long size, pmap_paddr_t pa_offset,
971 int tte_prot_XN, int pte_prot_APX, int pte_prot_XN,
972 unsigned granule)
973 {
974 pt_entry_t *deferred_pte = NULL, deferred_ptmp = 0;
975 vm_offset_t _end = start + size;
976 vm_offset_t align_start = (start + ARM_TT_L2_OFFMASK) & ~ARM_TT_L2_OFFMASK;
977
978 if (size == 0x0UL)
979 return;
980
981 if (align_start > _end) {
982 arm_vm_page_granular_helper(start, _end, start, pa_offset, pte_prot_APX, pte_prot_XN, granule, NULL, NULL);
983 return;
984 }
985
986 arm_vm_page_granular_helper(start, align_start, start, pa_offset, pte_prot_APX, pte_prot_XN, granule, &deferred_pte, &deferred_ptmp);
987
988 while ((_end - align_start) >= ARM_TT_L2_SIZE) {
989 if (!(granule & ARM64_GRANULE_ALLOW_BLOCK)) {
990 arm_vm_page_granular_helper(align_start, align_start+ARM_TT_L2_SIZE, align_start + 1, pa_offset,
991 pte_prot_APX, pte_prot_XN, granule, NULL, NULL);
992 } else {
993 pmap_paddr_t pa = align_start - gVirtBase + gPhysBase - pa_offset;
994 assert((pa & ARM_TT_L2_OFFMASK) == 0);
995 tt_entry_t *tte2;
996 tt_entry_t tmplate;
997
998 tte2 = arm_kva_to_tte(align_start);
999
1000 if ((pa >= gPhysBase) && (pa < real_avail_end)) {
1001 tmplate = (pa & ARM_TTE_BLOCK_L2_MASK) | ARM_TTE_TYPE_BLOCK
1002 | ARM_TTE_VALID | ARM_TTE_BLOCK_AF | ARM_TTE_BLOCK_NX
1003 | ARM_TTE_BLOCK_AP(pte_prot_APX) | ARM_TTE_BLOCK_SH(SH_OUTER_MEMORY)
1004 | ARM_TTE_BLOCK_ATTRINDX(CACHE_ATTRINDX_WRITEBACK);
1005
1006 #if __ARM_KERNEL_PROTECT__
1007 tmplate = tmplate | ARM_TTE_BLOCK_NG;
1008 #endif /* __ARM_KERNEL_PROTECT__ */
1009 if (tte_prot_XN)
1010 tmplate = tmplate | ARM_TTE_BLOCK_PNX;
1011
1012 *tte2 = tmplate;
1013 }
1014 }
1015 align_start += ARM_TT_L2_SIZE;
1016 }
1017
1018 if (align_start < _end)
1019 arm_vm_page_granular_helper(align_start, _end, _end, pa_offset, pte_prot_APX, pte_prot_XN, granule, &deferred_pte, &deferred_ptmp);
1020
1021 if (deferred_pte != NULL)
1022 *deferred_pte = deferred_ptmp;
1023 }
1024
1025 static inline void
1026 arm_vm_page_granular_RNX(vm_offset_t start, unsigned long size, unsigned granule)
1027 {
1028 arm_vm_page_granular_prot(start, size, 0, 1, AP_RONA, 1, granule);
1029 }
1030
1031 static inline void
1032 arm_vm_page_granular_ROX(vm_offset_t start, unsigned long size, unsigned granule)
1033 {
1034 arm_vm_page_granular_prot(start, size, 0, 0, AP_RONA, 0, granule);
1035 }
1036
1037 static inline void
1038 arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, unsigned granule)
1039 {
1040 arm_vm_page_granular_prot(start, size, 0, 1, AP_RWNA, 1, granule);
1041 }
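
/*
 * Usage sketch with hypothetical regions (segFOO_B/segSizeFOO/guard_page are
 * made-up names; the real callers in arm_vm_prot_init() below pass the
 * linker-provided seg*B/segSize* pairs).  The granule argument controls
 * whether L2 block mappings and the PTE contiguous hint may be used; 0 forces
 * plain page-granularity L3 mappings.
 */
static inline void
arm_vm_protect_example_sketch(vm_offset_t segFOO_B, unsigned long segSizeFOO,
    vm_offset_t guard_page)
{
	/* Large read-only segment: allow L2 blocks and the contiguous hint. */
	arm_vm_page_granular_RNX(segFOO_B, segSizeFOO,
	    ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);

	/* Single guard page: granule 0 forces page-granularity mappings. */
	arm_vm_page_granular_RNX(guard_page, PAGE_MAX_SIZE, 0);
}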
1042
1043 /* used in the chosen/memory-map node, populated by iBoot. */
1044 typedef struct MemoryMapFileInfo {
1045 vm_offset_t paddr;
1046 size_t length;
1047 } MemoryMapFileInfo;
1048
1049 void
1050 arm_vm_prot_init(boot_args * args)
1051 {
1052
1053 segLOWESTTEXT = UINT64_MAX;
1054 if (segSizePRELINKTEXT && (segPRELINKTEXTB < segLOWESTTEXT)) segLOWESTTEXT = segPRELINKTEXTB;
1055 assert(segSizeTEXT);
1056 if (segTEXTB < segLOWESTTEXT) segLOWESTTEXT = segTEXTB;
1057 assert(segLOWESTTEXT < UINT64_MAX);
1058
1059 segEXTRADATA = segLOWESTTEXT;
1060 segSizeEXTRADATA = 0;
1061
1062 segLOWEST = segLOWESTTEXT;
1063
1064 DTEntry memory_map;
1065 MemoryMapFileInfo *trustCacheRange;
1066 unsigned int trustCacheRangeSize;
1067 int err;
1068
1069 err = DTLookupEntry(NULL, "chosen/memory-map", &memory_map);
1070 assert(err == kSuccess);
1071
1072 err = DTGetProperty(memory_map, "TrustCache", (void**)&trustCacheRange, &trustCacheRangeSize);
1073 if (err == kSuccess) {
1074 assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo));
1075
1076 segEXTRADATA = phystokv(trustCacheRange->paddr);
1077 segSizeEXTRADATA = trustCacheRange->length;
1078
1079 if (segEXTRADATA <= segLOWEST) {
1080 segLOWEST = segEXTRADATA;
1081 }
1082 #if !(DEBUG || DEVELOPMENT)
1083
1084
1085 else {
1086 panic("EXTRADATA is in an unexpected place: %#lx > %#lx", segEXTRADATA, segLOWEST);
1087 }
1088 #endif /* !(DEBUG || DEVELOPMENT) */
1089
1090 arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1091
1092 }
1093
1094 /* Map coalesced kext TEXT segment RWNX for now */
1095 arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, ARM64_GRANULE_ALLOW_BLOCK); // Refined in OSKext::readPrelinkedExtensions
1096
1097 /* Map coalesced kext DATA_CONST segment RWNX (could be empty) */
1098 arm_vm_page_granular_RWNX(segPLKDATACONSTB, segSizePLKDATACONST, ARM64_GRANULE_ALLOW_BLOCK); // Refined in OSKext::readPrelinkedExtensions
1099
1100 /* Map coalesced kext TEXT_EXEC segment RX (could be empty) */
1101 arm_vm_page_granular_ROX(segPLKTEXTEXECB, segSizePLKTEXTEXEC, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // Refined in OSKext::readPrelinkedExtensions
1102
1103 /* if new segments not present, set space between PRELINK_TEXT and xnu TEXT to RWNX
1104 * otherwise we no longer expect any space between the coalesced kext read only segments and xnu rosegments
1105 */
1106 if (!segSizePLKDATACONST && !segSizePLKTEXTEXEC) {
1107 if (segSizePRELINKTEXT) {
1108 arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT, segTEXTB - (segPRELINKTEXTB + segSizePRELINKTEXT),
1109 ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1110 }
1111 } else {
1112 /*
1113 * If we have the new segments, we should still protect the gap between kext
1114 * read-only pages and kernel read-only pages, in the event that this gap
1115 * exists.
1116 */
1117 if ((segPLKDATACONSTB + segSizePLKDATACONST) < segTEXTB) {
1118 arm_vm_page_granular_RWNX(segPLKDATACONSTB + segSizePLKDATACONST, segTEXTB - (segPLKDATACONSTB + segSizePLKDATACONST),
1119 ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1120 }
1121 }
1122
1123 /*
1124 * Protection on kernel text is loose here to allow shenanigans early on. These
1125 * protections are tightened in arm_vm_prot_finalize(). This is necessary because
1126 * we currently patch LowResetVectorBase in cpu.c.
1127 *
1128 * TEXT segment contains mach headers and other non-executable data. This will become RONX later.
1129 */
1130 arm_vm_page_granular_RNX(segTEXTB, segSizeTEXT, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1131
1132 /* Can DATACONST start out and stay RNX?
1133 * NO, stuff in this segment gets modified during startup (viz. mac_policy_init()/mac_policy_list)
1134 * Make RNX in prot_finalize
1135 */
1136 #if XNU_MONITOR
1137 /* The ropagetable region will ultimately be owned by the PPL. Set permissions
1138 * on it separately to avoid applying mismatched block settings between this function,
1139 * pmap_static_allocations_done(), and arm_vm_prot_finalize(). */
1140 vm_offset_t segDATACONSTE = segDATACONSTB + segSizeDATACONST;
1141
1142 arm_vm_page_granular_RWNX(segDATACONSTB, (vm_offset_t)&ropagetable_begin - segDATACONSTB, ARM64_GRANULE_ALLOW_BLOCK);
1143 arm_vm_page_granular_RWNX((vm_offset_t)&ropagetable_begin,
1144 (vm_offset_t)&ropagetable_end - (vm_offset_t)&ropagetable_begin, ARM64_GRANULE_ALLOW_BLOCK);
1145 arm_vm_page_granular_RWNX((vm_offset_t)&ropagetable_end,
1146 segDATACONSTE - (vm_offset_t)&ropagetable_end, ARM64_GRANULE_ALLOW_BLOCK);
1147 #else
1148 arm_vm_page_granular_RWNX(segDATACONSTB, segSizeDATACONST, ARM64_GRANULE_ALLOW_BLOCK);
1149 #endif
1150
1151 arm_vm_page_granular_ROX(segTEXTEXECB, segSizeTEXTEXEC, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1152
1153 #if XNU_MONITOR
1154 arm_vm_page_granular_ROX(segPPLTEXTB, segSizePPLTEXT, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1155 arm_vm_page_granular_ROX(segPPLTRAMPB, segSizePPLTRAMP, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1156 arm_vm_page_granular_RNX(segPPLDATACONSTB, segSizePPLDATACONST, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1157 #endif
1158
1159 /* DATA segment will remain RWNX */
1160 arm_vm_page_granular_RWNX(segDATAB, segSizeDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1161 #if XNU_MONITOR
1162 arm_vm_page_granular_RWNX(segPPLDATAB, segSizePPLDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1163 #endif
1164
1165 arm_vm_page_granular_RWNX(segBOOTDATAB, segSizeBOOTDATA, 0);
1166 arm_vm_page_granular_RNX((vm_offset_t)&intstack_low_guard, PAGE_MAX_SIZE, 0);
1167 arm_vm_page_granular_RNX((vm_offset_t)&intstack_high_guard, PAGE_MAX_SIZE, 0);
1168 arm_vm_page_granular_RNX((vm_offset_t)&excepstack_high_guard, PAGE_MAX_SIZE, 0);
1169
1170 arm_vm_page_granular_ROX(segKLDB, segSizeKLD, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1171 arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1172 arm_vm_page_granular_RWNX(segPLKLINKEDITB, segSizePLKLINKEDIT, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // Coalesced kext LINKEDIT segment
1173 arm_vm_page_granular_ROX(segLASTB, segSizeLAST, ARM64_GRANULE_ALLOW_BLOCK); // __LAST may be empty, but we cannot assume this
1174
1175 arm_vm_page_granular_RWNX(segPRELINKDATAB, segSizePRELINKDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // Prelink __DATA for kexts (RW data)
1176
1177 if (segSizePLKLLVMCOV > 0)
1178 arm_vm_page_granular_RWNX(segPLKLLVMCOVB, segSizePLKLLVMCOV, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // LLVM code coverage data
1179
1180 arm_vm_page_granular_RWNX(segPRELINKINFOB, segSizePRELINKINFO, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); /* PreLinkInfoDictionary */
1181
1182 arm_vm_page_granular_RNX(phystokv(args->topOfKernelData), BOOTSTRAP_TABLE_SIZE, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // Boot page tables; they should not be mutable.
1183 }
1184
1185 /*
1186 * return < 0 for a < b
1187 * 0 for a == b
1188 * > 0 for a > b
1189 */
1190 typedef int (*cmpfunc_t)(const void *a, const void *b);
1191
1192 extern void
1193 qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
1194
1195 static int
1196 cmp_ptov_entries(const void *a, const void *b)
1197 {
1198 const ptov_table_entry *entry_a = a;
1199 const ptov_table_entry *entry_b = b;
1200 // Sort in descending order of segment length
1201 if (entry_a->len < entry_b->len)
1202 return 1;
1203 else if (entry_a->len > entry_b->len)
1204 return -1;
1205 else
1206 return 0;
1207 }
1208
1209 SECURITY_READ_ONLY_LATE(static unsigned int) ptov_index = 0;
1210
1211 #define ROUND_TWIG(addr) (((addr) + ARM_TT_TWIG_OFFMASK) & ~(ARM_TT_TWIG_OFFMASK))
1212
1213 static void
1214 arm_vm_physmap_slide(ptov_table_entry *temp_ptov_table, vm_map_address_t physmap_base, vm_map_address_t orig_va, vm_size_t len, int pte_prot_APX, unsigned granule)
1215 {
1216 pmap_paddr_t pa_offset;
1217
1218 assert(ptov_index < PTOV_TABLE_SIZE);
1219 assert((orig_va & ARM_PGMASK) == 0);
1220 temp_ptov_table[ptov_index].pa = orig_va - gVirtBase + gPhysBase;
1221 if (ptov_index == 0)
1222 temp_ptov_table[ptov_index].va = physmap_base;
1223 else
1224 temp_ptov_table[ptov_index].va = temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len;
1225 if (granule & ARM64_GRANULE_ALLOW_BLOCK) {
1226 vm_map_address_t orig_offset = temp_ptov_table[ptov_index].pa & ARM_TT_TWIG_OFFMASK;
1227 vm_map_address_t new_offset = temp_ptov_table[ptov_index].va & ARM_TT_TWIG_OFFMASK;
1228 if (new_offset < orig_offset)
1229 temp_ptov_table[ptov_index].va += (orig_offset - new_offset);
1230 else if (new_offset > orig_offset)
1231 temp_ptov_table[ptov_index].va = ROUND_TWIG(temp_ptov_table[ptov_index].va) + orig_offset;
1232 }
1233 assert((temp_ptov_table[ptov_index].va & ARM_PGMASK) == 0);
1234 temp_ptov_table[ptov_index].len = round_page(len);
1235 pa_offset = temp_ptov_table[ptov_index].va - orig_va;
1236 arm_vm_page_granular_prot(temp_ptov_table[ptov_index].va, temp_ptov_table[ptov_index].len, pa_offset, 1, pte_prot_APX, 1, granule);
1237 ++ptov_index;
1238 }
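
/*
 * Sketch, not part of xnu: the VA fix-up arm_vm_physmap_slide() applies above
 * when block mappings are allowed.  For L2 block entries to be usable, the
 * slid VA must preserve the PA's offset within an ARM_TT_TWIG_SIZE region,
 * so the VA is nudged forward until the two offsets agree.
 */
static inline vm_map_address_t
physmap_twig_align_sketch(vm_map_address_t va, pmap_paddr_t pa)
{
	vm_map_address_t pa_off = pa & ARM_TT_TWIG_OFFMASK;
	vm_map_address_t va_off = va & ARM_TT_TWIG_OFFMASK;

	if (va_off < pa_off) {
		return va + (pa_off - va_off);
	} else if (va_off > pa_off) {
		return ROUND_TWIG(va) + pa_off;
	}
	return va;
}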
1239
1240 #if XNU_MONITOR
1241
1242 SECURITY_READ_ONLY_LATE(static boolean_t) keep_linkedit = FALSE;
1243
1244 static void
1245 arm_vm_physmap_init(boot_args *args, vm_map_address_t physmap_base, vm_map_address_t dynamic_memory_begin __unused)
1246 {
1247 ptov_table_entry temp_ptov_table[PTOV_TABLE_SIZE];
1248 bzero(temp_ptov_table, sizeof(temp_ptov_table));
1249
1250 // This is memory that will either be handed back to the VM layer via ml_static_mfree(),
1251 // or will be available for general-purpose use. Physical aperture mappings for this memory
1252 // must be at page granularity, so that PPL ownership or cache attribute changes can be reflected
1253 // in the physical aperture mappings.
1254
1255
1256 // Slid region between gPhysBase and beginning of protected text
1257 arm_vm_physmap_slide(temp_ptov_table, physmap_base, gVirtBase, segLOWEST - gVirtBase, AP_RWNA, 0);
1258
1259 // kext bootstrap segment
1260 arm_vm_physmap_slide(temp_ptov_table, physmap_base, segKLDB, segSizeKLD, AP_RONA, 0);
1261
1262 // Early-boot data
1263 arm_vm_physmap_slide(temp_ptov_table, physmap_base, segBOOTDATAB, segSizeBOOTDATA, AP_RONA, 0);
1264
1265 #if KASAN_DYNAMIC_BLACKLIST
1266 /* KASAN's dynamic blacklist needs to query the LINKEDIT segment at runtime. As such, the
1267 * kext bootstrap code will not jettison LINKEDIT on kasan kernels, so don't bother to relocate it. */
1268 keep_linkedit = TRUE;
1269 #else
1270 PE_parse_boot_argn("keepsyms", &keep_linkedit, sizeof(keep_linkedit));
1271 #endif
1272 if (!keep_linkedit) {
1273 // Kernel LINKEDIT
1274 arm_vm_physmap_slide(temp_ptov_table, physmap_base, segLINKB, segSizeLINK, AP_RWNA, 0);
1275
1276 // Prelinked kernel LINKEDIT
1277 arm_vm_physmap_slide(temp_ptov_table, physmap_base, segPLKLINKEDITB, segSizePLKLINKEDIT, AP_RWNA, 0);
1278 }
1279
1280 // Prelinked kernel plists
1281 arm_vm_physmap_slide(temp_ptov_table, physmap_base, segPRELINKINFOB, segSizePRELINKINFO, AP_RWNA, 0);
1282
1283 // Device tree, ramdisk, boot args
1284 arm_vm_physmap_slide(temp_ptov_table, physmap_base, end_kern, (args->topOfKernelData - gPhysBase + gVirtBase) - end_kern, AP_RWNA, 0);
1285 PE_slide_devicetree(temp_ptov_table[ptov_index - 1].va - end_kern);
1286
1287 // Remainder of physical memory
1288 arm_vm_physmap_slide(temp_ptov_table, physmap_base, (args->topOfKernelData + BOOTSTRAP_TABLE_SIZE - gPhysBase + gVirtBase),
1289 real_avail_end - (args->topOfKernelData + BOOTSTRAP_TABLE_SIZE), AP_RWNA, 0);
1290
1291 assert((temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len) <= dynamic_memory_begin);
1292
1293 // Sort in descending order of segment length. LUT traversal is linear, so largest (most likely used)
1294 // segments should be placed earliest in the table to optimize lookup performance.
1295 qsort(temp_ptov_table, PTOV_TABLE_SIZE, sizeof(temp_ptov_table[0]), cmp_ptov_entries);
1296
1297 memcpy(ptov_table, temp_ptov_table, sizeof(ptov_table));
1298 }
1299
1300 #else
1301
1302 static void
1303 arm_vm_physmap_init(boot_args *args, vm_map_address_t physmap_base, vm_map_address_t dynamic_memory_begin __unused)
1304 {
1305 ptov_table_entry temp_ptov_table[PTOV_TABLE_SIZE];
1306 bzero(temp_ptov_table, sizeof(temp_ptov_table));
1307
1308 // Will be handed back to VM layer through ml_static_mfree() in arm_vm_prot_finalize()
1309 arm_vm_physmap_slide(temp_ptov_table, physmap_base, gVirtBase, segLOWEST - gVirtBase, AP_RWNA,
1310 ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
1311
1312 arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern,
1313 ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); /* Device Tree, RAM Disk (if present), bootArgs */
1314
1315 arm_vm_physmap_slide(temp_ptov_table, physmap_base, (args->topOfKernelData + BOOTSTRAP_TABLE_SIZE - gPhysBase + gVirtBase),
1316 real_avail_end - (args->topOfKernelData + BOOTSTRAP_TABLE_SIZE), AP_RWNA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // rest of physmem
1317
1318 assert((temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len) <= dynamic_memory_begin);
1319
1320 // Sort in descending order of segment length. LUT traversal is linear, so largest (most likely used)
1321 // segments should be placed earliest in the table to optimize lookup performance.
1322 qsort(temp_ptov_table, PTOV_TABLE_SIZE, sizeof(temp_ptov_table[0]), cmp_ptov_entries);
1323
1324 memcpy(ptov_table, temp_ptov_table, sizeof(ptov_table));
1325 }
1326
1327 #endif // XNU_MONITOR
1328
1329 void
1330 arm_vm_prot_finalize(boot_args * args __unused)
1331 {
1332 /*
1333 * At this point, we are far enough along in the boot process that it will be
1334 * safe to free up all of the memory preceding the kernel. It may in fact
1335 * be safe to do this earlier.
1336 *
1337 * This keeps the memory in the V-to-P mapping, but advertises it to the VM
1338 * as usable.
1339 */
1340
1341 /*
1342 * if old style PRELINK segment exists, free memory before it, and after it before XNU text
1343 * otherwise we're dealing with a new style kernel cache, so we should just free the
1344 * memory before PRELINK_TEXT segment, since the rest of the KEXT read only data segments
1345 * should be immediately followed by XNU's TEXT segment
1346 */
1347
1348 ml_static_mfree(phystokv(gPhysBase), segLOWEST - gVirtBase);
1349
1350 /*
1351 * KTRR support means we will be mucking with these pages and trying to
1352 * protect them; we cannot free the pages to the VM if we do this.
1353 */
1354 if (!segSizePLKDATACONST && !segSizePLKTEXTEXEC && segSizePRELINKTEXT) {
1355 /* If new segments not present, PRELINK_TEXT is not dynamically sized, free DRAM between it and xnu TEXT */
1356 ml_static_mfree(segPRELINKTEXTB + segSizePRELINKTEXT, segTEXTB - (segPRELINKTEXTB + segSizePRELINKTEXT));
1357 }
1358
1359 /* tighten permissions on kext read only data and code */
1360 arm_vm_page_granular_RNX(segPRELINKTEXTB, segSizePRELINKTEXT, ARM64_GRANULE_ALLOW_BLOCK);
1361 arm_vm_page_granular_RNX(segPLKDATACONSTB, segSizePLKDATACONST, ARM64_GRANULE_ALLOW_BLOCK);
1362
1363 cpu_stack_alloc(&BootCpuData);
1364 arm64_replace_bootstack(&BootCpuData);
1365 ml_static_mfree(phystokv(segBOOTDATAB - gVirtBase + gPhysBase), segSizeBOOTDATA);
1366
1367 #if __ARM_KERNEL_PROTECT__
1368 arm_vm_populate_kernel_el0_mappings();
1369 #endif /* __ARM_KERNEL_PROTECT__ */
1370
1371 #if XNU_MONITOR
1372 for (vm_offset_t va = segKLDB; va < (segKLDB + segSizeKLD); va += ARM_PGBYTES) {
1373 pt_entry_t *pte = arm_kva_to_pte(va);
1374 *pte = ARM_PTE_EMPTY;
1375 }
1376 /* Clear the original stack mappings; these pages should be mapped through ptov_table. */
1377 for (vm_offset_t va = segBOOTDATAB; va < (segBOOTDATAB + segSizeBOOTDATA); va += ARM_PGBYTES) {
1378 pt_entry_t *pte = arm_kva_to_pte(va);
1379 *pte = ARM_PTE_EMPTY;
1380 }
1381 /* Clear the original PRELINKINFO mapping. This segment should be jettisoned during I/O Kit
1382 * initialization before we reach this point. */
1383 for (vm_offset_t va = segPRELINKINFOB; va < (segPRELINKINFOB + segSizePRELINKINFO); va += ARM_PGBYTES) {
1384 pt_entry_t *pte = arm_kva_to_pte(va);
1385 *pte = ARM_PTE_EMPTY;
1386 }
1387 if (!keep_linkedit) {
1388 for (vm_offset_t va = segLINKB; va < (segLINKB + segSizeLINK); va += ARM_PGBYTES) {
1389 pt_entry_t *pte = arm_kva_to_pte(va);
1390 *pte = ARM_PTE_EMPTY;
1391 }
1392 for (vm_offset_t va = segPLKLINKEDITB; va < (segPLKLINKEDITB + segSizePLKLINKEDIT); va += ARM_PGBYTES) {
1393 pt_entry_t *pte = arm_kva_to_pte(va);
1394 *pte = ARM_PTE_EMPTY;
1395 }
1396 }
1397 #endif /* XNU_MONITOR */
1398
1399 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
1400 /*
1401 * __LAST,__pinst should no longer be executable.
1402 */
1403 arm_vm_page_granular_RNX(segLASTB, segSizeLAST, ARM64_GRANULE_ALLOW_BLOCK);
1404
1405 /*
1406 * Must wait until all other region permissions are set before locking down DATA_CONST
1407 * as the kernel static page tables live in DATA_CONST on KTRR enabled systems
1408 * and will become immutable.
1409 */
1410 #endif
1411
1412 #if XNU_MONITOR
1413 vm_offset_t segDATACONSTE = segDATACONSTB + segSizeDATACONST;
1414
1415 /*
1416 * For the moment, the RO pagetable allocation is part of the
1417 * constant data segment, but it is technically owned by the
1418 * PPL. Hence, we should not reprotect it.
1419 */
1420 arm_vm_page_granular_RNX(segDATACONSTB, (vm_offset_t)&ropagetable_begin - segDATACONSTB, ARM64_GRANULE_ALLOW_BLOCK);
1421 arm_vm_page_granular_RNX((vm_offset_t)&ropagetable_end,
1422 segDATACONSTE - (vm_offset_t)&ropagetable_end, ARM64_GRANULE_ALLOW_BLOCK);
1423 #else
1424 arm_vm_page_granular_RNX(segDATACONSTB, segSizeDATACONST, ARM64_GRANULE_ALLOW_BLOCK);
1425 #endif
1426
1427 __builtin_arm_dsb(DSB_ISH);
1428 flush_mmu_tlb();
1429 }
1430
1431 #define TBI_USER 0x1
1432 #define TBI_KERNEL 0x2
1433
1434 boolean_t user_tbi = TRUE;
1435
1436 /*
1437 * TBI (top-byte ignore) is an ARMv8 feature for ignoring the top 8 bits of
1438 * address accesses. It can be enabled separately for TTBR0 (user) and
1439 * TTBR1 (kernel). We enable it by default for user only, but allow both
1440 * to be controlled by the 'tbi' boot-arg.
1441 */
1442 static void
1443 set_tbi(void)
1444 {
1445 #if !__ARM_KERNEL_PROTECT__
1446 /* If we are not built with __ARM_KERNEL_PROTECT__, TBI can be turned
1447 * off with a boot-arg.
1448 */
1449 uint64_t old_tcr, new_tcr;
1450 int tbi = 0;
1451
1452 if (PE_parse_boot_argn("tbi", &tbi, sizeof(tbi)))
1453 user_tbi = ((tbi & TBI_USER) == TBI_USER);
1454 old_tcr = new_tcr = get_tcr();
1455 new_tcr |= (user_tbi) ? TCR_TBI0_TOPBYTE_IGNORED : 0;
1456
1457 #if !defined(HAS_APPLE_PAC)
1458 /*
1459 * arm_vm_init() runs after rebase_threaded_starts(), so enabling TBI1
1460 * at this point will break the computed pointer signatures. TBID1
1461 * could help mitigate this problem, but for now we'll just disable
1462 * kernel TBI if PAC is being used.
1463 */
1464 new_tcr |= (tbi & TBI_KERNEL) ? TCR_TBI1_TOPBYTE_IGNORED : 0;
1465 #endif
1466
1467 if (old_tcr != new_tcr) {
1468 set_tcr(new_tcr);
1469 sysreg_restore.tcr_el1 = new_tcr;
1470 }
1471 #endif /* !__ARM_KERNEL_PROTECT__ */
1472 }
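
/*
 * Illustrative sketch, not part of xnu: with TBI enabled for a region, the
 * MMU ignores bits [63:56] of virtual addresses on translation, so a pointer
 * carrying a tag in its top byte resolves to the same mapping as the untagged
 * pointer.  This hypothetical helper mirrors that effect in software.
 */
static inline uintptr_t
tbi_strip_tag_sketch(uintptr_t tagged_va)
{
	return tagged_va & ~(0xffULL << 56);
}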
1473
1474 #define ARM64_PHYSMAP_SLIDE_RANGE (1ULL << 30) // 1 GB
1475 #define ARM64_PHYSMAP_SLIDE_MASK (ARM64_PHYSMAP_SLIDE_RANGE - 1)
1476
1477 void
1478 arm_vm_init(uint64_t memory_size, boot_args * args)
1479 {
1480 vm_map_address_t va_l1, va_l1_end;
1481 tt_entry_t *cpu_l1_tte;
1482 vm_map_address_t va_l2, va_l2_end;
1483 tt_entry_t *cpu_l2_tte;
1484 pmap_paddr_t boot_ttep;
1485 tt_entry_t *boot_tte;
1486 uint64_t mem_segments;
1487 vm_offset_t ptpage_vaddr;
1488 vm_map_address_t dynamic_memory_begin;
1489 vm_map_address_t physmap_base;
1490
1491
1492 /*
1493 * Get the virtual and physical memory base from boot_args.
1494 */
1495 gVirtBase = args->virtBase;
1496 gPhysBase = args->physBase;
1497 #if KASAN
1498 real_phys_size = args->memSize + (shadow_ptop - shadow_pbase);
1499 #else
1500 real_phys_size = args->memSize;
1501 #endif
1502 /*
1503 * Ensure the physical region we specify for the VM to manage ends on a
1504 * software page boundary. Note that the software page size (PAGE_SIZE)
1505 * may be a multiple of the hardware page size specified in ARM_PGBYTES.
1506 * We must round the reported memory size down to the nearest PAGE_SIZE
1507 * boundary to ensure the VM does not try to manage a page it does not
1508 * completely own. The KASAN shadow region, if present, is managed entirely
1509 * in units of the hardware page size and should not need similar treatment.
1510 */
1511 gPhysSize = mem_size = ((gPhysBase + args->memSize) & ~PAGE_MASK) - gPhysBase;
1512
1513 if ((memory_size != 0) && (mem_size > memory_size)) {
1514 mem_size = memory_size;
}
1515 if (mem_size >= ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 4)) {
1516 panic("Unsupported memory configuration %lx\n", mem_size);
}
1517
1518 physmap_base = phystokv(args->topOfKernelData) + BOOTSTRAP_TABLE_SIZE;
1519
1520 // Slide the physical aperture to a random page-aligned location within the slide range
1521 uint64_t physmap_slide = early_random() & ARM64_PHYSMAP_SLIDE_MASK & ~((uint64_t)PAGE_MASK);
1522 assert(physmap_slide < ARM64_PHYSMAP_SLIDE_RANGE);
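/*
* The slide is early_random() reduced to a page-aligned offset strictly below
* ARM64_PHYSMAP_SLIDE_RANGE: e.g. with 16KB pages (PAGE_MASK == 0x3FFF), a
* random value of 0x123456789A yields 0x3456789A & ~0x3FFF == 0x34564000.
*/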
1523
1524 physmap_base += physmap_slide;
1525
1526 #if XNU_MONITOR
1527 physmap_base = ROUND_TWIG(physmap_base);
1528 static_memory_end = physmap_base + mem_size;
1529 #else
1530 static_memory_end = physmap_base + mem_size + (PTOV_TABLE_SIZE * ARM_TT_TWIG_SIZE); // worst possible case for block alignment
1531 #endif
1532 #if KASAN
1533 /* add the KASAN stolen memory to the physmap */
1534 dynamic_memory_begin = static_memory_end + (shadow_ptop - shadow_pbase);
1535 #else
1536 dynamic_memory_begin = static_memory_end;
1537 #endif
1538 #if XNU_MONITOR
1539 pmap_stacks_start = (void*)dynamic_memory_begin;
1540 dynamic_memory_begin += PPL_STACK_REGION_SIZE;
1541 pmap_stacks_end = (void*)dynamic_memory_begin;
1542 #endif
1543 if (dynamic_memory_begin > VM_MAX_KERNEL_ADDRESS) {
1544 panic("Unsupported memory configuration %lx\n", mem_size);
}
1545
1546 boot_ttep = args->topOfKernelData;
1547 boot_tte = (tt_entry_t *) phystokv(boot_ttep);
1548
1549 #if DEVELOPMENT || DEBUG
1550 /* Sanity check: assert that BOOTSTRAP_TABLE_SIZE is sufficiently large to
1551 * hold our bootstrap mappings for any possible slide */
1552 size_t bytes_mapped = dynamic_memory_begin - gVirtBase;
1553 size_t l1_entries = 1 + ((bytes_mapped + ARM_TT_L1_SIZE - 1) / ARM_TT_L1_SIZE);
1554 /* 1 L1 each for V=P and KVA, plus 1 page for each L2 */
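/* e.g. assuming a 16KB translation granule, where each L1 entry covers 64GB of VA,
* a few GB of bytes_mapped gives l1_entries == 2 and pages_used == 6 */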
1555 size_t pages_used = 2 * (l1_entries + 1);
1556 if (pages_used > BOOTSTRAP_TABLE_SIZE) {
1557 panic("BOOTSTRAP_TABLE_SIZE too small for memory config\n");
1558 }
1559 #endif
1560
1561 /*
1562 * TTBR0 L1, TTBR0 L2 - 1:1 bootstrap mapping.
1563 * TTBR1 L1, TTBR1 L2 - kernel mapping
1564 */
1565 avail_start = boot_ttep + BOOTSTRAP_TABLE_SIZE;
1566
1567 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
1568 arm_replace_identity_map(args);
1569 #endif
1570
1571 /* Initialize invalid tte page */
1572 invalid_tte = (tt_entry_t *)alloc_ptpage(TRUE);
1573 invalid_ttep = kvtophys((vm_offset_t)invalid_tte);
1574 bzero(invalid_tte, ARM_PGBYTES);
1575
1576 /*
1577 * Initialize l1 page table page
1578 */
1579 cpu_tte = (tt_entry_t *)alloc_ptpage(TRUE);
1580 cpu_ttep = kvtophys((vm_offset_t)cpu_tte);
1581 bzero(cpu_tte, ARM_PGBYTES);
1582 avail_end = gPhysBase + mem_size;
1583 assert(!(avail_end & PAGE_MASK));
1584
1585 #if KASAN
1586 real_avail_end = gPhysBase + real_phys_size;
1587 #else
1588 real_avail_end = avail_end;
1589 #endif
1590
1591 /*
1592 * Initialize l1 and l2 page table pages:
1593 * map physical memory at the kernel base virtual address
1594 * cover the kernel dynamic address range section
1595 *
1596 * the so-called physical aperture should be statically mapped
1597 */
1598 va_l1 = gVirtBase;
1599 va_l1_end = dynamic_memory_begin;
1600 cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
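/*
* cpu_tte is the kernel's root translation table; (va & ARM_TT_L1_INDEX_MASK) >>
* ARM_TT_L1_SHIFT is the L1 index of va, so cpu_l1_tte points at the L1 entry
* covering va_l1. Each iteration below advances coverage by ARM_TT_L1_SIZE bytes.
*/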
1601
1602 while (va_l1 < va_l1_end) {
1603 if (*cpu_l1_tte == ARM_TTE_EMPTY) {
1604 /* Allocate a page and setup L1 Table TTE in L1 */
1605 ptpage_vaddr = alloc_ptpage(TRUE);
1606 *cpu_l1_tte = (kvtophys(ptpage_vaddr) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID;
1607 bzero((void *)ptpage_vaddr, ARM_PGBYTES);
1608 }
1609
1610 if ((va_l1 + ARM_TT_L1_SIZE) < va_l1) {
1611 /* If this is the last L1 entry, it must cover the last mapping. */
1612 break;
1613 }
1614
1615 va_l1 += ARM_TT_L1_SIZE;
1616 cpu_l1_tte++;
1617 }
1618
1619 #if __ARM_KERNEL_PROTECT__
1620 /* Expand the page tables to prepare for the EL0 mappings. */
1621 arm_vm_expand_kernel_el0_mappings();
1622 #endif /* __ARM_KERNEL_PROTECT__ */
1623
1624 /*
1625 * Now retrieve addresses for end, edata, and etext from the Mach-O headers
1626 */
1627 segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT);
1628 segPLKDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_DATA_CONST", &segSizePLKDATACONST);
1629 segPLKTEXTEXECB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_TEXT_EXEC", &segSizePLKTEXTEXEC);
1630 segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT);
1631 segDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA_CONST", &segSizeDATACONST);
1632 segTEXTEXECB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT_EXEC", &segSizeTEXTEXEC);
1633 #if XNU_MONITOR
1634 segPPLTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLTEXT", &segSizePPLTEXT);
1635 segPPLTRAMPB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLTRAMP", &segSizePPLTRAMP);
1636 segPPLDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLDATA_CONST", &segSizePPLDATACONST);
1637 #endif
1638 segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA);
1639 #if XNU_MONITOR
1640 segPPLDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLDATA", &segSizePPLDATA);
1641 #endif
1642
1643 segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA);
1644 segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK);
1645 segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD);
1646 segPRELINKDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_DATA", &segSizePRELINKDATA);
1647 segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO);
1648 segPLKLLVMCOVB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_LLVM_COV", &segSizePLKLLVMCOV);
1649 segPLKLINKEDITB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_LINKEDIT", &segSizePLKLINKEDIT);
1650 segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST);
1651
1652 (void) PE_parse_boot_argn("use_contiguous_hint", &use_contiguous_hint, sizeof(use_contiguous_hint));
1653 assert(segSizePRELINKTEXT < 0x03000000); /* 23355738 */
1654
1655 /* if one of the new segments is present, the other one better be as well */
1656 if (segSizePLKDATACONST || segSizePLKTEXTEXEC) {
1657 assert(segSizePLKDATACONST && segSizePLKTEXTEXEC);
1658 }
1659
1660 etext = (vm_offset_t) segTEXTB + segSizeTEXT;
1661 sdata = (vm_offset_t) segDATAB;
1662 edata = (vm_offset_t) segDATAB + segSizeDATA;
1663 end_kern = round_page(getlastaddr()); /* Force end to next page */
1664
1665 vm_set_page_size();
1666
1667 vm_kernel_base = segTEXTB;
1668 vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
1669 vm_kext_base = segPRELINKTEXTB;
1670 vm_kext_top = vm_kext_base + segSizePRELINKTEXT;
1671
1672 vm_prelink_stext = segPRELINKTEXTB;
1673 if (!segSizePLKTEXTEXEC && !segSizePLKDATACONST) {
1674 vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
1675 } else {
1676 vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT + segSizePLKDATACONST + segSizePLKTEXTEXEC;
1677 }
1678 vm_prelink_sinfo = segPRELINKINFOB;
1679 vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
1680 vm_slinkedit = segLINKB;
1681 vm_elinkedit = segLINKB + segSizeLINK;
1682
1683 vm_prelink_sdata = segPRELINKDATAB;
1684 vm_prelink_edata = segPRELINKDATAB + segSizePRELINKDATA;
1685
1686 arm_vm_prot_init(args);
1687
1688
1689 /*
1690 * Initialize the page tables for the low globals:
1691 * cover this address range:
1692 * LOW_GLOBAL_BASE_ADDRESS + 2MB
1693 */
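/*
* The L2 entry installed below carries ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN, so
* the table-level hierarchical controls guarantee that nothing mapped under the
* low globals region can ever be executable, regardless of the leaf PTE bits.
*/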
1694 va_l1 = va_l2 = LOW_GLOBAL_BASE_ADDRESS;
1695 cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
1696 cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
1697 ptpage_vaddr = alloc_ptpage(TRUE);
1698 *cpu_l2_tte = (kvtophys(ptpage_vaddr) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN;
1699 bzero((void *)ptpage_vaddr, ARM_PGBYTES);
1700
1701 /*
1702 * Initialize l2 page table pages :
1703 * cover this address range:
1704 * KERNEL_DYNAMIC_ADDR - VM_MAX_KERNEL_ADDRESS
1705 */
1706 va_l1 = dynamic_memory_begin;
1707 va_l1_end = VM_MAX_KERNEL_ADDRESS;
1708 cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
1709
1710 while (va_l1 < va_l1_end) {
1711 if (*cpu_l1_tte == ARM_TTE_EMPTY) {
1712 /* Allocate a page and setup L1 Table TTE in L1 */
1713 ptpage_vaddr = alloc_ptpage(TRUE);
1714 *cpu_l1_tte = (kvtophys(ptpage_vaddr) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN;
1715 bzero((void *)ptpage_vaddr, ARM_PGBYTES);
1716 }
1717
1718 if ((va_l1 + ARM_TT_L1_SIZE) < va_l1) {
1719 /* If this is the last L1 entry, it must cover the last mapping. */
1720 break;
1721 }
1722
1723 va_l1 += ARM_TT_L1_SIZE;
1724 cpu_l1_tte++;
1725 }
1726
1727 #if KASAN
1728 /* record the extent of the physmap */
1729 physmap_vbase = physmap_base;
1730 physmap_vtop = static_memory_end;
1731 kasan_init();
1732 #endif /* KASAN */
1733
1734 #if MONOTONIC
1735 mt_early_init();
1736 #endif /* MONOTONIC */
1737
1738 set_tbi();
1739
1740 arm_vm_physmap_init(args, physmap_base, dynamic_memory_begin);
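/*
* Switch translation onto the new tables: TTBR1 is pointed at the kernel root
* table (cpu_ttep) while TTBR0 is pointed at the zeroed invalid table, which
* retires the bootstrap V=P mapping that has been live until now.
*/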
1741 set_mmu_ttb_alternate(cpu_ttep & TTBR_BADDR_MASK);
1742
1743
1744 set_mmu_ttb(invalid_ttep & TTBR_BADDR_MASK);
1745
1746 flush_mmu_tlb();
1747 #if defined(HAS_VMSA_LOCK)
1748 vmsa_lock();
1749 #endif
1750 kva_active = TRUE;
1751 // global table pointers may need to be different due to physical aperture remapping
1752 cpu_tte = (tt_entry_t*)(phystokv(cpu_ttep));
1753 invalid_tte = (tt_entry_t*)(phystokv(invalid_ttep));
1754
1755 sane_size = mem_size - (avail_start - gPhysBase);
1756 max_mem = mem_size;
1757 vm_kernel_slid_base = segLOWESTTEXT;
1758 vm_kernel_slid_top = vm_prelink_einfo;
1759 vm_kernel_slide = segTEXTB - VM_KERNEL_LINK_ADDRESS;
1760 vm_kernel_stext = segTEXTB;
1761 assert(segDATACONSTB == segTEXTB + segSizeTEXT);
1762 assert(segTEXTEXECB == segDATACONSTB + segSizeDATACONST);
1763 vm_kernel_etext = segTEXTB + segSizeTEXT + segSizeDATACONST + segSizeTEXTEXEC;
1764
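/*
* Round dynamic_memory_begin up to an L2 (twig) boundary so that the carve-outs
* below start twig-aligned; in particular the CTRR test region claims a whole
* ARM_TT_L2_SIZE worth of VA and installs its own L2 entry.
*/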
1765 dynamic_memory_begin = ROUND_TWIG(dynamic_memory_begin);
1766 #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
1767 // reserve a 32MB region without permission overrides to use later for a CTRR unit test
1768 {
1769 extern vm_offset_t ctrr_test_page;
1770 tt_entry_t *new_tte;
1771
1772 ctrr_test_page = dynamic_memory_begin;
1773 dynamic_memory_begin += ARM_TT_L2_SIZE;
1774 cpu_l1_tte = cpu_tte + ((ctrr_test_page & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
1775 assert((*cpu_l1_tte) & ARM_TTE_VALID);
1776 cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((ctrr_test_page & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
1777 assert((*cpu_l2_tte) == ARM_TTE_EMPTY);
1778 new_tte = (tt_entry_t *)alloc_ptpage(FALSE);
1779 bzero(new_tte, ARM_PGBYTES);
1780 *cpu_l2_tte = (kvtophys((vm_offset_t)new_tte) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID;
1781 }
1782 #endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */
1783 #if XNU_MONITOR
1784 for (vm_offset_t cur = (vm_offset_t)pmap_stacks_start; cur < (vm_offset_t)pmap_stacks_end; cur += ARM_PGBYTES) {
1785 arm_vm_map(cpu_tte, cur, ARM_PTE_EMPTY);
1786 }
1787 #endif
1788 pmap_bootstrap(dynamic_memory_begin);
1789
1790 disable_preemption();
1791
1792 /*
1793 * Initialize l3 page table pages :
1794 * cover this address range:
1795 * 2MB + FrameBuffer size + 10MB for each 256MB segment
1796 */
1797
1798 mem_segments = (mem_size + 0x0FFFFFFF) >> 28;
1799
1800 va_l1 = dynamic_memory_begin;
1801 va_l1_end = va_l1 + ((2 + (mem_segments * 10)) << 20);
1802 va_l1_end += round_page(args->Video.v_height * args->Video.v_rowBytes);
1803 va_l1_end = (va_l1_end + 0x00000000007FFFFFULL) & 0xFFFFFFFFFF800000ULL;
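/*
* mem_segments above rounds mem_size up to whole 256MB segments (0x0FFFFFFF ==
* 256MB - 1): e.g. 2GB of DRAM gives mem_segments == 8, reserving
* 2MB + (8 * 10MB) == 82MB of VA plus the framebuffer. The final masking rounds
* va_l1_end up to an 8MB boundary (0x00000000007FFFFFULL == 8MB - 1).
*/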
1804
1805 cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
1806
1807 while (va_l1 < va_l1_end) {
1808
1809 va_l2 = va_l1;
1810
1811 if (((va_l1 & ~ARM_TT_L1_OFFMASK)+ARM_TT_L1_SIZE) < va_l1) {
1812 /* If this is the last L1 entry, it must cover the last mapping. */
1813 va_l2_end = va_l1_end;
1814 } else {
1815 va_l2_end = MIN((va_l1 & ~ARM_TT_L1_OFFMASK)+ARM_TT_L1_SIZE, va_l1_end);
1816 }
1817
1818 cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
1819
1820 while (va_l2 < va_l2_end) {
1821 pt_entry_t * ptp;
1822 pmap_paddr_t ptp_phys;
1823
1824 /* Allocate a page and setup L3 Table TTE in L2 */
1825 ptp = (pt_entry_t *) alloc_ptpage(FALSE);
1826 ptp_phys = (pmap_paddr_t)kvtophys((vm_offset_t)ptp);
1827
1828 pmap_init_pte_page(kernel_pmap, ptp, va_l2, 3, TRUE, TRUE);
1829
1830 *cpu_l2_tte = (pa_to_tte (ptp_phys)) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN;
1831
1832 va_l2 += ARM_TT_L2_SIZE;
1833 cpu_l2_tte++;
1834 }
1835
1836 va_l1 = va_l2_end;
1837 cpu_l1_tte++;
1838 }
1839
1840 /*
1841 * Initialize l3 page table pages :
1842 * cover this address range:
1843 * (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - VM_MAX_KERNEL_ADDRESS
1844 */
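/*
* This is the per-CPU copy window region at the top of the kernel VA space;
* preallocating its L3 tables here lets the pmap install and tear down window
* mappings later without having to allocate table pages.
*/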
1845 va_l1 = VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK;
1846 va_l1_end = VM_MAX_KERNEL_ADDRESS;
1847
1848 cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
1849
1850 while (va_l1 < va_l1_end) {
1851
1852 va_l2 = va_l1;
1853
1854 if (((va_l1 & ~ARM_TT_L1_OFFMASK)+ARM_TT_L1_SIZE) < va_l1) {
1855 /* If this is the last L1 entry, it must cover the last mapping. */
1856 va_l2_end = va_l1_end;
1857 } else {
1858 va_l2_end = MIN((va_l1 & ~ARM_TT_L1_OFFMASK)+ARM_TT_L1_SIZE, va_l1_end);
1859 }
1860
1861 cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
1862
1863 while (va_l2 < va_l2_end) {
1864 pt_entry_t * ptp;
1865 pmap_paddr_t ptp_phys;
1866
1867 /* Allocate a page and setup L3 Table TTE in L2 */
1868 ptp = (pt_entry_t *) alloc_ptpage(FALSE);
1869 ptp_phys = (pmap_paddr_t)kvtophys((vm_offset_t)ptp);
1870
1871 pmap_init_pte_page(kernel_pmap, ptp, va_l2, 3, TRUE, TRUE);
1872
1873 *cpu_l2_tte = (pa_to_tte (ptp_phys)) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN;
1874
1875 va_l2 += ARM_TT_L2_SIZE;
1876 cpu_l2_tte++;
1877 }
1878
1879 va_l1 = va_l2_end;
1880 cpu_l1_tte++;
1881 }
1882
1883 #if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__
1884 /*
1885 * In this configuration, the bootstrap mappings (arm_vm_init) and
1886 * the heap mappings occupy separate L1 regions. Explicitly set up
1887 * the heap L1 allocations here.
1888 */
1889 va_l1 = VM_MIN_KERNEL_ADDRESS & ~ARM_TT_L1_OFFMASK;
1890 cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
1891
1892 while ((va_l1 >= (VM_MIN_KERNEL_ADDRESS & ~ARM_TT_L1_OFFMASK)) && (va_l1 < VM_MAX_KERNEL_ADDRESS)) {
1893 /*
1894 * If the L1 entry has not yet been allocated, allocate it
1895 * now and treat it as a heap table.
1896 */
1897 if (*cpu_l1_tte == ARM_TTE_EMPTY) {
1898 tt_entry_t *new_tte = (tt_entry_t*)alloc_ptpage(FALSE);
1899 bzero(new_tte, ARM_PGBYTES);
1900 *cpu_l1_tte = (kvtophys((vm_offset_t)new_tte) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN;
1901 }
1902
1903 cpu_l1_tte++;
1904 va_l1 += ARM_TT_L1_SIZE;
1905 }
1906 #endif
1907
1908 /*
1909 * Adjust avail_start so that the range that the VM owns
1910 * starts on a PAGE_SIZE aligned boundary.
1911 */
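/*
* For example, with 16KB pages (PAGE_MASK == 0x3FFF) an avail_start of
* 0x800004001 would round up to 0x800008000.
*/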
1912 avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK;
1913
1914 #if XNU_MONITOR
1915 pmap_static_allocations_done();
1916 #endif
1917 first_avail = avail_start;
1918 patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData);
1919 enable_preemption();
1920 }
1921