/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_debug.h>
#include <mach_kdp.h>
#include <debug.h>

#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/monotonic.h>
#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/lowglobals.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <pexpert/arm64/boot.h>
#include <pexpert/device_tree.h>

#include <libkern/kernel_mach_header.h>
#include <libkern/section_keywords.h>

#include <san/kasan.h>

#if __ARM_KERNEL_PROTECT__
/*
 * If we want to support __ARM_KERNEL_PROTECT__, we need a sufficient amount of
 * mappable space preceding the kernel (as we unmap the kernel by cutting the
 * range covered by TTBR1 in half). This must also cover the exception vectors.
 */
static_assert(KERNEL_PMAP_HEAP_RANGE_START > ARM_KERNEL_PROTECT_EXCEPTION_START);

/* The exception vectors and the kernel cannot share root TTEs. */
static_assert((KERNEL_PMAP_HEAP_RANGE_START & ~ARM_TT_ROOT_OFFMASK) > ARM_KERNEL_PROTECT_EXCEPTION_START);

/*
 * We must have enough space in the TTBR1_EL1 range to create the EL0 mapping of
 * the exception vectors.
 */
static_assert((((~ARM_KERNEL_PROTECT_EXCEPTION_START) + 1) * 2ULL) <= (ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK));
#endif /* __ARM_KERNEL_PROTECT__ */

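/*
 * For illustration: the last static_assert above checks that twice the span
 * from ARM_KERNEL_PROTECT_EXCEPTION_START to the top of the address space
 * fits within the TTBR1 root translation range.
 * (~ARM_KERNEL_PROTECT_EXCEPTION_START) + 1 is that span; with a
 * hypothetical start of 0xFFFFFFFFFFFE0000 it is 0x20000 bytes, and doubling
 * it (the EL1 vectors plus their EL0 alias) must still fit below
 * ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK.
 */
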
#define ARM_DYNAMIC_TABLE_XN (ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN)

#if KASAN
extern vm_offset_t shadow_pbase;
extern vm_offset_t shadow_ptop;
extern vm_offset_t physmap_vbase;
extern vm_offset_t physmap_vtop;
#endif

/*
 * We explicitly place this in const, as it is not const from a language
 * perspective, but it is only modified before we actually switch away from
 * the bootstrap page tables.
 */
SECURITY_READ_ONLY_LATE(uint8_t) bootstrap_pagetables[BOOTSTRAP_TABLE_SIZE] __attribute__((aligned(ARM_PGBYTES)));

/*
 * Denotes the end of xnu.
 */
extern void *last_kernel_symbol;

extern void arm64_replace_bootstack(cpu_data_t*);
extern void PE_slide_devicetree(vm_offset_t);

/*
 * KASLR parameters
 */
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_base;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_top;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kext_base;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kext_top;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_stext;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_etext;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_slide;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_slid_base;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_slid_top;

SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_stext;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_etext;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_sdata;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_edata;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_sinfo;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_prelink_einfo;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_slinkedit;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_elinkedit;

SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_builtinkmod_text;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_builtinkmod_text_end;

SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernelcache_base;
SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernelcache_top;

/* Used by <mach/arm/vm_param.h> */
SECURITY_READ_ONLY_LATE(unsigned long) gVirtBase;
SECURITY_READ_ONLY_LATE(unsigned long) gPhysBase;
SECURITY_READ_ONLY_LATE(unsigned long) gPhysSize;
SECURITY_READ_ONLY_LATE(unsigned long) gT0Sz = T0SZ_BOOT;
SECURITY_READ_ONLY_LATE(unsigned long) gT1Sz = T1SZ_BOOT;

/* 23543331 - step 1 of kext / kernel __TEXT and __DATA colocation is to move
 * all kexts before the kernel. This is only for arm64 devices and looks
 * something like the following:
 * -- vmaddr order --
 * 0xffffff8004004000 __PRELINK_TEXT
 * 0xffffff8007004000 __TEXT (xnu)
 * 0xffffff80075ec000 __DATA (xnu)
 * 0xffffff80076dc000 __KLD (xnu)
 * 0xffffff80076e0000 __LAST (xnu)
 * 0xffffff80076e4000 __LINKEDIT (xnu)
 * 0xffffff80076e4000 __PRELINK_DATA (not used yet)
 * 0xffffff800782c000 __PRELINK_INFO
 * 0xffffff80078e4000 -- End of kernelcache
 */

/* 24921709 - make XNU ready for KTRR
 *
 * Two possible kernel cache layouts, depending on which kcgen is being used.
 * VAs increasing downwards.
 * Old KCGEN:
 *
 * __PRELINK_TEXT
 * __TEXT
 * __DATA_CONST
 * __TEXT_EXEC
 * __KLD
 * __LAST
 * __DATA
 * __PRELINK_DATA (expected empty)
 * __LINKEDIT
 * __PRELINK_INFO
 *
 * New kcgen:
 *
 * __PRELINK_TEXT        <--- First KTRR (ReadOnly) segment
 * __PLK_DATA_CONST
 * __PLK_TEXT_EXEC
 * __TEXT
 * __DATA_CONST
 * __TEXT_EXEC
 * __KLD
 * __LAST                <--- Last KTRR (ReadOnly) segment
 * __DATA
 * __BOOTDATA (if present)
 * __LINKEDIT
 * __PRELINK_DATA (expected populated now)
 * __PLK_LINKEDIT
 * __PRELINK_INFO
 *
 */

vm_offset_t mem_size;                             /* Size of actual physical memory present
                                                   * minus any performance buffer and possibly
                                                   * limited by mem_limit in bytes */
uint64_t    mem_actual;                           /* The "One True" physical memory size
                                                   * actually, it's the highest physical
                                                   * address + 1 */
uint64_t    max_mem;                              /* Size of physical memory (bytes), adjusted
                                                   * by maxmem */
uint64_t    max_mem_actual;                       /* Actual size of physical memory (bytes),
                                                   * adjusted by the maxmem boot-arg */
uint64_t    sane_size;                            /* Memory size to use for defaults
                                                   * calculations */
/* This no longer appears to be used; kill it? */
addr64_t    vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Highest kernel
                                                   * virtual address known
                                                   * to the VM system */

SECURITY_READ_ONLY_LATE(vm_offset_t) segEXTRADATA;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeEXTRADATA;

SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTTEXT;
SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWEST;
SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTRO;
SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTRO;

/* Only set when booted from MH_FILESET kernel collections */
SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTKC;
SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTKC;
SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTROKC;
SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTROKC;
SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTAuxKC;
SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTAuxKC;
SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTROAuxKC;
SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTROAuxKC;
SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTRXAuxKC;
SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTRXAuxKC;
SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTNLEAuxKC;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segTEXTB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeTEXT;

#if XNU_MONITOR
SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLTEXTB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLTEXT;

SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLTRAMPB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLTRAMP;

SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLDATACONSTB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLDATACONST;
SECURITY_READ_ONLY_LATE(void *) pmap_stacks_start = NULL;
SECURITY_READ_ONLY_LATE(void *) pmap_stacks_end = NULL;
#endif

SECURITY_READ_ONLY_LATE(static vm_offset_t) segDATACONSTB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeDATACONST;

SECURITY_READ_ONLY_LATE(vm_offset_t) segTEXTEXECB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeTEXTEXEC;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segDATAB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeDATA;

#if XNU_MONITOR
SECURITY_READ_ONLY_LATE(vm_offset_t) segPPLDATAB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizePPLDATA;
#endif

SECURITY_READ_ONLY_LATE(vm_offset_t) segBOOTDATAB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeBOOTDATA;
extern vm_offset_t intstack_low_guard;
extern vm_offset_t intstack_high_guard;
extern vm_offset_t excepstack_high_guard;

SECURITY_READ_ONLY_LATE(vm_offset_t) segLINKB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeLINK;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segKLDB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeKLD;
SECURITY_READ_ONLY_LATE(static vm_offset_t) segKLDDATAB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeKLDDATA;
SECURITY_READ_ONLY_LATE(vm_offset_t) segLASTB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeLAST;
SECURITY_READ_ONLY_LATE(vm_offset_t) segLASTDATACONSTB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeLASTDATACONST;

SECURITY_READ_ONLY_LATE(vm_offset_t) sectHIBTEXTB;
SECURITY_READ_ONLY_LATE(unsigned long) sectSizeHIBTEXT;
SECURITY_READ_ONLY_LATE(vm_offset_t) segHIBDATAB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeHIBDATA;
SECURITY_READ_ONLY_LATE(vm_offset_t) sectHIBDATACONSTB;
SECURITY_READ_ONLY_LATE(unsigned long) sectSizeHIBDATACONST;

SECURITY_READ_ONLY_LATE(vm_offset_t) segPRELINKTEXTB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizePRELINKTEXT;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKTEXTEXECB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKTEXTEXEC;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKDATACONSTB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKDATACONST;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPRELINKDATAB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePRELINKDATA;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKLLVMCOVB = 0;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKLLVMCOV = 0;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPLKLINKEDITB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKLINKEDIT;

SECURITY_READ_ONLY_LATE(static vm_offset_t) segPRELINKINFOB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizePRELINKINFO;

/* Only set when booted from MH_FILESET primary kernel collection */
SECURITY_READ_ONLY_LATE(vm_offset_t) segKCTEXTEXECB;
SECURITY_READ_ONLY_LATE(unsigned long) segSizeKCTEXTEXEC;
SECURITY_READ_ONLY_LATE(static vm_offset_t) segKCDATACONSTB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeKCDATACONST;
SECURITY_READ_ONLY_LATE(static vm_offset_t) segKCDATAB;
SECURITY_READ_ONLY_LATE(static unsigned long) segSizeKCDATA;

SECURITY_READ_ONLY_LATE(static boolean_t) use_contiguous_hint = TRUE;

SECURITY_READ_ONLY_LATE(int) PAGE_SHIFT_CONST;

SECURITY_READ_ONLY_LATE(vm_offset_t) end_kern;
SECURITY_READ_ONLY_LATE(vm_offset_t) etext;
SECURITY_READ_ONLY_LATE(vm_offset_t) sdata;
SECURITY_READ_ONLY_LATE(vm_offset_t) edata;

SECURITY_READ_ONLY_LATE(static vm_offset_t) auxkc_mh, auxkc_base, auxkc_right_above;

vm_offset_t alloc_ptpage(boolean_t map_static);
SECURITY_READ_ONLY_LATE(vm_offset_t) ropage_next;

/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 * Page_size must already be set.
 *
 * Parameters:
 * first_avail: first available physical page -
 *              after kernel page tables
 * avail_start: PA of first physical page
 * avail_end:   PA of last physical page
 */
SECURITY_READ_ONLY_LATE(vm_offset_t) first_avail;
SECURITY_READ_ONLY_LATE(vm_offset_t) static_memory_end;
SECURITY_READ_ONLY_LATE(pmap_paddr_t) avail_start;
SECURITY_READ_ONLY_LATE(pmap_paddr_t) avail_end;
SECURITY_READ_ONLY_LATE(pmap_paddr_t) real_avail_end;
SECURITY_READ_ONLY_LATE(unsigned long) real_phys_size;
SECURITY_READ_ONLY_LATE(vm_map_address_t) physmap_base = (vm_map_address_t)0;
SECURITY_READ_ONLY_LATE(vm_map_address_t) physmap_end = (vm_map_address_t)0;

#if __ARM_KERNEL_PROTECT__
extern void ExceptionVectorsBase;
extern void ExceptionVectorsEnd;
#endif /* __ARM_KERNEL_PROTECT__ */

typedef struct {
	pmap_paddr_t pa;
	vm_map_address_t va;
	vm_size_t len;
} ptov_table_entry;

#define PTOV_TABLE_SIZE 8
SECURITY_READ_ONLY_LATE(static ptov_table_entry) ptov_table[PTOV_TABLE_SIZE];
SECURITY_READ_ONLY_LATE(static boolean_t) kva_active = FALSE;

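/*
 * Note: the translation helpers below first consult ptov_table, which holds
 * up to PTOV_TABLE_SIZE physical ranges whose virtual mappings are not the
 * simple linear one; a PA that misses every entry falls through to the
 * static linear map.  As a worked example with hypothetical bases
 * gPhysBase = 0x800000000 and gVirtBase = 0xfffffff007004000, a PA of
 * 0x800001000 that misses the table yields VA 0xfffffff007005000.
 */
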
vm_map_address_t
phystokv(pmap_paddr_t pa)
{
	for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) {
		if ((pa >= ptov_table[i].pa) && (pa < (ptov_table[i].pa + ptov_table[i].len))) {
			return pa - ptov_table[i].pa + ptov_table[i].va;
		}
	}
	assertf((pa - gPhysBase) < real_phys_size, "%s: illegal PA: 0x%llx", __func__, (uint64_t)pa);
	return pa - gPhysBase + gVirtBase;
}

vm_map_address_t
phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
{
	vm_size_t len;
	for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) {
		if ((pa >= ptov_table[i].pa) && (pa < (ptov_table[i].pa + ptov_table[i].len))) {
			len = ptov_table[i].len - (pa - ptov_table[i].pa);
			if (*max_len > len) {
				*max_len = len;
			}
			return pa - ptov_table[i].pa + ptov_table[i].va;
		}
	}
	len = PAGE_SIZE - (pa & PAGE_MASK);
	if (*max_len > len) {
		*max_len = len;
	}
	assertf((pa - gPhysBase) < real_phys_size, "%s: illegal PA: 0x%llx", __func__, (uint64_t)pa);
	return pa - gPhysBase + gVirtBase;
}

vm_offset_t
ml_static_vtop(vm_offset_t va)
{
	for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) {
		if ((va >= ptov_table[i].va) && (va < (ptov_table[i].va + ptov_table[i].len))) {
			return va - ptov_table[i].va + ptov_table[i].pa;
		}
	}
	assertf(((vm_address_t)(va) - gVirtBase) < gPhysSize, "%s: illegal VA: %p", __func__, (void*)va);
	return (vm_address_t)(va) - gVirtBase + gPhysBase;
}

/*
 * This rounds the given address up to the nearest boundary for a PTE contiguous
 * hint.
 */
static vm_offset_t
round_up_pte_hint_address(vm_offset_t address)
{
	vm_offset_t hint_size = ARM_PTE_SIZE << ARM_PTE_HINT_ENTRIES_SHIFT;
	return (address + (hint_size - 1)) & ~(hint_size - 1);
}

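/*
 * Worked example: for a power-of-two hint_size, the expression
 * (address + (hint_size - 1)) & ~(hint_size - 1) rounds address up to the
 * next hint_size boundary.  With a hypothetical hint_size of 0x200000, an
 * address of 0x1234000 becomes (0x1234000 + 0x1fffff) & ~0x1fffff =
 * 0x1400000.
 */
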
/* allocate a page for a page table: we support static and dynamic mappings.
 *
 * returns a virtual address for the allocated page
 *
 * for static mappings, we allocate from the region ropagetable_begin to ropagetable_end-1,
 * which is defined in the DATA_CONST segment and will be protected RNX when vm_prot_finalize runs.
 *
 * for dynamic mappings, we allocate from avail_start, which should remain RWNX.
 */

vm_offset_t
alloc_ptpage(boolean_t map_static)
{
	vm_offset_t vaddr;

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
	map_static = FALSE;
#endif

	if (!ropage_next) {
		ropage_next = (vm_offset_t)&ropagetable_begin;
	}

	if (map_static) {
		assert(ropage_next < (vm_offset_t)&ropagetable_end);

		vaddr = ropage_next;
		ropage_next += ARM_PGBYTES;

		return vaddr;
	} else {
		vaddr = phystokv(avail_start);
		avail_start += ARM_PGBYTES;

		return vaddr;
	}
}

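/*
 * Typical usage in this file (see arm_vm_map() below): allocate a static
 * table page with alloc_ptpage(TRUE), bzero() it for ARM_PGBYTES, then
 * install its physical address (obtained via kvtophys()) into the parent
 * TTE.
 */
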
#if DEBUG

void dump_kva_l2(vm_offset_t tt_base, tt_entry_t *tt, int indent, uint64_t *rosz_out, uint64_t *rwsz_out);

void
dump_kva_l2(vm_offset_t tt_base, tt_entry_t *tt, int indent, uint64_t *rosz_out, uint64_t *rwsz_out)
{
	unsigned int i;
	boolean_t cur_ro, prev_ro = 0;
	int start_entry = -1;
	tt_entry_t cur, prev = 0;
	pmap_paddr_t robegin = kvtophys((vm_offset_t)&ropagetable_begin);
	pmap_paddr_t roend = kvtophys((vm_offset_t)&ropagetable_end);
	boolean_t tt_static = kvtophys((vm_offset_t)tt) >= robegin &&
	    kvtophys((vm_offset_t)tt) < roend;

	for (i = 0; i < TTE_PGENTRIES; i++) {
		int tte_type = tt[i] & ARM_TTE_TYPE_MASK;
		cur = tt[i] & ARM_TTE_TABLE_MASK;

		if (tt_static) {
			/* addresses mapped by this entry are static if it is a block mapping,
			 * or the table was allocated from the RO page table region */
			cur_ro = (tte_type == ARM_TTE_TYPE_BLOCK) || (cur >= robegin && cur < roend);
		} else {
			cur_ro = 0;
		}

		if ((cur == 0 && prev != 0) || (cur_ro != prev_ro && prev != 0)) { // falling edge
			uintptr_t start, end, sz;

			start = (uintptr_t)start_entry << ARM_TT_L2_SHIFT;
			start += tt_base;
			end = ((uintptr_t)i << ARM_TT_L2_SHIFT) - 1;
			end += tt_base;

			sz = end - start + 1;
			printf("%*s0x%08x_%08x-0x%08x_%08x %s (%luMB)\n",
			    indent * 4, "",
			    (uint32_t)(start >> 32), (uint32_t)start,
			    (uint32_t)(end >> 32), (uint32_t)end,
			    prev_ro ? "Static " : "Dynamic",
			    (sz >> 20));

			if (prev_ro) {
				*rosz_out += sz;
			} else {
				*rwsz_out += sz;
			}
		}

		if ((prev == 0 && cur != 0) || cur_ro != prev_ro) { // rising edge: set start
			start_entry = i;
		}

		prev = cur;
		prev_ro = cur_ro;
	}
}

void
dump_kva_space()
{
	uint64_t tot_rosz = 0, tot_rwsz = 0;
	int ro_ptpages, rw_ptpages;
	pmap_paddr_t robegin = kvtophys((vm_offset_t)&ropagetable_begin);
	pmap_paddr_t roend = kvtophys((vm_offset_t)&ropagetable_end);
	boolean_t root_static = kvtophys((vm_offset_t)cpu_tte) >= robegin &&
	    kvtophys((vm_offset_t)cpu_tte) < roend;
	uint64_t kva_base = ~((1ULL << (64 - T1SZ_BOOT)) - 1);

	printf("Root page table: %s\n", root_static ? "Static" : "Dynamic");

	for (unsigned int i = 0; i < TTE_PGENTRIES; i++) {
		pmap_paddr_t cur;
		boolean_t cur_ro;
		uintptr_t start, end;
		uint64_t rosz = 0, rwsz = 0;

		if ((cpu_tte[i] & ARM_TTE_VALID) == 0) {
			continue;
		}

		cur = cpu_tte[i] & ARM_TTE_TABLE_MASK;
		start = (uint64_t)i << ARM_TT_L1_SHIFT;
		start = start + kva_base;
		end = start + (ARM_TT_L1_SIZE - 1);
		cur_ro = cur >= robegin && cur < roend;

		printf("0x%08x_%08x-0x%08x_%08x %s\n",
		    (uint32_t)(start >> 32), (uint32_t)start,
		    (uint32_t)(end >> 32), (uint32_t)end,
		    cur_ro ? "Static " : "Dynamic");

		dump_kva_l2(start, (tt_entry_t*)phystokv(cur), 1, &rosz, &rwsz);
		tot_rosz += rosz;
		tot_rwsz += rwsz;
	}

	printf("L2 Address space mapped: Static %lluMB Dynamic %lluMB Total %lluMB\n",
	    tot_rosz >> 20,
	    tot_rwsz >> 20,
	    (tot_rosz >> 20) + (tot_rwsz >> 20));

	ro_ptpages = (int)((ropage_next - (vm_offset_t)&ropagetable_begin) >> ARM_PGSHIFT);
	rw_ptpages = (int)(lowGlo.lgStaticSize >> ARM_PGSHIFT);
	printf("Pages used: static %d dynamic %d\n", ro_ptpages, rw_ptpages);
}

#endif /* DEBUG */

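/*
 * Usage: on a DEBUG kernel, dump_kva_space() prints each mapped L1/L2 range
 * annotated as Static (block mapped, or backed by the RO page table region)
 * versus Dynamic, followed by totals and page table page counts.
 */
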
#if __ARM_KERNEL_PROTECT__ || XNU_MONITOR
/*
 * arm_vm_map:
 *   root_ttp: The kernel virtual address for the root of the target page tables
 *   vaddr: The target virtual address
 *   pte: A page table entry value (may be ARM_PTE_EMPTY)
 *
 * This function installs pte at vaddr in root_ttp.  Any page table pages needed
 * to install pte will be allocated by this function.
 */
static void
arm_vm_map(tt_entry_t * root_ttp, vm_offset_t vaddr, pt_entry_t pte)
{
	vm_offset_t ptpage = 0;
	tt_entry_t * ttp = root_ttp;

	tt_entry_t * l1_ttep = NULL;
	tt_entry_t l1_tte = 0;

	tt_entry_t * l2_ttep = NULL;
	tt_entry_t l2_tte = 0;
	pt_entry_t * ptep = NULL;
	pt_entry_t cpte = 0;

	/*
	 * Walk the target page table to find the PTE for the given virtual
	 * address.  Allocate any page table pages needed to do this.
	 */
	l1_ttep = ttp + ((vaddr & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
	l1_tte = *l1_ttep;

	if (l1_tte == ARM_TTE_EMPTY) {
		ptpage = alloc_ptpage(TRUE);
		bzero((void *)ptpage, ARM_PGBYTES);
		l1_tte = kvtophys(ptpage);
		l1_tte &= ARM_TTE_TABLE_MASK;
		l1_tte |= ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		*l1_ttep = l1_tte;
		ptpage = 0;
	}

	ttp = (tt_entry_t *)phystokv(l1_tte & ARM_TTE_TABLE_MASK);

	l2_ttep = ttp + ((vaddr & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
	l2_tte = *l2_ttep;

	if (l2_tte == ARM_TTE_EMPTY) {
		ptpage = alloc_ptpage(TRUE);
		bzero((void *)ptpage, ARM_PGBYTES);
		l2_tte = kvtophys(ptpage);
		l2_tte &= ARM_TTE_TABLE_MASK;
		l2_tte |= ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
		*l2_ttep = l2_tte;
		ptpage = 0;
	}

	ttp = (tt_entry_t *)phystokv(l2_tte & ARM_TTE_TABLE_MASK);

	ptep = ttp + ((vaddr & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
	cpte = *ptep;

	/*
	 * If the existing PTE is not empty, then we are replacing a valid
	 * mapping.
	 */
	if (cpte != ARM_PTE_EMPTY) {
		panic("%s: cpte=%#llx is not empty, "
		    "vaddr=%#lx, pte=%#llx",
		    __FUNCTION__, cpte,
		    vaddr, pte);
	}

	*ptep = pte;
}

#endif // __ARM_KERNEL_PROTECT__ || XNU_MONITOR

#if __ARM_KERNEL_PROTECT__

/*
 * arm_vm_kernel_el0_map:
 *   vaddr: The target virtual address
 *   pte: A page table entry value (may be ARM_PTE_EMPTY)
 *
 * This function installs pte at vaddr for the EL0 kernel mappings.
 */
static void
arm_vm_kernel_el0_map(vm_offset_t vaddr, pt_entry_t pte)
{
	/* Calculate where vaddr will be in the EL1 kernel page tables. */
	vm_offset_t kernel_pmap_vaddr = vaddr - ((ARM_TT_ROOT_INDEX_MASK + ARM_TT_ROOT_SIZE) / 2ULL);
	arm_vm_map(cpu_tte, kernel_pmap_vaddr, pte);
}

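/*
 * Note: ARM_TT_ROOT_INDEX_MASK + ARM_TT_ROOT_SIZE is the total span
 * translated by the root table, so subtracting half of it moves vaddr to
 * the slot at the same offset in the lower half of that span.  Per the
 * comment at the top of this file, the kernel is unmapped by cutting the
 * TTBR1 range in half, so the vectors must also exist at this aliased
 * location for EL0.
 */
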
/*
 * arm_vm_kernel_el1_map:
 *   vaddr: The target virtual address
 *   pte: A page table entry value (may be ARM_PTE_EMPTY)
 *
 * This function installs pte at vaddr for the EL1 kernel mappings.
 */
static void
arm_vm_kernel_el1_map(vm_offset_t vaddr, pt_entry_t pte)
{
	arm_vm_map(cpu_tte, vaddr, pte);
}

/*
 * arm_vm_kernel_pte:
 *   vaddr: The target virtual address
 *
 * This function returns the PTE value for the given vaddr from the kernel page
 * tables.  If the region has been block mapped, we return what an equivalent
 * PTE value would be (as regards permissions and flags).  We also remove the
 * HINT bit (as we are not necessarily creating contiguous mappings).
 */
static pt_entry_t
arm_vm_kernel_pte(vm_offset_t vaddr)
{
	tt_entry_t * ttp = cpu_tte;
	tt_entry_t * ttep = NULL;
	tt_entry_t tte = 0;
	pt_entry_t * ptep = NULL;
	pt_entry_t pte = 0;

	ttep = ttp + ((vaddr & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
	tte = *ttep;

	assert(tte & ARM_TTE_VALID);

	if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) {
		/* This is a block mapping; return the equivalent PTE value. */
		pte = (pt_entry_t)(tte & ~ARM_TTE_TYPE_MASK);
		pte |= ARM_PTE_TYPE_VALID;
		pte |= vaddr & ((ARM_TT_L1_SIZE - 1) & ARM_PTE_PAGE_MASK);
		pte &= ~ARM_PTE_HINT_MASK;
		return pte;
	}

	ttp = (tt_entry_t *)phystokv(tte & ARM_TTE_TABLE_MASK);
	ttep = ttp + ((vaddr & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
	tte = *ttep;

	assert(tte & ARM_TTE_VALID);

	if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) {
		/* This is a block mapping; return the equivalent PTE value. */
		pte = (pt_entry_t)(tte & ~ARM_TTE_TYPE_MASK);
		pte |= ARM_PTE_TYPE_VALID;
		pte |= vaddr & ((ARM_TT_L2_SIZE - 1) & ARM_PTE_PAGE_MASK);
		pte &= ~ARM_PTE_HINT_MASK;
		return pte;
	}

	ttp = (tt_entry_t *)phystokv(tte & ARM_TTE_TABLE_MASK);

	ptep = ttp + ((vaddr & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
	pte = *ptep;
	pte &= ~ARM_PTE_HINT_MASK;
	return pte;
}

/*
 * arm_vm_prepare_kernel_el0_mappings:
 *   alloc_only: Indicates if PTE values should be copied from the EL1 kernel
 *     mappings.
 *
 * This function expands the kernel page tables to support the EL0 kernel
 * mappings, and conditionally installs the PTE values for the EL0 kernel
 * mappings (if alloc_only is false).
 */
static void
arm_vm_prepare_kernel_el0_mappings(bool alloc_only)
{
	pt_entry_t pte = 0;
	vm_offset_t start = ((vm_offset_t)&ExceptionVectorsBase) & ~PAGE_MASK;
	vm_offset_t end = (((vm_offset_t)&ExceptionVectorsEnd) + PAGE_MASK) & ~PAGE_MASK;
	vm_offset_t cur = 0;
	vm_offset_t cur_fixed = 0;

	/* Expand for/map the exception vectors in the EL0 kernel mappings. */
	for (cur = start, cur_fixed = ARM_KERNEL_PROTECT_EXCEPTION_START; cur < end; cur += ARM_PGBYTES, cur_fixed += ARM_PGBYTES) {
		/*
		 * We map the exception vectors at a different address than that
		 * of the kernelcache to avoid sharing page table pages with the
		 * kernelcache (as this may cause issues with TLB caching of
		 * page table pages).
		 */
		if (!alloc_only) {
			pte = arm_vm_kernel_pte(cur);
		}

		arm_vm_kernel_el1_map(cur_fixed, pte);
		arm_vm_kernel_el0_map(cur_fixed, pte);
	}

	__builtin_arm_dmb(DMB_ISH);
	__builtin_arm_isb(ISB_SY);

	if (!alloc_only) {
		/*
		 * If we have created the alternate exception vector mappings,
		 * the boot CPU may now switch over to them.
		 */
		set_vbar_el1(ARM_KERNEL_PROTECT_EXCEPTION_START);
		__builtin_arm_isb(ISB_SY);
	}
}

/*
 * arm_vm_populate_kernel_el0_mappings:
 *
 * This function adds all required mappings to the EL0 kernel mappings.
 */
static void
arm_vm_populate_kernel_el0_mappings(void)
{
	arm_vm_prepare_kernel_el0_mappings(FALSE);
}

/*
 * arm_vm_expand_kernel_el0_mappings:
 *
 * This function expands the kernel page tables to accommodate the EL0 kernel
 * mappings.
 */
static void
arm_vm_expand_kernel_el0_mappings(void)
{
	arm_vm_prepare_kernel_el0_mappings(TRUE);
}
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
extern void bootstrap_instructions;

/*
 * arm_replace_identity_map takes the V=P map that we construct in start.s
 * and repurposes it in order to have it map only the page we need in order
 * to turn on the MMU.  This prevents us from running into issues where
 * KTRR will cause us to fault on executable block mappings that cross the
 * KTRR boundary.
 */
static void
arm_replace_identity_map(void)
{
	vm_offset_t addr;
	pmap_paddr_t paddr;

	pmap_paddr_t l1_ptp_phys = 0;
	tt_entry_t *l1_ptp_virt = NULL;
	tt_entry_t *tte1 = NULL;
	pmap_paddr_t l2_ptp_phys = 0;
	tt_entry_t *l2_ptp_virt = NULL;
	tt_entry_t *tte2 = NULL;
	pmap_paddr_t l3_ptp_phys = 0;
	pt_entry_t *l3_ptp_virt = NULL;
	pt_entry_t *ptep = NULL;

	addr = ((vm_offset_t)&bootstrap_instructions) & ~ARM_PGMASK;
	paddr = kvtophys(addr);

	/*
	 * Grab references to the V=P page tables, and allocate an L3 page.
	 */
	l1_ptp_phys = kvtophys((vm_offset_t)&bootstrap_pagetables);
	l1_ptp_virt = (tt_entry_t *)phystokv(l1_ptp_phys);
	tte1 = &l1_ptp_virt[L1_TABLE_INDEX(paddr)];

	l2_ptp_virt = L2_TABLE_VA(tte1);
	l2_ptp_phys = (*tte1) & ARM_TTE_TABLE_MASK;
	tte2 = &l2_ptp_virt[L2_TABLE_INDEX(paddr)];

	l3_ptp_virt = (pt_entry_t *)alloc_ptpage(TRUE);
	l3_ptp_phys = kvtophys((vm_offset_t)l3_ptp_virt);
	ptep = &l3_ptp_virt[L3_TABLE_INDEX(paddr)];

	/*
	 * Replace the large V=P mapping with a mapping that provides only the
	 * mappings needed to turn on the MMU.
	 */

	bzero(l1_ptp_virt, ARM_PGBYTES);
	*tte1 = ARM_TTE_BOOT_TABLE | (l2_ptp_phys & ARM_TTE_TABLE_MASK);

	bzero(l2_ptp_virt, ARM_PGBYTES);
	*tte2 = ARM_TTE_BOOT_TABLE | (l3_ptp_phys & ARM_TTE_TABLE_MASK);

	*ptep = (paddr & ARM_PTE_MASK) |
	    ARM_PTE_TYPE_VALID |
	    ARM_PTE_SH(SH_OUTER_MEMORY) |
	    ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) |
	    ARM_PTE_AF |
	    ARM_PTE_AP(AP_RONA) |
	    ARM_PTE_NX;
}
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

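/*
 * Note: the PTE installed above uses AP_RONA (read-only, no EL0 access) and
 * sets ARM_PTE_NX but not ARM_PTE_PNX; the page containing
 * bootstrap_instructions must remain executable at EL1 so the MMU can be
 * enabled from it.
 */
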
tt_entry_t *arm_kva_to_tte(vm_offset_t);

tt_entry_t *
arm_kva_to_tte(vm_offset_t va)
{
	tt_entry_t *tte1, *tte2;
	tte1 = cpu_tte + L1_TABLE_INDEX(va);
	tte2 = L2_TABLE_VA(tte1) + L2_TABLE_INDEX(va);

	return tte2;
}

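/*
 * Note: arm_kva_to_tte() resolves a kernel VA to its L2 TTE by indexing the
 * root table (cpu_tte) with L1_TABLE_INDEX(va) and following that entry to
 * the L2 table via L2_TABLE_VA(); it assumes the VA is covered by a valid
 * L1 entry.
 */
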
#if XNU_MONITOR

static inline pt_entry_t *
arm_kva_to_pte(vm_offset_t va)
{
	tt_entry_t *tte2 = arm_kva_to_tte(va);
	return L3_TABLE_VA(tte2) + L3_TABLE_INDEX(va);
}

#endif

#define ARM64_GRANULE_ALLOW_BLOCK (1 << 0)
#define ARM64_GRANULE_ALLOW_HINT (1 << 1)

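/*
 * Note: a granule argument of 0 forces page-granular (L3) mappings;
 * ARM64_GRANULE_ALLOW_BLOCK additionally permits L2 block mappings for
 * aligned, fully covered ranges, and ARM64_GRANULE_ALLOW_HINT permits the
 * contiguous-hint bit on runs of PTEs.  Callers combine them, e.g.
 * ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT.
 */
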
/*
 * arm_vm_page_granular_helper updates protections at the L3 level.  It will (if
 * necessary) allocate a page for the L3 table and update the corresponding L2
 * entry.  Then, it will iterate over the L3 table, updating protections as necessary.
 * This expects to be invoked on an L2 entry or sub-L2-entry granularity, so this should
 * not be invoked from a context that does not do L2 iteration separately (basically,
 * don't call this except from arm_vm_page_granular_prot).
 *
 * unsigned granule: 0 => force to page granule, or a combination of
 * ARM64_GRANULE_* flags declared above.
 */

static void
arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, pmap_paddr_t pa_offset,
    int pte_prot_APX, int pte_prot_XN, unsigned granule,
    pt_entry_t **deferred_pte, pt_entry_t *deferred_ptmp)
{
	if (va & ARM_TT_L2_OFFMASK) { /* ragged edge hanging over a ARM_TT_L2_SIZE boundary */
		tt_entry_t *tte2;
		tt_entry_t tmplate;
		pmap_paddr_t pa;
		pt_entry_t *ppte, *recursive_pte = NULL, ptmp, recursive_ptmp = 0;
		addr64_t ppte_phys;
		unsigned i;

		va &= ~ARM_TT_L2_OFFMASK;
		pa = va - gVirtBase + gPhysBase - pa_offset;

		if (pa >= real_avail_end) {
			return;
		}

		tte2 = arm_kva_to_tte(va);

		assert(_end >= va);
		tmplate = *tte2;

		if (ARM_TTE_TYPE_TABLE == (tmplate & ARM_TTE_TYPE_MASK)) {
			/* pick up the existing page table. */
			ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK));
		} else {
			// TTE must be reincarnated with page level mappings.

			// ... but we don't want to break up blocks on live
			// translation tables.
			assert(!kva_active);

			ppte = (pt_entry_t*)alloc_ptpage(pa_offset == 0);
			bzero(ppte, ARM_PGBYTES);
			ppte_phys = kvtophys((vm_offset_t)ppte);

			*tte2 = pa_to_tte(ppte_phys) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID;
		}

		vm_offset_t len = _end - va;
		if ((pa + len) > real_avail_end) {
			_end -= (pa + len - real_avail_end);
		}
		assert((start - gVirtBase + gPhysBase - pa_offset) >= gPhysBase);

		/* Round up to the nearest PAGE_SIZE boundary when creating mappings:
		 * PAGE_SIZE may be a multiple of ARM_PGBYTES, and we don't want to leave
		 * a ragged non-PAGE_SIZE-aligned edge. */
		vm_offset_t rounded_end = round_page(_end);
		/* Apply the desired protections to the specified page range */
		for (i = 0; i <= (ARM_TT_L3_INDEX_MASK >> ARM_TT_L3_SHIFT); i++) {
			if ((start <= va) && (va < rounded_end)) {
				ptmp = pa | ARM_PTE_AF | ARM_PTE_SH(SH_OUTER_MEMORY) | ARM_PTE_TYPE;
				ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
				ptmp = ptmp | ARM_PTE_AP(pte_prot_APX);
				ptmp = ptmp | ARM_PTE_NX;
#if __ARM_KERNEL_PROTECT__
				ptmp = ptmp | ARM_PTE_NG;
#endif /* __ARM_KERNEL_PROTECT__ */

				if (pte_prot_XN) {
					ptmp = ptmp | ARM_PTE_PNX;
				}

				/*
				 * If we can, apply the contiguous hint to this range.  The hint is
				 * applicable if the current address falls within a hint-sized range that will
				 * be fully covered by this mapping request.
				 */
				if ((va >= round_up_pte_hint_address(start)) && (round_up_pte_hint_address(va + 1) <= _end) &&
				    (granule & ARM64_GRANULE_ALLOW_HINT) && use_contiguous_hint) {
					assert((va & ((1 << ARM_PTE_HINT_ADDR_SHIFT) - 1)) == ((pa & ((1 << ARM_PTE_HINT_ADDR_SHIFT) - 1))));
					ptmp |= ARM_PTE_HINT;
					/* Do not attempt to reapply the hint bit to an already-active mapping.
					 * This very likely means we're attempting to change attributes on an already-active mapping,
					 * which violates the requirement of the hint bit. */
					assert(!kva_active || (ppte[i] == ARM_PTE_TYPE_FAULT));
				}
				/*
				 * Do not change the contiguous bit on an active mapping.  Even in a single-threaded
				 * environment, it's possible for prefetch to produce a TLB conflict by trying to pull in
				 * a hint-sized entry on top of one or more existing page-sized entries.  It's also useful
				 * to make sure we're not trying to unhint a sub-range of a larger hinted range, which
				 * could produce a later TLB conflict.
				 */
				assert(!kva_active || (ppte[i] == ARM_PTE_TYPE_FAULT) || ((ppte[i] & ARM_PTE_HINT) == (ptmp & ARM_PTE_HINT)));

				/*
				 * If we reach an entry that maps the current pte page, delay updating it until the very end.
				 * Otherwise we might end up making the PTE page read-only, leading to a fault later on in
				 * this function if we manage to outrun the TLB.  This can happen on KTRR-enabled devices when
				 * marking segDATACONST read-only.  Mappings for this region may straddle a PT page boundary,
				 * so we must also defer assignment of the following PTE.  We will assume that if the region
				 * were to require one or more full L3 pages, it would instead use L2 blocks where possible,
				 * therefore only requiring at most one L3 page at the beginning and one at the end.
				 */
				if (kva_active && ((pt_entry_t*)(phystokv(pa)) == ppte)) {
					assert(recursive_pte == NULL);
					assert(granule & ARM64_GRANULE_ALLOW_BLOCK);
					recursive_pte = &ppte[i];
					recursive_ptmp = ptmp;
				} else if ((deferred_pte != NULL) && (&ppte[i] == &recursive_pte[1])) {
					assert(*deferred_pte == NULL);
					assert(deferred_ptmp != NULL);
					*deferred_pte = &ppte[i];
					*deferred_ptmp = ptmp;
				} else {
					ppte[i] = ptmp;
				}
			}

			va += ARM_PGBYTES;
			pa += ARM_PGBYTES;
		}
		if (recursive_pte != NULL) {
			*recursive_pte = recursive_ptmp;
		}
	}
}

/*
 * arm_vm_page_granular_prot updates protections by iterating over the L2 entries and
 * changing them.  If a particular chunk necessitates L3 entries (for reasons of
 * alignment or length, or an explicit request that the entry be fully expanded), we
 * hand off to arm_vm_page_granular_helper to deal with the L3 chunk of the logic.
 */
static void
arm_vm_page_granular_prot(vm_offset_t start, unsigned long size, pmap_paddr_t pa_offset,
    int tte_prot_XN, int pte_prot_APX, int pte_prot_XN,
    unsigned granule)
{
	pt_entry_t *deferred_pte = NULL, deferred_ptmp = 0;
	vm_offset_t _end = start + size;
	vm_offset_t align_start = (start + ARM_TT_L2_OFFMASK) & ~ARM_TT_L2_OFFMASK;

	if (size == 0x0UL) {
		return;
	}

	if (align_start > _end) {
		arm_vm_page_granular_helper(start, _end, start, pa_offset, pte_prot_APX, pte_prot_XN, granule, NULL, NULL);
		return;
	}

	arm_vm_page_granular_helper(start, align_start, start, pa_offset, pte_prot_APX, pte_prot_XN, granule, &deferred_pte, &deferred_ptmp);

	while ((_end - align_start) >= ARM_TT_L2_SIZE) {
		if (!(granule & ARM64_GRANULE_ALLOW_BLOCK)) {
			arm_vm_page_granular_helper(align_start, align_start + ARM_TT_L2_SIZE, align_start + 1, pa_offset,
			    pte_prot_APX, pte_prot_XN, granule, NULL, NULL);
		} else {
			pmap_paddr_t pa = align_start - gVirtBase + gPhysBase - pa_offset;
			assert((pa & ARM_TT_L2_OFFMASK) == 0);
			tt_entry_t *tte2;
			tt_entry_t tmplate;

			tte2 = arm_kva_to_tte(align_start);

			if ((pa >= gPhysBase) && (pa < real_avail_end)) {
				tmplate = (pa & ARM_TTE_BLOCK_L2_MASK) | ARM_TTE_TYPE_BLOCK
				    | ARM_TTE_VALID | ARM_TTE_BLOCK_AF | ARM_TTE_BLOCK_NX
				    | ARM_TTE_BLOCK_AP(pte_prot_APX) | ARM_TTE_BLOCK_SH(SH_OUTER_MEMORY)
				    | ARM_TTE_BLOCK_ATTRINDX(CACHE_ATTRINDX_WRITEBACK);

#if __ARM_KERNEL_PROTECT__
				tmplate = tmplate | ARM_TTE_BLOCK_NG;
#endif /* __ARM_KERNEL_PROTECT__ */
				if (tte_prot_XN) {
					tmplate = tmplate | ARM_TTE_BLOCK_PNX;
				}

				*tte2 = tmplate;
			}
		}
		align_start += ARM_TT_L2_SIZE;
	}

	if (align_start < _end) {
		arm_vm_page_granular_helper(align_start, _end, _end, pa_offset, pte_prot_APX, pte_prot_XN, granule, &deferred_pte, &deferred_ptmp);
	}

	if (deferred_pte != NULL) {
		*deferred_pte = deferred_ptmp;
	}
}

static inline void
arm_vm_page_granular_RNX(vm_offset_t start, unsigned long size, unsigned granule)
{
	arm_vm_page_granular_prot(start, size, 0, 1, AP_RONA, 1, granule);
}

static inline void
arm_vm_page_granular_ROX(vm_offset_t start, unsigned long size, unsigned granule)
{
	arm_vm_page_granular_prot(start, size, 0, 0, AP_RONA, 0, granule);
}

static inline void
arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, unsigned granule)
{
	arm_vm_page_granular_prot(start, size, 0, 1, AP_RWNA, 1, granule);
}

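/*
 * Note: the wrappers above encode (tte_prot_XN, pte_prot_APX, pte_prot_XN)
 * tuples for arm_vm_page_granular_prot(): RNX is read-only, never-execute
 * (AP_RONA, XN set); ROX is read-only but kernel-executable (AP_RONA, XN
 * clear); RWNX is writable, never-execute (AP_RWNA, XN set).
 */
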
/* used in the chosen/memory-map node, populated by iBoot. */
typedef struct MemoryMapFileInfo {
	vm_offset_t paddr;
	size_t length;
} MemoryMapFileInfo;

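/*
 * Note: each chosen/memory-map property read below (e.g. "TrustCache",
 * "AuxKC", "AuxKC-mach_header") carries a single {paddr, length} pair in
 * this layout, which is why those reads assert that the property size
 * equals sizeof(MemoryMapFileInfo).
 */
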
// Populate seg...AuxKC and fixup AuxKC permissions
static bool
arm_vm_auxkc_init(void)
{
	if (auxkc_mh == 0 || auxkc_base == 0) {
		return false; // no auxKC.
	}

	/* Fixup AuxKC and populate seg*AuxKC globals used below */
	arm_auxkc_init((void*)auxkc_mh, (void*)auxkc_base);

	if (segLOWESTAuxKC != segLOWEST) {
		panic("segLOWESTAuxKC (%p) not equal to segLOWEST (%p). auxkc_mh: %p, auxkc_base: %p",
		    (void*)segLOWESTAuxKC, (void*)segLOWEST,
		    (void*)auxkc_mh, (void*)auxkc_base);
	}

	/*
	 * The AuxKC LINKEDIT segment needs to be covered by the RO region but is excluded
	 * from the RO address range returned by kernel_collection_adjust_mh_addrs().
	 * Ensure the highest non-LINKEDIT address in the AuxKC is the current end of
	 * its RO region before extending it.
	 */
	assert(segHIGHESTROAuxKC == segHIGHESTNLEAuxKC);
	assert(segHIGHESTAuxKC >= segHIGHESTROAuxKC);
	if (segHIGHESTAuxKC > segHIGHESTROAuxKC) {
		segHIGHESTROAuxKC = segHIGHESTAuxKC;
	}

	/*
	 * The AuxKC RO region must be right below the device tree/trustcache so that it can be covered
	 * by CTRR, and the AuxKC RX region must be within the RO region.
	 */
	assert(segHIGHESTROAuxKC == auxkc_right_above);
	assert(segHIGHESTRXAuxKC <= segHIGHESTROAuxKC);
	assert(segLOWESTRXAuxKC <= segHIGHESTRXAuxKC);
	assert(segLOWESTROAuxKC <= segLOWESTRXAuxKC);
	assert(segLOWESTAuxKC <= segLOWESTROAuxKC);

	if (segHIGHESTRXAuxKC < segLOWEST) {
		arm_vm_page_granular_RNX(segHIGHESTRXAuxKC, segLOWEST - segHIGHESTRXAuxKC, 0);
	}
	if (segLOWESTRXAuxKC < segHIGHESTRXAuxKC) {
		arm_vm_page_granular_ROX(segLOWESTRXAuxKC, segHIGHESTRXAuxKC - segLOWESTRXAuxKC, 0); // Refined in OSKext::readPrelinkedExtensions
	}
	if (segLOWESTROAuxKC < segLOWESTRXAuxKC) {
		arm_vm_page_granular_RNX(segLOWESTROAuxKC, segLOWESTRXAuxKC - segLOWESTROAuxKC, 0);
	}
	if (segLOWESTAuxKC < segLOWESTROAuxKC) {
		arm_vm_page_granular_RWNX(segLOWESTAuxKC, segLOWESTROAuxKC - segLOWESTAuxKC, 0);
	}

	return true;
}

void
arm_vm_prot_init(__unused boot_args * args)
{
	segLOWESTTEXT = UINT64_MAX;
	if (segSizePRELINKTEXT && (segPRELINKTEXTB < segLOWESTTEXT)) {
		segLOWESTTEXT = segPRELINKTEXTB;
	}
	assert(segSizeTEXT);
	if (segTEXTB < segLOWESTTEXT) {
		segLOWESTTEXT = segTEXTB;
	}
	assert(segLOWESTTEXT < UINT64_MAX);

	segEXTRADATA = segLOWESTTEXT;
	segSizeEXTRADATA = 0;

	segLOWEST = segLOWESTTEXT;
	segLOWESTRO = segLOWESTTEXT;

	if (segLOWESTKC && segLOWESTKC < segLOWEST) {
		/*
		 * kernel collections have segments below the kernel. In particular the collection mach header
		 * is below PRELINK_TEXT and is not covered by any other segments already tracked.
		 */
		arm_vm_page_granular_RNX(segLOWESTKC, segLOWEST - segLOWESTKC, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
		segLOWEST = segLOWESTKC;
		if (segLOWESTROKC && segLOWESTROKC < segLOWESTRO) {
			segLOWESTRO = segLOWESTROKC;
		}
		if (segHIGHESTROKC && segHIGHESTROKC > segHIGHESTRO) {
			segHIGHESTRO = segHIGHESTROKC;
		}
	}

	DTEntry memory_map;
	MemoryMapFileInfo const *trustCacheRange;
	unsigned int trustCacheRangeSize;
	int err;

	if (SecureDTIsLockedDown()) {
		segEXTRADATA = (vm_offset_t)PE_state.deviceTreeHead;
		segSizeEXTRADATA = PE_state.deviceTreeSize;
	}

	err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
	assert(err == kSuccess);

	err = SecureDTGetProperty(memory_map, "TrustCache", (void const **)&trustCacheRange, &trustCacheRangeSize);
	if (err == kSuccess) {
		assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo));

		if (segSizeEXTRADATA == 0) {
			segEXTRADATA = phystokv(trustCacheRange->paddr);
			segSizeEXTRADATA = trustCacheRange->length;
		} else {
			segSizeEXTRADATA += trustCacheRange->length;
		}
	}

	if (segSizeEXTRADATA != 0) {
		if (segEXTRADATA <= segLOWEST) {
			segLOWEST = segEXTRADATA;
			if (segEXTRADATA <= segLOWESTRO) {
				segLOWESTRO = segEXTRADATA;
			}
		}
#if !(DEBUG || DEVELOPMENT)
		else {
			panic("EXTRADATA is in an unexpected place: %#lx > %#lx", segEXTRADATA, segLOWEST);
		}
#endif /* !(DEBUG || DEVELOPMENT) */

		arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
	}

	const MemoryMapFileInfo *auxKC_range, *auxKC_header_range;
	unsigned int auxKC_range_size, auxKC_header_range_size;

	err = SecureDTGetProperty(memory_map, "AuxKC", (const void**)&auxKC_range,
	    &auxKC_range_size);
	if (err != kSuccess) {
		goto noAuxKC;
	}
	assert(auxKC_range_size == sizeof(MemoryMapFileInfo));
	err = SecureDTGetProperty(memory_map, "AuxKC-mach_header",
	    (const void**)&auxKC_header_range, &auxKC_header_range_size);
	if (err != kSuccess) {
		goto noAuxKC;
	}
	assert(auxKC_header_range_size == sizeof(MemoryMapFileInfo));

	auxkc_mh = phystokv(auxKC_header_range->paddr);
	auxkc_base = phystokv(auxKC_range->paddr);
	if (!auxkc_mh || !auxkc_base) {
		goto noAuxKC;
	}

	if (auxkc_base < segLOWEST) {
		auxkc_right_above = segLOWEST;
		segLOWEST = auxkc_base;
	} else {
		panic("auxkc_base (%p) not below segLOWEST (%p)", (void*)auxkc_base, (void*)segLOWEST);
	}

	/* Map AuxKC RWNX initially so that arm_vm_auxkc_init can traverse
	 * it and apply fixups (after we're off the bootstrap translation
	 * tables).
	 */
	arm_vm_page_granular_RWNX(auxkc_base, auxKC_range->length, 0);

noAuxKC:
	/* Map coalesced kext TEXT segment RWNX for now */
	arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, ARM64_GRANULE_ALLOW_BLOCK); // Refined in OSKext::readPrelinkedExtensions

	/* Map coalesced kext DATA_CONST segment RWNX (could be empty) */
	arm_vm_page_granular_RWNX(segPLKDATACONSTB, segSizePLKDATACONST, ARM64_GRANULE_ALLOW_BLOCK); // Refined in OSKext::readPrelinkedExtensions

	/* Map coalesced kext TEXT_EXEC segment RX (could be empty) */
	arm_vm_page_granular_ROX(segPLKTEXTEXECB, segSizePLKTEXTEXEC, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // Refined in OSKext::readPrelinkedExtensions

	/* if new segments not present, set space between PRELINK_TEXT and xnu TEXT to RWNX
	 * otherwise we no longer expect any space between the coalesced kext read only segments and xnu rosegments
	 */
	if (!segSizePLKDATACONST && !segSizePLKTEXTEXEC) {
		if (segSizePRELINKTEXT) {
			arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT, segTEXTB - (segPRELINKTEXTB + segSizePRELINKTEXT),
			    ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
		}
	} else {
		/*
		 * If we have the new segments, we should still protect the gap between kext
		 * read-only pages and kernel read-only pages, in the event that this gap
		 * exists.
		 */
		if ((segPLKDATACONSTB + segSizePLKDATACONST) < segTEXTB) {
			arm_vm_page_granular_RWNX(segPLKDATACONSTB + segSizePLKDATACONST, segTEXTB - (segPLKDATACONSTB + segSizePLKDATACONST),
			    ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
		}
	}

	/*
	 * Protection on kernel text is loose here to allow shenanigans early on.  These
	 * protections are tightened in arm_vm_prot_finalize().  This is necessary because
	 * we currently patch LowResetVectorBase in cpu.c.
	 *
	 * TEXT segment contains mach headers and other non-executable data.  This will become RONX later.
	 */
	arm_vm_page_granular_RNX(segTEXTB, segSizeTEXT, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);

	/* Can DATACONST start out and stay RNX?
	 * NO, stuff in this segment gets modified during startup (viz. mac_policy_init()/mac_policy_list)
	 * Make RNX in prot_finalize
	 */
	arm_vm_page_granular_RWNX(segDATACONSTB, segSizeDATACONST, ARM64_GRANULE_ALLOW_BLOCK);

	arm_vm_page_granular_ROX(segTEXTEXECB, segSizeTEXTEXEC, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);

#if XNU_MONITOR
	arm_vm_page_granular_ROX(segPPLTEXTB, segSizePPLTEXT, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
	arm_vm_page_granular_ROX(segPPLTRAMPB, segSizePPLTRAMP, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
	arm_vm_page_granular_RNX(segPPLDATACONSTB, segSizePPLDATACONST, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
#endif

	/* DATA segment will remain RWNX */
	arm_vm_page_granular_RWNX(segDATAB, segSizeDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
#if XNU_MONITOR
	arm_vm_page_granular_RWNX(segPPLDATAB, segSizePPLDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
#endif

	arm_vm_page_granular_RWNX(segHIBDATAB, segSizeHIBDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);

	arm_vm_page_granular_RWNX(segBOOTDATAB, segSizeBOOTDATA, 0);
	arm_vm_page_granular_RNX((vm_offset_t)&intstack_low_guard, PAGE_MAX_SIZE, 0);
	arm_vm_page_granular_RNX((vm_offset_t)&intstack_high_guard, PAGE_MAX_SIZE, 0);
	arm_vm_page_granular_RNX((vm_offset_t)&excepstack_high_guard, PAGE_MAX_SIZE, 0);

	arm_vm_page_granular_ROX(segKLDB, segSizeKLD, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
	arm_vm_page_granular_RNX(segKLDDATAB, segSizeKLDDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
	arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT);
	arm_vm_page_granular_RWNX(segPLKLINKEDITB, segSizePLKLINKEDIT, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // Coalesced kext LINKEDIT segment
1346 | arm_vm_page_granular_ROX(segLASTB, segSizeLAST, ARM64_GRANULE_ALLOW_BLOCK); // __LAST may be empty, but we cannot assume this | |
f427ee49 A |
1347 | if (segLASTDATACONSTB) { |
1348 | arm_vm_page_granular_RWNX(segLASTDATACONSTB, segSizeLASTDATACONST, ARM64_GRANULE_ALLOW_BLOCK); // __LASTDATA_CONST may be empty, but we cannot assume this | |
1349 | } | |
cb323159 | 1350 | arm_vm_page_granular_RWNX(segPRELINKDATAB, segSizePRELINKDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // Prelink __DATA for kexts (RW data) |
5ba3f43e | 1351 | |
f427ee49 | 1352 | if (segSizePLKLLVMCOV > 0) { |
cb323159 | 1353 | arm_vm_page_granular_RWNX(segPLKLLVMCOVB, segSizePLKLLVMCOV, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // LLVM code coverage data |
f427ee49 | 1354 | } |
cb323159 | 1355 | arm_vm_page_granular_RWNX(segPRELINKINFOB, segSizePRELINKINFO, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); /* PreLinkInfoDictionary */ |
5ba3f43e | 1356 | |
f427ee49 A |
1357 | /* Record the bounds of the kernelcache. */ |
1358 | vm_kernelcache_base = segLOWEST; | |
1359 | vm_kernelcache_top = end_kern; | |
d9a64523 | 1360 | } |
5ba3f43e | 1361 | |
d9a64523 A |
1362 | /* |
1363 | * return < 0 for a < b | |
1364 | * 0 for a == b | |
1365 | * > 0 for a > b | |
1366 | */ | |
1367 | typedef int (*cmpfunc_t)(const void *a, const void *b); | |
1368 | ||
1369 | extern void | |
1370 | qsort(void *a, size_t n, size_t es, cmpfunc_t cmp); | |
1371 | ||
1372 | static int | |
1373 | cmp_ptov_entries(const void *a, const void *b) | |
1374 | { | |
1375 | const ptov_table_entry *entry_a = a; | |
1376 | const ptov_table_entry *entry_b = b; | |
1377 | // Sort in descending order of segment length | |
f427ee49 | 1378 | if (entry_a->len < entry_b->len) { |
d9a64523 | 1379 | return 1; |
f427ee49 | 1380 | } else if (entry_a->len > entry_b->len) { |
d9a64523 | 1381 | return -1; |
f427ee49 | 1382 | } else { |
d9a64523 | 1383 | return 0; |
f427ee49 | 1384 | } |
d9a64523 A |
1385 | } |
1386 | ||
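/*
 * A minimal sketch (not xnu's actual phystokv() implementation) of how the
 * sorted table is consumed: the translation path does a linear scan, which is
 * why cmp_ptov_entries() puts the largest (most frequently hit) ranges first.
 * The field names (pa, va, len) are the ones used in this file; the function
 * name is hypothetical.
 */
#if 0
static vm_map_address_t
ptov_lookup_sketch(const ptov_table_entry *table, pmap_paddr_t pa)
{
	for (unsigned int i = 0; i < PTOV_TABLE_SIZE; i++) {
		/* Each entry maps [pa, pa + len) to [va, va + len). */
		if ((pa >= table[i].pa) && (pa < (table[i].pa + table[i].len))) {
			return table[i].va + (pa - table[i].pa);
		}
	}
	return 0; /* address not covered by the physical aperture */
}
#endif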
1387 | SECURITY_READ_ONLY_LATE(static unsigned int) ptov_index = 0; | |
1388 | ||
f427ee49 | 1389 | #define ROUND_L1(addr) (((addr) + ARM_TT_L1_OFFMASK) & ~(ARM_TT_L1_OFFMASK)) |
d9a64523 A |
1390 | #define ROUND_TWIG(addr) (((addr) + ARM_TT_TWIG_OFFMASK) & ~(ARM_TT_TWIG_OFFMASK)) |
1391 | ||
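/*
 * Worked example, assuming a 4KB translation granule where ARM_TT_TWIG_OFFMASK
 * is 0x1FFFFF (a 2MB L2 "twig" block): ROUND_TWIG(0x80301000)
 * = (0x80301000 + 0x1FFFFF) & ~0x1FFFFF = 0x80400000, the next 2MB boundary.
 * An address already on a boundary is returned unchanged; ROUND_L1 behaves the
 * same way at L1 granularity.
 */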
1392 | static void | |
f427ee49 | 1393 | arm_vm_physmap_slide(ptov_table_entry *temp_ptov_table, vm_map_address_t orig_va, vm_size_t len, int pte_prot_APX, unsigned granule) |
d9a64523 A |
1394 | { |
1395 | pmap_paddr_t pa_offset; | |
1396 | ||
1397 | assert(ptov_index < PTOV_TABLE_SIZE); | |
1398 | assert((orig_va & ARM_PGMASK) == 0); | |
1399 | temp_ptov_table[ptov_index].pa = orig_va - gVirtBase + gPhysBase; | |
f427ee49 | 1400 | if (ptov_index == 0) { |
d9a64523 | 1401 | temp_ptov_table[ptov_index].va = physmap_base; |
f427ee49 | 1402 | } else { |
d9a64523 | 1403 | temp_ptov_table[ptov_index].va = temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len; |
f427ee49 | 1404 | } |
cb323159 | 1405 | if (granule & ARM64_GRANULE_ALLOW_BLOCK) { |
d9a64523 A |
1406 | vm_map_address_t orig_offset = temp_ptov_table[ptov_index].pa & ARM_TT_TWIG_OFFMASK; |
1407 | vm_map_address_t new_offset = temp_ptov_table[ptov_index].va & ARM_TT_TWIG_OFFMASK; | |
f427ee49 | 1408 | if (new_offset < orig_offset) { |
d9a64523 | 1409 | temp_ptov_table[ptov_index].va += (orig_offset - new_offset); |
f427ee49 | 1410 | } else if (new_offset > orig_offset) { |
d9a64523 | 1411 | temp_ptov_table[ptov_index].va = ROUND_TWIG(temp_ptov_table[ptov_index].va) + orig_offset; |
f427ee49 | 1412 | } |
d9a64523 A |
1413 | } |
1414 | assert((temp_ptov_table[ptov_index].va & ARM_PGMASK) == 0); | |
1415 | temp_ptov_table[ptov_index].len = round_page(len); | |
cb323159 A |
1416 | pa_offset = temp_ptov_table[ptov_index].va - orig_va; |
1417 | arm_vm_page_granular_prot(temp_ptov_table[ptov_index].va, temp_ptov_table[ptov_index].len, pa_offset, 1, pte_prot_APX, 1, granule); | |
d9a64523 | 1418 | ++ptov_index; |
5ba3f43e A |
1419 | } |
1420 | ||
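/*
 * Worked example of the block-alignment fixup above, assuming a 2MB twig:
 * if the physical range starts at twig offset 0x123000 but the candidate va
 * sits at twig offset 0x080000, the va is advanced by 0xa3000 so that
 * (va & ARM_TT_TWIG_OFFMASK) == (pa & ARM_TT_TWIG_OFFMASK).  Matching offsets
 * are what allow the mapper to use L2 block entries (and contiguous hints)
 * instead of falling back to page-granular L3 mappings.
 */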
c6bf4f31 A |
1421 | #if XNU_MONITOR |
1422 | ||
1423 | SECURITY_READ_ONLY_LATE(static boolean_t) keep_linkedit = FALSE; | |
1424 | ||
1425 | static void | |
f427ee49 | 1426 | arm_vm_physmap_init(boot_args *args) |
c6bf4f31 A |
1427 | { |
1428 | ptov_table_entry temp_ptov_table[PTOV_TABLE_SIZE]; | |
1429 | bzero(temp_ptov_table, sizeof(temp_ptov_table)); | |
1430 | ||
1431 | // This is memory that will either be handed back to the VM layer via ml_static_mfree(), | |
1432 | // or will be available for general-purpose use. Physical aperture mappings for this memory | |
1433 | // must be at page granularity, so that PPL ownership or cache attribute changes can be reflected | |
1434 | // in the physical aperture mappings. | |
1435 | ||
c6bf4f31 | 1436 | // Slid region between gPhysBase and beginning of protected text |
f427ee49 | 1437 | arm_vm_physmap_slide(temp_ptov_table, gVirtBase, segLOWEST - gVirtBase, AP_RWNA, 0); |
c6bf4f31 | 1438 | |
c3c9b80d A |
1439 | // kext bootstrap segments |
1440 | #if !defined(KERNEL_INTEGRITY_KTRR) && !defined(KERNEL_INTEGRITY_CTRR) | |
1441 | /* __KLD,__text is covered by the rorgn */ | |
f427ee49 | 1442 | arm_vm_physmap_slide(temp_ptov_table, segKLDB, segSizeKLD, AP_RONA, 0); |
c3c9b80d A |
1443 | #endif |
1444 | arm_vm_physmap_slide(temp_ptov_table, segKLDDATAB, segSizeKLDDATA, AP_RONA, 0); | |
c6bf4f31 A |
1445 | |
1446 | // Early-boot data | |
f427ee49 | 1447 | arm_vm_physmap_slide(temp_ptov_table, segBOOTDATAB, segSizeBOOTDATA, AP_RONA, 0); |
c6bf4f31 A |
1448 | |
1449 | #if KASAN_DYNAMIC_BLACKLIST | |
1450 | /* KASAN's dynamic blacklist needs to query the LINKEDIT segment at runtime. As such, the | |
1451 | * kext bootstrap code will not jettison LINKEDIT on kasan kernels, so don't bother to relocate it. */ | |
1452 | keep_linkedit = TRUE; | |
1453 | #else | |
1454 | PE_parse_boot_argn("keepsyms", &keep_linkedit, sizeof(keep_linkedit)); | |
f427ee49 A |
1455 | if (kernel_mach_header_is_in_fileset(&_mh_execute_header)) { |
1456 | keep_linkedit = TRUE; | |
1457 | } | |
c6bf4f31 A |
1458 | #endif |
1459 | if (!keep_linkedit) { | |
1460 | // Kernel LINKEDIT | |
f427ee49 | 1461 | arm_vm_physmap_slide(temp_ptov_table, segLINKB, segSizeLINK, AP_RWNA, 0); |
c6bf4f31 A |
1462 | |
1463 | // Prelinked kernel LINKEDIT | |
f427ee49 | 1464 | arm_vm_physmap_slide(temp_ptov_table, segPLKLINKEDITB, segSizePLKLINKEDIT, AP_RWNA, 0); |
c6bf4f31 A |
1465 | } |
1466 | ||
1467 | // Prelinked kernel plists | |
f427ee49 | 1468 | arm_vm_physmap_slide(temp_ptov_table, segPRELINKINFOB, segSizePRELINKINFO, AP_RWNA, 0); |
c6bf4f31 | 1469 | |
f427ee49 A |
1470 | // Device tree (if not locked down), ramdisk, boot args |
1471 | arm_vm_physmap_slide(temp_ptov_table, end_kern, (args->topOfKernelData - gPhysBase + gVirtBase) - end_kern, AP_RWNA, 0); | |
1472 | if (!SecureDTIsLockedDown()) { | |
1473 | PE_slide_devicetree(temp_ptov_table[ptov_index - 1].va - end_kern); | |
1474 | } | |
c6bf4f31 A |
1475 | |
1476 | // Remainder of physical memory | |
f427ee49 A |
1477 | arm_vm_physmap_slide(temp_ptov_table, (args->topOfKernelData - gPhysBase + gVirtBase), |
1478 | real_avail_end - args->topOfKernelData, AP_RWNA, 0); | |
c6bf4f31 | 1479 | |
f427ee49 | 1480 | assert((temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len) <= physmap_end); |
c6bf4f31 A |
1481 | |
1482 | // Sort in descending order of segment length. LUT traversal is linear, so largest (most likely used) | |
1483 | // segments should be placed earliest in the table to optimize lookup performance. | |
1484 | qsort(temp_ptov_table, PTOV_TABLE_SIZE, sizeof(temp_ptov_table[0]), cmp_ptov_entries); | |
1485 | ||
1486 | memcpy(ptov_table, temp_ptov_table, sizeof(ptov_table)); | |
1487 | } | |
1488 | ||
1489 | #else | |
d9a64523 A |
1490 | |
1491 | static void | |
f427ee49 | 1492 | arm_vm_physmap_init(boot_args *args) |
d9a64523 A |
1493 | { |
1494 | ptov_table_entry temp_ptov_table[PTOV_TABLE_SIZE]; | |
1495 | bzero(temp_ptov_table, sizeof(temp_ptov_table)); | |
1496 | ||
1497 | // Will be handed back to VM layer through ml_static_mfree() in arm_vm_prot_finalize() | |
f427ee49 | 1498 | arm_vm_physmap_slide(temp_ptov_table, gVirtBase, segLOWEST - gVirtBase, AP_RWNA, |
cb323159 | 1499 | ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); |
d9a64523 | 1500 | |
cb323159 | 1501 | arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, |
f427ee49 | 1502 | ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); /* Device Tree (if not locked down), RAM Disk (if present), bootArgs */ |
d9a64523 | 1503 | |
f427ee49 A |
1504 | arm_vm_physmap_slide(temp_ptov_table, (args->topOfKernelData - gPhysBase + gVirtBase), |
1505 | real_avail_end - args->topOfKernelData, AP_RWNA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // rest of physmem | |
d9a64523 | 1506 | |
f427ee49 | 1507 | assert((temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len) <= physmap_end); |
d9a64523 A |
1508 | |
1509 | // Sort in descending order of segment length. LUT traversal is linear, so largest (most likely used) | |
1510 | // segments should be placed earliest in the table to optimize lookup performance. | |
cb323159 | 1511 | qsort(temp_ptov_table, PTOV_TABLE_SIZE, sizeof(temp_ptov_table[0]), cmp_ptov_entries); |
d9a64523 A |
1512 | |
1513 | memcpy(ptov_table, temp_ptov_table, sizeof(ptov_table)); | |
1514 | } | |
1515 | ||
c6bf4f31 | 1516 | #endif // XNU_MONITOR |
d9a64523 | 1517 | |
5ba3f43e | 1518 | void |
d9a64523 | 1519 | arm_vm_prot_finalize(boot_args * args __unused) |
5ba3f43e | 1520 | { |
5ba3f43e A |
1521 | /* |
1522 | * At this point, we are far enough along in the boot process that it will be | |
1523 | * safe to free up all of the memory preceding the kernel. It may in fact
1524 | * be safe to do this earlier. | |
1525 | * | |
1526 | * This keeps the memory in the V-to-P mapping, but advertises it to the VM | |
1527 | * as usable. | |
1528 | */ | |
1529 | ||
1530 | /* | |
1531 | * if an old-style PRELINK segment exists, free the memory before it, and the memory between it and xnu TEXT;
1532 | * otherwise we're dealing with a new-style kernel cache, so we should just free the
1533 | * memory before the PRELINK_TEXT segment, since the rest of the kext read-only data segments
1534 | * should be immediately followed by XNU's TEXT segment
1535 | */ | |
1536 | ||
cb323159 | 1537 | ml_static_mfree(phystokv(gPhysBase), segLOWEST - gVirtBase); |
5ba3f43e | 1538 | |
d9a64523 A |
1539 | /* |
1540 | * KTRR support means we will be mucking with these pages and trying to | |
1541 | * protect them; we cannot free the pages to the VM if we do this. | |
1542 | */ | |
1543 | if (!segSizePLKDATACONST && !segSizePLKTEXTEXEC && segSizePRELINKTEXT) { | |
5ba3f43e A |
1544 | /* If new segments not present, PRELINK_TEXT is not dynamically sized, free DRAM between it and xnu TEXT */ |
1545 | ml_static_mfree(segPRELINKTEXTB + segSizePRELINKTEXT, segTEXTB - (segPRELINKTEXTB + segSizePRELINKTEXT)); | |
1546 | } | |
1547 | ||
5ba3f43e | 1548 | /* tighten permissions on kext read only data and code */ |
cb323159 A |
1549 | arm_vm_page_granular_RNX(segPRELINKTEXTB, segSizePRELINKTEXT, ARM64_GRANULE_ALLOW_BLOCK); |
1550 | arm_vm_page_granular_RNX(segPLKDATACONSTB, segSizePLKDATACONST, ARM64_GRANULE_ALLOW_BLOCK); | |
5ba3f43e | 1551 | |
d9a64523 A |
1552 | cpu_stack_alloc(&BootCpuData); |
1553 | arm64_replace_bootstack(&BootCpuData); | |
1554 | ml_static_mfree(phystokv(segBOOTDATAB - gVirtBase + gPhysBase), segSizeBOOTDATA); | |
1555 | ||
5c9f4661 A |
1556 | #if __ARM_KERNEL_PROTECT__ |
1557 | arm_vm_populate_kernel_el0_mappings(); | |
1558 | #endif /* __ARM_KERNEL_PROTECT__ */ | |
1559 | ||
c6bf4f31 | 1560 | #if XNU_MONITOR |
c3c9b80d A |
1561 | #if !defined(KERNEL_INTEGRITY_KTRR) && !defined(KERNEL_INTEGRITY_CTRR) |
1562 | /* __KLD,__text is covered by the rorgn */ | |
c6bf4f31 A |
1563 | for (vm_offset_t va = segKLDB; va < (segKLDB + segSizeKLD); va += ARM_PGBYTES) { |
1564 | pt_entry_t *pte = arm_kva_to_pte(va); | |
1565 | *pte = ARM_PTE_EMPTY; | |
1566 | } | |
c3c9b80d A |
1567 | #endif |
1568 | for (vm_offset_t va = segKLDDATAB; va < (segKLDDATAB + segSizeKLDDATA); va += ARM_PGBYTES) { | |
1569 | pt_entry_t *pte = arm_kva_to_pte(va); | |
1570 | *pte = ARM_PTE_EMPTY; | |
1571 | } | |
c6bf4f31 A |
1572 | /* Clear the original stack mappings; these pages should be mapped through ptov_table. */ |
1573 | for (vm_offset_t va = segBOOTDATAB; va < (segBOOTDATAB + segSizeBOOTDATA); va += ARM_PGBYTES) { | |
1574 | pt_entry_t *pte = arm_kva_to_pte(va); | |
1575 | *pte = ARM_PTE_EMPTY; | |
1576 | } | |
1577 | /* Clear the original PRELINKINFO mapping. This segment should be jettisoned during I/O Kit | |
1578 | * initialization before we reach this point. */ | |
1579 | for (vm_offset_t va = segPRELINKINFOB; va < (segPRELINKINFOB + segSizePRELINKINFO); va += ARM_PGBYTES) { | |
1580 | pt_entry_t *pte = arm_kva_to_pte(va); | |
1581 | *pte = ARM_PTE_EMPTY; | |
1582 | } | |
1583 | if (!keep_linkedit) { | |
1584 | for (vm_offset_t va = segLINKB; va < (segLINKB + segSizeLINK); va += ARM_PGBYTES) { | |
1585 | pt_entry_t *pte = arm_kva_to_pte(va); | |
1586 | *pte = ARM_PTE_EMPTY; | |
1587 | } | |
1588 | for (vm_offset_t va = segPLKLINKEDITB; va < (segPLKLINKEDITB + segSizePLKLINKEDIT); va += ARM_PGBYTES) { | |
1589 | pt_entry_t *pte = arm_kva_to_pte(va); | |
1590 | *pte = ARM_PTE_EMPTY; | |
1591 | } | |
1592 | } | |
1593 | #endif /* XNU_MONITOR */ | |
d9a64523 | 1594 | |
c6bf4f31 | 1595 | #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) |
5ba3f43e A |
1596 | /* |
1597 | * __LAST,__pinst should no longer be executable. | |
1598 | */ | |
cb323159 | 1599 | arm_vm_page_granular_RNX(segLASTB, segSizeLAST, ARM64_GRANULE_ALLOW_BLOCK); |
5ba3f43e | 1600 | |
f427ee49 A |
1601 | /* __LASTDATA_CONST should no longer be writable. */ |
1602 | if (segLASTDATACONSTB) { | |
1603 | arm_vm_page_granular_RNX(segLASTDATACONSTB, segSizeLASTDATACONST, ARM64_GRANULE_ALLOW_BLOCK); | |
1604 | } | |
1605 | ||
c3c9b80d A |
1606 | /* |
1607 | * __KLD,__text should no longer be executable. | |
1608 | */ | |
1609 | arm_vm_page_granular_RNX(segKLDB, segSizeKLD, ARM64_GRANULE_ALLOW_BLOCK); | |
1610 | ||
5ba3f43e A |
1611 | /* |
1612 | * Must wait until all other region permissions are set before locking down DATA_CONST | |
1613 | * as the kernel static page tables live in DATA_CONST on KTRR enabled systems | |
1614 | * and will become immutable. | |
1615 | */ | |
1616 | #endif | |
5c9f4661 | 1617 | |
cb323159 | 1618 | arm_vm_page_granular_RNX(segDATACONSTB, segSizeDATACONST, ARM64_GRANULE_ALLOW_BLOCK); |
5ba3f43e | 1619 | |
d9a64523 | 1620 | __builtin_arm_dsb(DSB_ISH); |
5ba3f43e A |
1621 | flush_mmu_tlb(); |
1622 | } | |
1623 | ||
1624 | #define TBI_USER 0x1 | |
1625 | #define TBI_KERNEL 0x2 | |
1626 | ||
5ba3f43e A |
1627 | /* |
1628 | * TBI (top-byte ignore) is an ARMv8 feature that causes the top 8 bits of a
1629 | * virtual address to be ignored on memory accesses. It can be enabled separately for TTBR0 (user) and
f427ee49 | 1630 | * TTBR1 (kernel). We enable it by default for user addresses only.
5ba3f43e A |
1631 | */ |
1632 | static void | |
1633 | set_tbi(void) | |
1634 | { | |
5c9f4661 | 1635 | #if !__ARM_KERNEL_PROTECT__ |
5ba3f43e | 1636 | uint64_t old_tcr, new_tcr; |
5ba3f43e | 1637 | |
5ba3f43e | 1638 | old_tcr = new_tcr = get_tcr(); |
f427ee49 | 1639 | new_tcr |= TCR_TBI0_TOPBYTE_IGNORED; |
5ba3f43e A |
1640 | |
1641 | if (old_tcr != new_tcr) { | |
1642 | set_tcr(new_tcr); | |
1643 | sysreg_restore.tcr_el1 = new_tcr; | |
1644 | } | |
5c9f4661 | 1645 | #endif /* !__ARM_KERNEL_PROTECT__ */ |
5ba3f43e A |
1646 | } |
1647 | ||
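/*
 * Illustrative user-space sketch (not kernel code), assuming an arm64
 * environment where TBI is enabled for EL0: a tag stashed in pointer bits
 * [63:56] is ignored by address translation, so the tagged pointer can be
 * dereferenced without first stripping the tag.  All names here are
 * hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void *
tag_ptr(void *p, uint8_t tag)
{
	/* Keep bits [55:0], place the tag in the ignored top byte. */
	return (void *)(((uintptr_t)p & 0x00ffffffffffffffULL) | ((uintptr_t)tag << 56));
}

int
main(void)
{
	int value = 42;
	int *tagged = tag_ptr(&value, 0x5a);
	/* With TBI, translation uses only the low bits; this prints 42. */
	printf("%d\n", *tagged);
	return 0;
}
#endif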
f427ee49 A |
1648 | /* |
1649 | * Initialize and install blank (invalid) L2 page tables in an L1 translation table for a given VA range.
1650 | * | |
1651 | * This is a helper function used to build up the initial page tables for the kernel translation table. | |
1652 | * With KERNEL_INTEGRITY we keep at least the root level of the kernel page table immutable, thus the need
1653 | * to preallocate, before machine_lockdown, any L1 entries necessary for the entire kernel runtime.
1654 | *
1655 | * For a given VA range, if necessary, allocate new L2 translation tables and install the table entries in
1656 | * the appropriate L1 table indexes. Called before the translation table is active.
1657 | * | |
1658 | * parameters: | |
1659 | * | |
1660 | * tt: virtual address of L1 translation table to modify | |
1661 | * start: beginning of VA range | |
1662 | * end: end of VA range | |
1663 | * static_map: whether to allocate the new translation table page from read-only memory
1664 | * table_attrs: attributes of new table entry in addition to VALID and TYPE_TABLE attributes | |
1665 | * | |
1666 | */ | |
1667 | ||
1668 | static void | |
1669 | init_ptpages(tt_entry_t *tt, vm_map_address_t start, vm_map_address_t end, bool static_map, uint64_t table_attrs) | |
1670 | { | |
1671 | tt_entry_t *l1_tte; | |
1672 | vm_offset_t ptpage_vaddr; | |
1673 | ||
1674 | l1_tte = tt + ((start & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); | |
1675 | ||
1676 | while (start < end) { | |
1677 | if (*l1_tte == ARM_TTE_EMPTY) { | |
1678 | /* Allocate a page and setup L1 Table TTE in L1 */ | |
1679 | ptpage_vaddr = alloc_ptpage(static_map); | |
1680 | *l1_tte = (kvtophys(ptpage_vaddr) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | table_attrs; | |
1681 | bzero((void *)ptpage_vaddr, ARM_PGBYTES); | |
1682 | } | |
1683 | ||
1684 | if ((start + ARM_TT_L1_SIZE) < start) { | |
1685 | /* If this is the last L1 entry, it must cover the last mapping. */ | |
1686 | break; | |
1687 | } | |
1688 | ||
1689 | start += ARM_TT_L1_SIZE; | |
1690 | l1_tte++; | |
1691 | } | |
1692 | } | |
1693 | ||
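/*
 * Hypothetical usage sketch: preallocate L1 entries for a 4GB window before
 * machine_lockdown, backing the new L2 tables with read-only pages and
 * applying the XN table attributes.  window_base is a stand-in for a real
 * VA; the calls later in this file follow the same shape.
 */
#if 0
init_ptpages(cpu_tte, window_base, window_base + (4ULL << 30), TRUE, ARM_DYNAMIC_TABLE_XN);
#endif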
d9a64523 A |
1694 | #define ARM64_PHYSMAP_SLIDE_RANGE (1ULL << 30) // 1 GB |
1695 | #define ARM64_PHYSMAP_SLIDE_MASK (ARM64_PHYSMAP_SLIDE_RANGE - 1) | |
1696 | ||
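/*
 * Worked example of the slide computed below, assuming 16KB pages: if
 * early_random() returns 0x1234567895e64000, masking with
 * ARM64_PHYSMAP_SLIDE_MASK keeps the low 30 bits (0x15e64000), and clearing
 * PAGE_MASK leaves it page-aligned, so the physical aperture base moves up by
 * roughly 350MB.  The result is always a page-aligned offset below 1GB.
 */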
5ba3f43e A |
1697 | void |
1698 | arm_vm_init(uint64_t memory_size, boot_args * args) | |
1699 | { | |
5ba3f43e | 1700 | vm_map_address_t va_l1, va_l1_end; |
5ba3f43e | 1701 | tt_entry_t *cpu_l1_tte; |
5ba3f43e | 1702 | vm_map_address_t va_l2, va_l2_end; |
5ba3f43e A |
1703 | tt_entry_t *cpu_l2_tte; |
1704 | pmap_paddr_t boot_ttep; | |
1705 | tt_entry_t *boot_tte; | |
1706 | uint64_t mem_segments; | |
1707 | vm_offset_t ptpage_vaddr; | |
d9a64523 | 1708 | vm_map_address_t dynamic_memory_begin; |
5ba3f43e A |
1709 | |
1710 | /* | |
f427ee49 | 1711 | * Get the virtual and physical kernel-managed memory base from boot_args. |
5ba3f43e A |
1712 | */ |
1713 | gVirtBase = args->virtBase; | |
1714 | gPhysBase = args->physBase; | |
cb323159 A |
1715 | #if KASAN |
1716 | real_phys_size = args->memSize + (shadow_ptop - shadow_pbase); | |
1717 | #else | |
1718 | real_phys_size = args->memSize; | |
1719 | #endif | |
1720 | /* | |
1721 | * Ensure the physical region we specify for the VM to manage ends on a | |
1722 | * software page boundary. Note that the software page size (PAGE_SIZE) | |
1723 | * may be a multiple of the hardware page size specified in ARM_PGBYTES. | |
1724 | * We must round the reported memory size down to the nearest PAGE_SIZE | |
1725 | * boundary to ensure the VM does not try to manage a page it does not | |
1726 | * completely own. The KASAN shadow region, if present, is managed entirely | |
1727 | * in units of the hardware page size and should not need similar treatment. | |
1728 | */ | |
1729 | gPhysSize = mem_size = ((gPhysBase + args->memSize) & ~PAGE_MASK) - gPhysBase; | |
1730 | ||
f427ee49 A |
1731 | mem_actual = args->memSizeActual ? args->memSizeActual : mem_size; |
1732 | ||
1733 | if ((memory_size != 0) && (mem_size > memory_size)) { | |
5ba3f43e | 1734 | mem_size = memory_size; |
f427ee49 A |
1735 | max_mem_actual = memory_size; |
1736 | } else { | |
1737 | max_mem_actual = mem_actual; | |
1738 | } | |
1739 | if (mem_size >= ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 2)) { | |
d9a64523 | 1740 | panic("Unsupported memory configuration %lx\n", mem_size); |
f427ee49 | 1741 | } |
d9a64523 | 1742 | |
f427ee49 A |
1743 | #if defined(ARM_LARGE_MEMORY) |
1744 | unsigned long physmap_l1_entries = ((real_phys_size + ARM64_PHYSMAP_SLIDE_RANGE) >> ARM_TT_L1_SHIFT) + 1; | |
1745 | physmap_base = VM_MIN_KERNEL_ADDRESS - (physmap_l1_entries << ARM_TT_L1_SHIFT); | |
1746 | #else | |
1747 | physmap_base = phystokv(args->topOfKernelData); | |
1748 | #endif | |
d9a64523 A |
1749 | |
1750 | // Slide the physical aperture to a random page-aligned location within the slide range | |
1751 | uint64_t physmap_slide = early_random() & ARM64_PHYSMAP_SLIDE_MASK & ~((uint64_t)PAGE_MASK); | |
1752 | assert(physmap_slide < ARM64_PHYSMAP_SLIDE_RANGE); | |
1753 | ||
1754 | physmap_base += physmap_slide; | |
1755 | ||
c6bf4f31 A |
1756 | #if XNU_MONITOR |
1757 | physmap_base = ROUND_TWIG(physmap_base); | |
f427ee49 A |
1758 | #if defined(ARM_LARGE_MEMORY) |
1759 | static_memory_end = phystokv(args->topOfKernelData); | |
1760 | #else | |
c6bf4f31 | 1761 | static_memory_end = physmap_base + mem_size; |
f427ee49 A |
1762 | #endif // ARM_LARGE_MEMORY |
1763 | physmap_end = physmap_base + real_phys_size; | |
c6bf4f31 | 1764 | #else |
d9a64523 | 1765 | static_memory_end = physmap_base + mem_size + (PTOV_TABLE_SIZE * ARM_TT_TWIG_SIZE); // worst possible case for block alignment |
f427ee49 | 1766 | physmap_end = physmap_base + real_phys_size + (PTOV_TABLE_SIZE * ARM_TT_TWIG_SIZE); |
c6bf4f31 | 1767 | #endif |
f427ee49 A |
1768 | |
1769 | #if KASAN && !defined(ARM_LARGE_MEMORY) | |
d9a64523 A |
1770 | /* add the KASAN stolen memory to the physmap */ |
1771 | dynamic_memory_begin = static_memory_end + (shadow_ptop - shadow_pbase); | |
1772 | #else | |
1773 | dynamic_memory_begin = static_memory_end; | |
c6bf4f31 A |
1774 | #endif |
1775 | #if XNU_MONITOR | |
1776 | pmap_stacks_start = (void*)dynamic_memory_begin; | |
1777 | dynamic_memory_begin += PPL_STACK_REGION_SIZE; | |
1778 | pmap_stacks_end = (void*)dynamic_memory_begin; | |
d9a64523 | 1779 | #endif |
f427ee49 | 1780 | if (dynamic_memory_begin > VM_MAX_KERNEL_ADDRESS) { |
d9a64523 | 1781 | panic("Unsupported memory configuration %lx\n", mem_size); |
f427ee49 | 1782 | } |
5ba3f43e | 1783 | |
f427ee49 A |
1784 | boot_tte = (tt_entry_t *)&bootstrap_pagetables; |
1785 | boot_ttep = kvtophys((vm_offset_t)boot_tte); | |
5ba3f43e | 1786 | |
d9a64523 A |
1787 | #if DEVELOPMENT || DEBUG |
1788 | /* Sanity check - assert that BOOTSTRAP_TABLE_SIZE is sufficiently large to
1789 | * hold our bootstrap mappings for any possible slide */ | |
1790 | size_t bytes_mapped = dynamic_memory_begin - gVirtBase; | |
1791 | size_t l1_entries = 1 + ((bytes_mapped + ARM_TT_L1_SIZE - 1) / ARM_TT_L1_SIZE); | |
1792 | /* 1 L1 each for V=P and KVA, plus 1 page for each L2 */ | |
1793 | size_t pages_used = 2 * (l1_entries + 1); | |
1794 | if (pages_used > BOOTSTRAP_TABLE_SIZE) { | |
1795 | panic("BOOTSTRAP_TABLE_SIZE too small for memory config\n"); | |
1796 | } | |
1797 | #endif | |
1798 | ||
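/*
 * Worked example of the check above, assuming a 16KB granule where one L1
 * entry spans 64GB: mapping 36GB from gVirtBase gives l1_entries = 1 + 1 = 2
 * and pages_used = 2 * (2 + 1) = 6 bootstrap table pages across the V=P and
 * KVA copies.
 */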
1799 | /* | |
5ba3f43e A |
1800 | * TTBR0 L1, TTBR0 L2 - 1:1 bootstrap mapping. |
1801 | * TTBR1 L1, TTBR1 L2 - kernel mapping | |
1802 | */ | |
f427ee49 A |
1803 | |
1804 | /* | |
1805 | * TODO: free bootstrap table memory back to the allocator.
1806 | * On large-memory systems the bootstrap tables could be quite large.
1807 | * After bootstrap completes, xnu can warm-start with a single 16KB page mapping
1808 | * to trampoline to KVA; this requires only 3 pages to stay resident.
1809 | */ | |
1810 | avail_start = args->topOfKernelData; | |
5ba3f43e | 1811 | |
c6bf4f31 | 1812 | #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) |
f427ee49 | 1813 | arm_replace_identity_map(); |
5ba3f43e A |
1814 | #endif |
1815 | ||
1816 | /* Initialize invalid tte page */ | |
1817 | invalid_tte = (tt_entry_t *)alloc_ptpage(TRUE); | |
1818 | invalid_ttep = kvtophys((vm_offset_t)invalid_tte); | |
1819 | bzero(invalid_tte, ARM_PGBYTES); | |
1820 | ||
1821 | /* | |
1822 | * Initialize l1 page table page | |
1823 | */ | |
5ba3f43e A |
1824 | cpu_tte = (tt_entry_t *)alloc_ptpage(TRUE); |
1825 | cpu_ttep = kvtophys((vm_offset_t)cpu_tte); | |
1826 | bzero(cpu_tte, ARM_PGBYTES); | |
5ba3f43e | 1827 | avail_end = gPhysBase + mem_size; |
cb323159 | 1828 | assert(!(avail_end & PAGE_MASK)); |
5ba3f43e | 1829 | |
d9a64523 | 1830 | #if KASAN |
cb323159 | 1831 | real_avail_end = gPhysBase + real_phys_size; |
d9a64523 A |
1832 | #else |
1833 | real_avail_end = avail_end; | |
1834 | #endif | |
1835 | ||
5ba3f43e A |
1836 | /* |
1837 | * Initialize l1 and l2 page table pages:
1838 | * map physical memory at the kernel base virtual address and
1839 | * cover the kernel dynamic address range section.
1840 | *
1841 | * The so-called physical aperture should be statically mapped.
1842 | */ | |
f427ee49 | 1843 | init_ptpages(cpu_tte, gVirtBase, dynamic_memory_begin, TRUE, 0); |
5ba3f43e | 1844 | |
f427ee49 A |
1845 | #if defined(ARM_LARGE_MEMORY) |
1846 | /* | |
1847 | * Initialize l1 page table pages : | |
1848 | * on large memory systems the physical aperture exists separately below | |
1849 | * the rest of the kernel virtual address space | |
1850 | */ | |
1851 | init_ptpages(cpu_tte, physmap_base, ROUND_L1(physmap_end), TRUE, ARM_DYNAMIC_TABLE_XN); | |
1852 | #endif | |
5ba3f43e | 1853 | |
5ba3f43e | 1854 | |
5c9f4661 A |
1855 | #if __ARM_KERNEL_PROTECT__ |
1856 | /* Expand the page tables to prepare for the EL0 mappings. */ | |
1857 | arm_vm_expand_kernel_el0_mappings(); | |
1858 | #endif /* __ARM_KERNEL_PROTECT__ */ | |
1859 | ||
5ba3f43e | 1860 | /* |
f427ee49 | 1861 | * Now retrieve addresses for various segments from the kernel mach-o header
5ba3f43e A |
1862 | */ |
1863 | segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT); | |
1864 | segPLKDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_DATA_CONST", &segSizePLKDATACONST); | |
1865 | segPLKTEXTEXECB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_TEXT_EXEC", &segSizePLKTEXTEXEC); | |
1866 | segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT); | |
1867 | segDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA_CONST", &segSizeDATACONST); | |
1868 | segTEXTEXECB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT_EXEC", &segSizeTEXTEXEC); | |
c6bf4f31 A |
1869 | #if XNU_MONITOR |
1870 | segPPLTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLTEXT", &segSizePPLTEXT); | |
1871 | segPPLTRAMPB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLTRAMP", &segSizePPLTRAMP); | |
1872 | segPPLDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLDATA_CONST", &segSizePPLDATACONST); | |
1873 | #endif | |
5ba3f43e | 1874 | segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA); |
c6bf4f31 A |
1875 | #if XNU_MONITOR |
1876 | segPPLDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLDATA", &segSizePPLDATA); | |
1877 | #endif | |
d9a64523 | 1878 | |
f427ee49 | 1879 | segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA); |
5ba3f43e A |
1880 | segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK); |
1881 | segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD); | |
c3c9b80d | 1882 | segKLDDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLDDATA", &segSizeKLDDATA); |
5ba3f43e A |
1883 | segPRELINKDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_DATA", &segSizePRELINKDATA); |
1884 | segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO); | |
1885 | segPLKLLVMCOVB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_LLVM_COV", &segSizePLKLLVMCOV); | |
1886 | segPLKLINKEDITB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_LINKEDIT", &segSizePLKLINKEDIT); | |
1887 | segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST); | |
f427ee49 A |
1888 | segLASTDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LASTDATA_CONST", &segSizeLASTDATACONST); |
1889 | ||
1890 | sectHIBTEXTB = (vm_offset_t) getsectdatafromheader(&_mh_execute_header, "__TEXT_EXEC", "__hib_text", §SizeHIBTEXT); | |
1891 | sectHIBDATACONSTB = (vm_offset_t) getsectdatafromheader(&_mh_execute_header, "__DATA_CONST", "__hib_const", §SizeHIBDATACONST); | |
1892 | segHIBDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__HIBDATA", &segSizeHIBDATA); | |
1893 | ||
1894 | if (kernel_mach_header_is_in_fileset(&_mh_execute_header)) { | |
1895 | kernel_mach_header_t *kc_mh = PE_get_kc_header(KCKindPrimary); | |
1896 | ||
1897 | // fileset has kext PLK_TEXT_EXEC under kernel collection TEXT_EXEC following kernel's LAST | |
1898 | segKCTEXTEXECB = (vm_offset_t) getsegdatafromheader(kc_mh, "__TEXT_EXEC", &segSizeKCTEXTEXEC); | |
1899 | assert(segPLKTEXTEXECB && !segSizePLKTEXTEXEC); // kernel PLK_TEXT_EXEC must be empty | |
c3c9b80d A |
1900 | |
1901 | assert(segLASTB); // kernel LAST can be empty, but it must have | |
1902 | // a valid address for computations below. | |
1903 | ||
f427ee49 A |
1904 | assert(segKCTEXTEXECB <= segLASTB); // KC TEXT_EXEC must contain kernel LAST |
1905 | assert(segKCTEXTEXECB + segSizeKCTEXTEXEC >= segLASTB + segSizeLAST); | |
1906 | segPLKTEXTEXECB = segLASTB + segSizeLAST; | |
1907 | segSizePLKTEXTEXEC = segSizeKCTEXTEXEC - (segPLKTEXTEXECB - segKCTEXTEXECB); | |
1908 | ||
1909 | // fileset has kext PLK_DATA_CONST under kernel collection DATA_CONST following kernel's LASTDATA_CONST | |
1910 | segKCDATACONSTB = (vm_offset_t) getsegdatafromheader(kc_mh, "__DATA_CONST", &segSizeKCDATACONST); | |
1911 | assert(segPLKDATACONSTB && !segSizePLKDATACONST); // kernel PLK_DATA_CONST must be empty | |
1912 | assert(segLASTDATACONSTB && segSizeLASTDATACONST); // kernel LASTDATA_CONST must be non-empty | |
1913 | assert(segKCDATACONSTB <= segLASTDATACONSTB); // KC DATA_CONST must contain kernel LASTDATA_CONST | |
1914 | assert(segKCDATACONSTB + segSizeKCDATACONST >= segLASTDATACONSTB + segSizeLASTDATACONST); | |
1915 | segPLKDATACONSTB = segLASTDATACONSTB + segSizeLASTDATACONST; | |
1916 | segSizePLKDATACONST = segSizeKCDATACONST - (segPLKDATACONSTB - segKCDATACONSTB); | |
1917 | ||
1918 | // fileset has kext PRELINK_DATA under kernel collection DATA following kernel's empty PRELINK_DATA | |
1919 | segKCDATAB = (vm_offset_t) getsegdatafromheader(kc_mh, "__DATA", &segSizeKCDATA); | |
1920 | assert(segPRELINKDATAB && !segSizePRELINKDATA); // kernel PRELINK_DATA must be empty | |
1921 | assert(segKCDATAB <= segPRELINKDATAB); // KC DATA must contain kernel PRELINK_DATA | |
1922 | assert(segKCDATAB + segSizeKCDATA >= segPRELINKDATAB + segSizePRELINKDATA); | |
1923 | segSizePRELINKDATA = segSizeKCDATA - (segPRELINKDATAB - segKCDATAB); | |
1924 | ||
1925 | // fileset has consolidated PRELINK_TEXT, PRELINK_INFO and LINKEDIT at the kernel collection level | |
1926 | assert(segPRELINKTEXTB && !segSizePRELINKTEXT); // kernel PRELINK_TEXT must be empty | |
1927 | segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(kc_mh, "__PRELINK_TEXT", &segSizePRELINKTEXT); | |
1928 | assert(segPRELINKINFOB && !segSizePRELINKINFO); // kernel PRELINK_INFO must be empty | |
1929 | segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(kc_mh, "__PRELINK_INFO", &segSizePRELINKINFO); | |
1930 | segLINKB = (vm_offset_t) getsegdatafromheader(kc_mh, "__LINKEDIT", &segSizeLINK); | |
1931 | } | |
5ba3f43e A |
1932 | |
1933 | (void) PE_parse_boot_argn("use_contiguous_hint", &use_contiguous_hint, sizeof(use_contiguous_hint)); | |
1934 | assert(segSizePRELINKTEXT < 0x03000000); /* 23355738 */ | |
1935 | ||
1936 | /* if one of the new segments is present, the other one better be as well */ | |
1937 | if (segSizePLKDATACONST || segSizePLKTEXTEXEC) { | |
1938 | assert(segSizePLKDATACONST && segSizePLKTEXTEXEC); | |
1939 | } | |
1940 | ||
1941 | etext = (vm_offset_t) segTEXTB + segSizeTEXT; | |
1942 | sdata = (vm_offset_t) segDATAB; | |
1943 | edata = (vm_offset_t) segDATAB + segSizeDATA; | |
f427ee49 | 1944 | end_kern = round_page(segHIGHESTKC ? segHIGHESTKC : getlastaddr()); /* Force end to next page */ |
5ba3f43e A |
1945 | |
1946 | vm_set_page_size(); | |
1947 | ||
1948 | vm_kernel_base = segTEXTB; | |
1949 | vm_kernel_top = (vm_offset_t) &last_kernel_symbol; | |
1950 | vm_kext_base = segPRELINKTEXTB; | |
1951 | vm_kext_top = vm_kext_base + segSizePRELINKTEXT; | |
1952 | ||
1953 | vm_prelink_stext = segPRELINKTEXTB; | |
1954 | if (!segSizePLKTEXTEXEC && !segSizePLKDATACONST) { | |
1955 | vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT; | |
1956 | } else { | |
1957 | vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT + segSizePLKDATACONST + segSizePLKTEXTEXEC; | |
1958 | } | |
1959 | vm_prelink_sinfo = segPRELINKINFOB; | |
1960 | vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO; | |
1961 | vm_slinkedit = segLINKB; | |
1962 | vm_elinkedit = segLINKB + segSizeLINK; | |
1963 | ||
1964 | vm_prelink_sdata = segPRELINKDATAB; | |
1965 | vm_prelink_edata = segPRELINKDATAB + segSizePRELINKDATA; | |
1966 | ||
1967 | arm_vm_prot_init(args); | |
1968 | ||
f427ee49 | 1969 | vm_page_kernelcache_count = (unsigned int) (atop_64(end_kern - segLOWEST)); |
5ba3f43e A |
1970 | |
1971 | /* | |
1972 | * Initialize the page tables for the low globals: | |
1973 | * cover this address range: | |
1974 | * LOW_GLOBAL_BASE_ADDRESS + 2MB | |
1975 | */ | |
5ba3f43e A |
1976 | va_l1 = va_l2 = LOW_GLOBAL_BASE_ADDRESS; |
1977 | cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); | |
1978 | cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT); | |
5ba3f43e A |
1979 | ptpage_vaddr = alloc_ptpage(TRUE); |
1980 | *cpu_l2_tte = (kvtophys(ptpage_vaddr) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN; | |
1981 | bzero((void *)ptpage_vaddr, ARM_PGBYTES); | |
1982 | ||
1983 | /* | |
1984 | * Initialize l2 page table pages : | |
1985 | * cover this address range: | |
1986 | * KERNEL_DYNAMIC_ADDR - VM_MAX_KERNEL_ADDRESS | |
1987 | */ | |
f427ee49 A |
1988 | #if defined(ARM_LARGE_MEMORY) |
1989 | /* | |
1990 | * Dynamically mapped memory outside the VM allocator VA range is required to bootstrap the VM system;
1991 | * we don't expect it to exceed 64GB, so there is no sense mapping any more space between here and the VM heap range
1992 | */ | |
1993 | init_ptpages(cpu_tte, dynamic_memory_begin, ROUND_L1(dynamic_memory_begin), FALSE, ARM_DYNAMIC_TABLE_XN); | |
1994 | #else | |
1995 | /* | |
1996 | * TODO: do these pages really need to come from RO memory? | |
1997 | * With legacy 3-level table systems we never mapped more than a single L1 entry, so this may be dead code
1998 | */ | |
1999 | init_ptpages(cpu_tte, dynamic_memory_begin, VM_MAX_KERNEL_ADDRESS, TRUE, ARM_DYNAMIC_TABLE_XN); | |
2000 | #endif | |
5ba3f43e A |
2001 | |
2002 | #if KASAN | |
d9a64523 A |
2003 | /* record the extent of the physmap */ |
2004 | physmap_vbase = physmap_base; | |
f427ee49 | 2005 | physmap_vtop = physmap_end; |
5ba3f43e | 2006 | kasan_init(); |
cb323159 A |
2007 | #endif /* KASAN */ |
2008 | ||
2009 | #if MONOTONIC | |
2010 | mt_early_init(); | |
2011 | #endif /* MONOTONIC */ | |
5ba3f43e | 2012 | |
d9a64523 | 2013 | set_tbi(); |
d9a64523 | 2014 | |
f427ee49 | 2015 | arm_vm_physmap_init(args); |
5ba3f43e | 2016 | set_mmu_ttb_alternate(cpu_ttep & TTBR_BADDR_MASK); |
cb323159 | 2017 | |
2a1bd2d3 | 2018 | ml_enable_monitor(); |
cb323159 A |
2019 | |
2020 | set_mmu_ttb(invalid_ttep & TTBR_BADDR_MASK); | |
2021 | ||
5ba3f43e | 2022 | flush_mmu_tlb(); |
c6bf4f31 A |
2023 | #if defined(HAS_VMSA_LOCK) |
2024 | vmsa_lock(); | |
2025 | #endif | |
d9a64523 A |
2026 | kva_active = TRUE; |
2027 | // global table pointers may need to be different due to physical aperture remapping | |
2028 | cpu_tte = (tt_entry_t*)(phystokv(cpu_ttep)); | |
2029 | invalid_tte = (tt_entry_t*)(phystokv(invalid_ttep)); | |
5ba3f43e | 2030 | |
f427ee49 A |
2031 | // From here on out, we're off the bootstrap translation tables. |
2032 | ||
2033 | ||
2034 | /* AuxKC initialization has to be deferred until this point, since | |
2035 | * the AuxKC may not have been fully mapped in the bootstrap | |
2036 | * tables, if it spilled downwards into the prior L2 block. | |
2037 | * | |
2038 | * Now that its mapping, set up by arm_vm_prot_init(), is active,
2039 | * we can traverse and fix it up. | |
2040 | */ | |
2041 | ||
2042 | if (arm_vm_auxkc_init()) { | |
2043 | if (segLOWESTROAuxKC < segLOWESTRO) { | |
2044 | segLOWESTRO = segLOWESTROAuxKC; | |
2045 | } | |
2046 | if (segHIGHESTROAuxKC > segHIGHESTRO) { | |
2047 | segHIGHESTRO = segHIGHESTROAuxKC; | |
2048 | } | |
2049 | if (segLOWESTRXAuxKC < segLOWESTTEXT) { | |
2050 | segLOWESTTEXT = segLOWESTRXAuxKC; | |
2051 | } | |
2052 | assert(segLOWEST == segLOWESTAuxKC); | |
2053 | ||
2054 | // The preliminary auxKC mapping has been broken up. | |
2055 | flush_mmu_tlb(); | |
2056 | } | |
2057 | ||
5ba3f43e A |
2058 | sane_size = mem_size - (avail_start - gPhysBase); |
2059 | max_mem = mem_size; | |
d9a64523 | 2060 | vm_kernel_slid_base = segLOWESTTEXT; |
5ba3f43e | 2061 | vm_kernel_slid_top = vm_prelink_einfo; |
f427ee49 | 2062 | // vm_kernel_slide is set by arm_init()->arm_slide_rebase_and_sign_image() |
5ba3f43e | 2063 | vm_kernel_stext = segTEXTB; |
f427ee49 A |
2064 | |
2065 | if (kernel_mach_header_is_in_fileset(&_mh_execute_header)) { | |
2066 | // fileset has kext TEXT before kernel DATA_CONST | |
2067 | assert(segTEXTEXECB == segTEXTB + segSizeTEXT); | |
2068 | vm_kernel_etext = segTEXTB + segSizeTEXT + segSizeTEXTEXEC; | |
2069 | } else { | |
2070 | assert(segDATACONSTB == segTEXTB + segSizeTEXT); | |
2071 | assert(segTEXTEXECB == segDATACONSTB + segSizeDATACONST); | |
2072 | vm_kernel_etext = segTEXTB + segSizeTEXT + segSizeDATACONST + segSizeTEXTEXEC; | |
2073 | } | |
5ba3f43e | 2074 | |
d9a64523 | 2075 | dynamic_memory_begin = ROUND_TWIG(dynamic_memory_begin); |
c6bf4f31 A |
2076 | #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) |
2077 | // reserve a 32MB region without permission overrides to use later for a CTRR unit test | |
2078 | { | |
2079 | extern vm_offset_t ctrr_test_page; | |
2080 | tt_entry_t *new_tte; | |
2081 | ||
2082 | ctrr_test_page = dynamic_memory_begin; | |
2083 | dynamic_memory_begin += ARM_TT_L2_SIZE; | |
2084 | cpu_l1_tte = cpu_tte + ((ctrr_test_page & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); | |
2085 | assert((*cpu_l1_tte) & ARM_TTE_VALID); | |
2086 | cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((ctrr_test_page & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT); | |
2087 | assert((*cpu_l2_tte) == ARM_TTE_EMPTY); | |
2088 | new_tte = (tt_entry_t *)alloc_ptpage(FALSE); | |
2089 | bzero(new_tte, ARM_PGBYTES); | |
2090 | *cpu_l2_tte = (kvtophys((vm_offset_t)new_tte) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID; | |
2091 | } | |
2092 | #endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */ | |
2093 | #if XNU_MONITOR | |
2094 | for (vm_offset_t cur = (vm_offset_t)pmap_stacks_start; cur < (vm_offset_t)pmap_stacks_end; cur += ARM_PGBYTES) { | |
2095 | arm_vm_map(cpu_tte, cur, ARM_PTE_EMPTY); | |
2096 | } | |
2097 | #endif | |
d9a64523 A |
2098 | pmap_bootstrap(dynamic_memory_begin); |
2099 | ||
2100 | disable_preemption(); | |
5ba3f43e A |
2101 | |
2102 | /* | |
2103 | * Initialize l3 page table pages : | |
2104 | * cover this address range: | |
2105 | * 2MB + FrameBuffer size + 10MB for each 256MB segment | |
2106 | */ | |
2107 | ||
2108 | mem_segments = (mem_size + 0x0FFFFFFF) >> 28; | |
2109 | ||
d9a64523 | 2110 | va_l1 = dynamic_memory_begin; |
5ba3f43e A |
2111 | va_l1_end = va_l1 + ((2 + (mem_segments * 10)) << 20); |
2112 | va_l1_end += round_page(args->Video.v_height * args->Video.v_rowBytes); | |
2113 | va_l1_end = (va_l1_end + 0x00000000007FFFFFULL) & 0xFFFFFFFFFF800000ULL; | |
2114 | ||
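/*
 * Worked example of the sizing above: with mem_size = 4GB, mem_segments =
 * (0x100000000 + 0x0fffffff) >> 28 = 16, so the base window is
 * (2 + 16 * 10)MB = 162MB, plus the framebuffer, rounded up to the next
 * 8MB boundary by the 0x7fffff mask arithmetic.
 */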
2115 | cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); | |
2116 | ||
2117 | while (va_l1 < va_l1_end) { | |
5ba3f43e A |
2118 | va_l2 = va_l1; |
2119 | ||
f427ee49 | 2120 | if (((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE) < va_l1) { |
5ba3f43e A |
2121 | /* If this is the last L1 entry, it must cover the last mapping. */ |
2122 | va_l2_end = va_l1_end; | |
2123 | } else { | |
f427ee49 | 2124 | va_l2_end = MIN((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE, va_l1_end); |
5ba3f43e A |
2125 | } |
2126 | ||
2127 | cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT); | |
5ba3f43e A |
2128 | |
2129 | while (va_l2 < va_l2_end) { | |
2130 | pt_entry_t * ptp; | |
2131 | pmap_paddr_t ptp_phys; | |
2132 | ||
2133 | /* Allocate a page and setup L3 Table TTE in L2 */ | |
2134 | ptp = (pt_entry_t *) alloc_ptpage(FALSE); | |
2135 | ptp_phys = (pmap_paddr_t)kvtophys((vm_offset_t)ptp); | |
2136 | ||
f427ee49 A |
2137 | bzero(ptp, ARM_PGBYTES); |
2138 | pmap_init_pte_page(kernel_pmap, ptp, va_l2, 3, TRUE); | |
5ba3f43e | 2139 | |
f427ee49 | 2140 | *cpu_l2_tte = (pa_to_tte(ptp_phys)) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN; |
5ba3f43e A |
2141 | |
2142 | va_l2 += ARM_TT_L2_SIZE; | |
2143 | cpu_l2_tte++; | |
f427ee49 | 2144 | } |
cb323159 | 2145 | |
5ba3f43e A |
2146 | va_l1 = va_l2_end; |
2147 | cpu_l1_tte++; | |
2148 | } | |
5ba3f43e | 2149 | |
f427ee49 A |
2150 | #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) |
2151 | /* | |
2152 | * In this configuration, the bootstrap mappings (arm_vm_init) and | |
2153 | * the heap mappings occupy separate L1 regions. Explicitly set up | |
2154 | * the heap L1 allocations here. | |
2155 | */ | |
2156 | #if defined(ARM_LARGE_MEMORY) | |
2157 | init_ptpages(cpu_tte, KERNEL_PMAP_HEAP_RANGE_START & ~ARM_TT_L1_OFFMASK, VM_MAX_KERNEL_ADDRESS, FALSE, ARM_DYNAMIC_TABLE_XN); | |
2158 | #else // defined(ARM_LARGE_MEMORY) | |
2159 | va_l1 = VM_MIN_KERNEL_ADDRESS & ~ARM_TT_L1_OFFMASK; | |
2160 | init_ptpages(cpu_tte, VM_MIN_KERNEL_ADDRESS & ~ARM_TT_L1_OFFMASK, VM_MAX_KERNEL_ADDRESS, FALSE, ARM_DYNAMIC_TABLE_XN); | |
2161 | #endif // defined(ARM_LARGE_MEMORY) | |
2162 | #endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) | |
2163 | ||
5ba3f43e A |
2164 | /* |
2165 | * Initialize l3 page table pages : | |
2166 | * cover this address range: | |
f427ee49 | 2167 | * ((VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - PE_EARLY_BOOT_VA) to VM_MAX_KERNEL_ADDRESS |
5ba3f43e | 2168 | */ |
f427ee49 | 2169 | va_l1 = (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - PE_EARLY_BOOT_VA; |
5ba3f43e A |
2170 | va_l1_end = VM_MAX_KERNEL_ADDRESS; |
2171 | ||
2172 | cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); | |
2173 | ||
2174 | while (va_l1 < va_l1_end) { | |
5ba3f43e A |
2175 | va_l2 = va_l1; |
2176 | ||
f427ee49 | 2177 | if (((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE) < va_l1) { |
5ba3f43e A |
2178 | /* If this is the last L1 entry, it must cover the last mapping. */ |
2179 | va_l2_end = va_l1_end; | |
2180 | } else { | |
f427ee49 | 2181 | va_l2_end = MIN((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE, va_l1_end); |
5ba3f43e A |
2182 | } |
2183 | ||
2184 | cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT); | |
5ba3f43e A |
2185 | |
2186 | while (va_l2 < va_l2_end) { | |
2187 | pt_entry_t * ptp; | |
2188 | pmap_paddr_t ptp_phys; | |
2189 | ||
2190 | /* Allocate a page and setup L3 Table TTE in L2 */ | |
2191 | ptp = (pt_entry_t *) alloc_ptpage(FALSE); | |
2192 | ptp_phys = (pmap_paddr_t)kvtophys((vm_offset_t)ptp); | |
2193 | ||
f427ee49 A |
2194 | bzero(ptp, ARM_PGBYTES); |
2195 | pmap_init_pte_page(kernel_pmap, ptp, va_l2, 3, TRUE); | |
5ba3f43e | 2196 | |
f427ee49 | 2197 | *cpu_l2_tte = (pa_to_tte(ptp_phys)) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN; |
5ba3f43e A |
2198 | |
2199 | va_l2 += ARM_TT_L2_SIZE; | |
2200 | cpu_l2_tte++; | |
f427ee49 | 2201 | } |
cb323159 | 2202 | |
5ba3f43e A |
2203 | va_l1 = va_l2_end; |
2204 | cpu_l1_tte++; | |
2205 | } | |
5ba3f43e | 2206 | |
5ba3f43e A |
2207 | |
2208 | /* | |
2209 | * Adjust avail_start so that the range that the VM owns | |
2210 | * starts on a PAGE_SIZE aligned boundary. | |
2211 | */ | |
2212 | avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK; | |
2213 | ||
c6bf4f31 A |
2214 | #if XNU_MONITOR |
2215 | pmap_static_allocations_done(); | |
2216 | #endif | |
5ba3f43e A |
2217 | first_avail = avail_start; |
2218 | patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData); | |
d9a64523 | 2219 | enable_preemption(); |
5ba3f43e | 2220 | } |