/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include "assym.s"
#include <arm64/tunables/tunables.s>
#include <arm64/exception_asm.h>

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif /* __ARM_KERNEL_PROTECT__ */

#if __APRR_SUPPORTED__

.macro MSR_APRR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl		EXT(pinst_set_aprr_el1)
#else
	msr		APRR_EL1, x0
#endif
.endmacro

.macro MSR_APRR_EL0_X0
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl		EXT(pinst_set_aprr_el0)
#else
	msr		APRR_EL0, x0
#endif
.endmacro

.macro MSR_APRR_SHADOW_MASK_EN_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl		EXT(pinst_set_aprr_shadow_mask_en_el1)
#else
	msr		APRR_SHADOW_MASK_EN_EL1, x0
#endif
.endmacro

#endif /* __APRR_SUPPORTED__ */

.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov		x1, lr
	bl		EXT(pinst_set_vbar)
	mov		lr, x1
#else
	msr		VBAR_EL1, x0
#endif
.endmacro

.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
	mov		x0, x1
	mov		x1, lr
	bl		EXT(pinst_set_tcr)
	mov		lr, x1
#else
	msr		TCR_EL1, x1
#endif
.endmacro

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov		x1, lr
	bl		EXT(pinst_set_ttbr1)
	mov		lr, x1
#else
	msr		TTBR1_EL1, x0
#endif
.endmacro

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov		x1, lr

	// This may abort, do so on SP1
	bl		EXT(pinst_spsel_1)

	bl		EXT(pinst_set_sctlr)
	msr		SPSel, #0						// Back to SP0
	mov		lr, x1
#else
	msr		SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro
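
/*
 * Note on the pinst_* wrappers above: when KTRR/CTRR is enabled, writes to
 * these security-critical system registers go through pinned-instruction
 * trampolines in the protected text region instead of inline msr instructions,
 * so a stray writable msr cannot be reused to retarget them. The link register
 * is stashed in x1 around each bl because these macros run before any stack
 * has been set up.
 */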

/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Variables:
 *	x19 - Reset handler data pointer
 *	x20 - Boot args pointer
 *	x21 - CPU data pointer
 */
	.text
	.align 12
	.globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
	/*
	 * On reset, both RVBAR_EL1 and VBAR_EL1 point here. SPSel.SP is 1,
	 * so on reset the CPU will jump to offset 0x0 and on exceptions
	 * the CPU will jump to offset 0x200, 0x280, 0x300, or 0x380.
	 * In order for both the reset vector and exception vectors to
	 * coexist in the same space, the reset code is moved to the end
	 * of the exception vector area.
	 */
	b		EXT(reset_vector)

	/* EL1 SP1: These vectors trap errors during early startup on non-boot CPUs. */
	.align	9
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.

	.align	7
	.globl EXT(reset_vector)
LEXT(reset_vector)
	// Preserve x0 for start_first_cpu, if called
	// Unlock the core for debugging
	msr		OSLAR_EL1, xzr
	msr		DAIFSet, #(DAIFSC_ALL)				// Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add		x0, x0, EXT(LowExceptionVectorBase)@pageoff
	msr		VBAR_EL1, x0
#endif

#if __APRR_SUPPORTED__
	MOV64	x0, APRR_EL1_DEFAULT
#if XNU_MONITOR
	adrp	x4, EXT(pmap_ppl_locked_down)@page
	ldrb	w5, [x4, #EXT(pmap_ppl_locked_down)@pageoff]
	cmp		w5, #0
	b.ne	1f

	// If the PPL is not locked down, we start in PPL mode.
	MOV64	x0, APRR_EL1_PPL
1:
#endif /* XNU_MONITOR */

	MSR_APRR_EL1_X0

	// Load up the default APRR_EL0 value.
	MOV64	x0, APRR_EL0_DEFAULT
	MSR_APRR_EL0_X0
#endif /* __APRR_SUPPORTED__ */

#if defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Set KTRR registers immediately after wake/resume
	 *
	 * During power on reset, XNU stashed the kernel text region range values
	 * into __DATA,__const which should be protected by AMCC RoRgn at this point.
	 * Read this data and program/lock KTRR registers accordingly.
	 * If either value is zero, we're debugging the kernel, so skip programming KTRR.
	 */

	/* refuse to boot if machine_lockdown() hasn't completed */
	adrp	x17, EXT(lockdown_done)@page
	ldr		w17, [x17, EXT(lockdown_done)@pageoff]
	cbz		w17, .

	// load stashed rorgn_begin
	adrp	x17, EXT(ctrr_begin)@page
	add		x17, x17, EXT(ctrr_begin)@pageoff
	ldr		x17, [x17]
#if DEBUG || DEVELOPMENT || CONFIG_DTRACE
	// if rorgn_begin is zero, we're debugging. skip enabling ktrr
	cbz		x17, Lskip_ktrr
#else
	cbz		x17, .
#endif

	// load stashed rorgn_end
	adrp	x19, EXT(ctrr_end)@page
	add		x19, x19, EXT(ctrr_end)@pageoff
	ldr		x19, [x19]
#if DEBUG || DEVELOPMENT || CONFIG_DTRACE
	cbz		x19, Lskip_ktrr
#else
	cbz		x19, .
#endif

	msr		ARM64_REG_KTRR_LOWER_EL1, x17
	msr		ARM64_REG_KTRR_UPPER_EL1, x19
	mov		x17, #1
	msr		ARM64_REG_KTRR_LOCK_EL1, x17
Lskip_ktrr:
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

	// Process reset handlers
	adrp	x19, EXT(ResetHandlerData)@page		// Get address of the reset handler data
	add		x19, x19, EXT(ResetHandlerData)@pageoff
	mrs		x15, MPIDR_EL1						// Load MPIDR to get CPU number
#if HAS_CLUSTER
	and		x0, x15, #0xFFFF					// CPU number in Affinity0, cluster ID in Affinity1
#else
	and		x0, x15, #0xFF						// CPU number is in MPIDR Affinity Level 0
#endif
	ldr		x1, [x19, CPU_DATA_ENTRIES]			// Load start of data entries
	add		x3, x1, MAX_CPUS * 16				// end addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
	ldr		x21, [x1, CPU_DATA_PADDR]			// Load physical CPU data address
	cbz		x21, Lnext_cpu_data_entry
	ldr		w2, [x21, CPU_PHYS_ID]				// Load the CPU's physical id
	cmp		x0, x2								// Compare the entry's physical id with the MPIDR-derived id
	b.eq	Lfound_cpu_data_entry				// Branch if match
Lnext_cpu_data_entry:
	add		x1, x1, #16							// Increment to the next cpu data entry
	cmp		x1, x3
	b.eq	Lskip_cpu_reset_handler				// Not found
	b		Lcheck_cpu_data_entry				// loop
Lfound_cpu_data_entry:
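	// At this point x21 holds the physical address of this CPU's cpu_data
	// and x0 still holds its MPIDR-derived physical id.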
#if defined(KERNEL_INTEGRITY_CTRR)
	/*
	 * Program and lock CTRR if this CPU is a non-boot cluster master. The boot
	 * cluster will be locked in machine_lockdown. pinst insns are protected by
	 * VMSA_LOCK. The A_PXN and A_MMUON_WRPROTECT options provide something
	 * close to KTRR behavior.
	 */

	/* refuse to boot if machine_lockdown() hasn't completed */
	adrp	x17, EXT(lockdown_done)@page
	ldr		w17, [x17, EXT(lockdown_done)@pageoff]
	cbz		w17, .

	// load stashed rorgn_begin
	adrp	x17, EXT(ctrr_begin)@page
	add		x17, x17, EXT(ctrr_begin)@pageoff
	ldr		x17, [x17]
#if DEBUG || DEVELOPMENT || CONFIG_DTRACE
	// if rorgn_begin is zero, we're debugging. skip enabling ctrr
	cbz		x17, Lskip_ctrr
#else
	cbz		x17, .
#endif

	// load stashed rorgn_end
	adrp	x19, EXT(ctrr_end)@page
	add		x19, x19, EXT(ctrr_end)@pageoff
	ldr		x19, [x19]
#if DEBUG || DEVELOPMENT || CONFIG_DTRACE
	cbz		x19, Lskip_ctrr
#else
	cbz		x19, .
#endif

	mrs		x18, ARM64_REG_CTRR_LOCK_EL1
	cbnz	x18, Lskip_ctrr						/* don't touch if already locked */
	msr		ARM64_REG_CTRR_A_LWR_EL1, x17
	msr		ARM64_REG_CTRR_A_UPR_EL1, x19
	mov		x18, #(CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT)
	msr		ARM64_REG_CTRR_CTL_EL1, x18
	mov		x18, #1
	msr		ARM64_REG_CTRR_LOCK_EL1, x18

	isb
	tlbi	vmalle1
	dsb		ish
	isb
Lspin_ctrr_unlocked:
	/* we shouldn't ever be here as cpu start is serialized by cluster in cpu_start(),
	 * and the first core started in a cluster is designated cluster master and locks
	 * both core and cluster. subsequent cores in the same cluster will run locked from
	 * the reset vector */
	mrs		x18, ARM64_REG_CTRR_LOCK_EL1
	cbz		x18, Lspin_ctrr_unlocked
Lskip_ctrr:
#endif
	adrp	x20, EXT(const_boot_args)@page
	add		x20, x20, EXT(const_boot_args)@pageoff
	ldr		x0, [x21, CPU_RESET_HANDLER]		// Call CPU reset handler
	cbz		x0, Lskip_cpu_reset_handler

	// Validate that our handler is one of the two expected handlers
	adrp	x2, EXT(resume_idle_cpu)@page
	add		x2, x2, EXT(resume_idle_cpu)@pageoff
	cmp		x0, x2
	beq		1f
	adrp	x2, EXT(start_cpu)@page
	add		x2, x2, EXT(start_cpu)@pageoff
	cmp		x0, x2
	bne		Lskip_cpu_reset_handler
1:

#if HAS_BP_RET
	bl		EXT(set_bp_ret)
#endif

#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Populate TPIDR_EL1 (in case the CPU takes an exception while
	 * turning on the MMU).
	 */
	ldr		x13, [x21, CPU_ACTIVE_THREAD]
	msr		TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ */

	blr		x0
Lskip_cpu_reset_handler:
	b		.									// Hang if the handler is NULL or returns

	.align 3
	.global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
	.global EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
	.space	(stSize_NUM),0
#endif

	.section __DATA_CONST,__const
	.align	3
	.globl	EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space	(rhdSize_NUM),0						// (filled with 0s)
	.text


/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point. Reset vector = (__start & ~0xfff)
 */
	.align	3
	.globl EXT(_start)
LEXT(_start)
	b		EXT(start_first_cpu)


/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
	.align 12, 0
	.global	EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
	/* EL1 SP 0 */
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	/* EL1 SP1 */
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	/* EL0 64 */
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	/* EL0 32 */
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align 12, 0

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
	.align ARM_PGSHIFT
	.globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
	.align 2
	.globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
	adrp	lr, EXT(arm_init_idle_cpu)@page
	add		lr, lr, EXT(arm_init_idle_cpu)@pageoff
	b		start_cpu

	.align 2
	.globl EXT(start_cpu)
LEXT(start_cpu)
	adrp	lr, EXT(arm_init_cpu)@page
	add		lr, lr, EXT(arm_init_cpu)@pageoff
	b		start_cpu

	.align 2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	// This is done right away in the reset vector for pre-KTRR devices
	// Set low reset vector now that we are in the KTRR-free zone
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add		x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

	// x20 set to BootArgs phys address
	// x21 set to cpu data phys address

	// Get the kernel memory parameters from the boot args
	ldr		x22, [x20, BA_VIRT_BASE]			// Get the kernel virt base
	ldr		x23, [x20, BA_PHYS_BASE]			// Get the kernel phys base
	ldr		x24, [x20, BA_MEM_SIZE]				// Get the physical memory size
	adrp	x25, EXT(bootstrap_pagetables)@page	// Get the start of the page tables
	ldr		x26, [x20, BA_BOOT_FLAGS]			// Get the kernel boot flags

	// Set TPIDRRO_EL0 with the CPU number
	ldr		x0, [x21, CPU_NUMBER_GS]
	msr		TPIDRRO_EL0, x0

	// Set the exception stack pointer
	ldr		x0, [x21, CPU_EXCEPSTACK_TOP]

	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov		x1, lr
	bl		EXT(pinst_spsel_1)
	mov		lr, x1
#else
	msr		SPSel, #1
#endif
	mov		sp, x0

	// Set the interrupt stack pointer
	ldr		x0, [x21, CPU_INTSTACK_TOP]
	msr		SPSel, #0
	mov		sp, x0

	// Convert lr to KVA
	add		lr, lr, x22
	sub		lr, lr, x23
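	// lr held the physical address of arm_init_cpu / arm_init_idle_cpu;
	// adding (virt base - phys base) rebases it into the kernel's virtual
	// address space so the handler is entered at its KVA once the MMU is on.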

	b		common_start

/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - L1 table address
 *   arg2 - L2 table address
 *   arg3 - Scratch register
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 */
.macro create_l1_table_entry
	and		$3, $0, #(ARM_TT_L1_INDEX_MASK)
	lsr		$3, $3, #(ARM_TT_L1_SHIFT)			// Get index in L1 table for L2 table
	lsl		$3, $3, #(TTE_SHIFT)				// Convert index into pointer offset
	add		$3, $1, $3							// Get L1 entry pointer
	mov		$4, #(ARM_TTE_BOOT_TABLE)			// Get L1 table entry template
	and		$5, $2, #(ARM_TTE_TABLE_MASK)		// Get address bits of L2 table
	orr		$5, $4, $5							// Create table entry for L2 table
	str		$5, [$3]							// Write entry to L1 table
.endmacro
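
/*
 * For example, with a 4KB translation granule each L1 entry covers 1GB of
 * VA, so the slot index is (VA >> ARM_TT_L1_SHIFT) and the stored entry is
 * ARM_TTE_BOOT_TABLE | (L2 table PA). (Illustrative only; the actual
 * shift/mask values come from proc_reg.h and depend on the granule size.)
 */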

/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - Physical address
 *   arg2 - L2 table address
 *   arg3 - Number of entries
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 *   arg6 - Scratch register
 *   arg7 - Scratch register
 */
.macro create_l2_block_entries
	and		$4, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr		$4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)	// Get index in L2 table for block entry
	lsl		$4, $4, #(TTE_SHIFT)				// Convert index into pointer offset
	add		$4, $2, $4							// Get L2 entry pointer
	mov		$5, #(ARM_TTE_BOOT_BLOCK)			// Get L2 block entry template
	and		$6, $1, #(ARM_TTE_BLOCK_L2_MASK)	// Get address bits of block mapping
	orr		$6, $5, $6
	mov		$5, $3
	mov		$7, #(ARM_TT_L2_SIZE)
1:
	str		$6, [$4], #(1 << TTE_SHIFT)			// Write entry to L2 table and advance
	add		$6, $6, $7							// Increment the output address
	subs	$5, $5, #1							// Decrement the number of entries
	b.ne	1b
.endmacro
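
/*
 * Each loop iteration emits one block TTE and bumps the output address by
 * ARM_TT_L2_SIZE (2MB with a 4KB granule), so N entries map N consecutive
 * L2-sized blocks starting at the given physical base.
 */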

/*
 * arg0 - virtual start address
 * arg1 - physical start address
 * arg2 - number of entries to map
 * arg3 - L1 table address
 * arg4 - free space pointer
 * arg5 - scratch (entries mapped per loop)
 * arg6 - scratch
 * arg7 - scratch
 * arg8 - scratch
 * arg9 - scratch
 */
.macro create_bootstrap_mapping
	/* calculate entries left in this page */
	and		$5, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr		$5, $5, #(ARM_TT_L2_SHIFT)
	mov		$6, #(TTE_PGENTRIES)
	sub		$5, $6, $5

	/* allocate an L2 table */
3:	add		$4, $4, PGBYTES

	/* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */
	create_l1_table_entry	$0, $3, $4, $6, $7, $8

	/* determine how many entries to map this loop - the smaller of entries
	 * remaining in page and total entries left */
	cmp		$2, $5
	csel	$5, $2, $5, lt

	/* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3) */
	create_l2_block_entries	$0, $1, $4, $5, $6, $7, $8, $9

	/* subtract entries just mapped and bail out if we're done */
	subs	$2, $2, $5
	beq		2f

	/* entries left to map - advance base pointers */
	add		$0, $0, $5, lsl #(ARM_TT_L2_SHIFT)
	add		$1, $1, $5, lsl #(ARM_TT_L2_SHIFT)

	mov		$5, #(TTE_PGENTRIES)				/* subsequent loops map (up to) a whole L2 page */
	b		3b
2:
.endmacro
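
/*
 * Every pass allocates an L2 page from the free-space pointer; the first pass
 * maps at most the entries remaining in the L2 page covering the starting VA
 * (so an L2-unaligned start works), and each later pass maps up to
 * TTE_PGENTRIES more blocks until the requested count is exhausted.
 */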

/*
 * _start_first_cpu
 * Cold boot init routine. Called from __start
 *   x0 - Boot args
 */
	.align 2
	.globl EXT(start_first_cpu)
LEXT(start_first_cpu)

	// Unlock the core for debugging
	msr		OSLAR_EL1, xzr
	msr		DAIFSet, #(DAIFSC_ALL)				// Disable all interrupts

	mov		x20, x0
	mov		x21, #0

	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add		x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0

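	// From this point, any exception taken during bootstrap spins in
	// LowExceptionVectorBase, preserving state such as ELR_EL1 for debugging.
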
#if __APRR_SUPPORTED__
	// Save the LR
	mov		x1, lr

#if XNU_MONITOR
	// If the PPL is supported, we start out in PPL mode.
	MOV64	x0, APRR_EL1_PPL
#else
	// Otherwise, we start out in default mode.
	MOV64	x0, APRR_EL1_DEFAULT
#endif

	// Set the APRR state for EL1.
	MSR_APRR_EL1_X0

	// Set the APRR state for EL0.
	MOV64	x0, APRR_EL0_DEFAULT
	MSR_APRR_EL0_X0

	// Restore the LR.
	mov		lr, x1
#endif /* __APRR_SUPPORTED__ */

	// Get the kernel memory parameters from the boot args
	ldr		x22, [x20, BA_VIRT_BASE]			// Get the kernel virt base
	ldr		x23, [x20, BA_PHYS_BASE]			// Get the kernel phys base
	ldr		x24, [x20, BA_MEM_SIZE]				// Get the physical memory size
	adrp	x25, EXT(bootstrap_pagetables)@page	// Get the start of the page tables
	ldr		x26, [x20, BA_BOOT_FLAGS]			// Get the kernel boot flags

	// Clear the register that will be used to store the userspace thread pointer and CPU number.
	// We may not actually be booting from ordinal CPU 0, so this register will be updated
	// in ml_parse_cpu_topology(), which happens later in bootstrap.
	msr		TPIDRRO_EL0, x21

	// Set up exception stack pointer
	adrp	x0, EXT(excepstack_top)@page		// Load top of exception stack
	add		x0, x0, EXT(excepstack_top)@pageoff
	add		x0, x0, x22							// Convert to KVA
	sub		x0, x0, x23

	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl		EXT(pinst_spsel_1)
#else
	msr		SPSel, #1
#endif

	mov		sp, x0

	// Set up interrupt stack pointer
	adrp	x0, EXT(intstack_top)@page			// Load top of irq stack
	add		x0, x0, EXT(intstack_top)@pageoff
	add		x0, x0, x22							// Convert to KVA
	sub		x0, x0, x23
	msr		SPSel, #0							// Set SP_EL0 to interrupt stack
	mov		sp, x0

	// Load address of the C init routine into the link register
	adrp	lr, EXT(arm_init)@page
	add		lr, lr, EXT(arm_init)@pageoff
	add		lr, lr, x22							// Convert to KVA
	sub		lr, lr, x23

	/*
	 * Set up the bootstrap page tables with a single block entry for the V=P
	 * mapping, a single block entry for the trampolined kernel address (KVA),
	 * and all else invalid. This requires four pages:
	 *	Page 1 - V=P L1 table
	 *	Page 2 - V=P L2 table
	 *	Page 3 - KVA L1 table
	 *	Page 4 - KVA L2 table
	 */

	// Invalidate all entries in the bootstrap page tables
	mov		x0, #(ARM_TTE_EMPTY)				// Load invalid entry template
	mov		x1, x25								// Start at V=P pagetable root
	mov		x2, #(TTE_PGENTRIES)				// Load number of entries per page
	lsl		x2, x2, #2							// Shift by 2 for num entries on 4 pages

Linvalidate_bootstrap:							// do {
	str		x0, [x1], #(1 << TTE_SHIFT)			//   Invalidate and advance
	subs	x2, x2, #1							//   entries--
	b.ne	Linvalidate_bootstrap				// } while (entries != 0)

	/*
	 * In order to reclaim memory on targets where TZ0 (or some other entity)
	 * must be located at the base of memory, iBoot may set the virtual and
	 * physical base addresses to immediately follow whatever lies at the
	 * base of physical memory.
	 *
	 * If the base address belongs to TZ0, it may be dangerous for xnu to map
	 * it (as it may be prefetched, despite being technically inaccessible).
	 * In order to avoid this issue while keeping the mapping code simple, we
	 * may continue to use block mappings, but we will only map from the
	 * kernelcache Mach-O header to the end of memory.
	 *
	 * Given that iBoot guarantees that the unslid kernelcache base address
	 * will begin on an L2 boundary, this should prevent us from accidentally
	 * mapping TZ0.
	 */
	adrp	x0, EXT(_mh_execute_header)@page	// address of kernel mach header
	add		x0, x0, EXT(_mh_execute_header)@pageoff
	ldr		w1, [x0, #0x18]						// load mach_header->flags
	tbz		w1, #0x1f, Lkernelcache_base_found	// if MH_DYLIB_IN_CACHE unset, base is kernel mach header
	ldr		w1, [x0, #0x20]						// load first segment cmd (offset sizeof(kernel_mach_header_t))
	cmp		w1, #0x19							// must be LC_SEGMENT_64
	bne		.
	ldr		x1, [x0, #0x38]						// load first segment vmaddr
	sub		x1, x0, x1							// compute slide
	MOV64	x0, VM_KERNEL_LINK_ADDRESS
	add		x0, x0, x1							// base is kernel link address + slide

Lkernelcache_base_found:
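	// x0 now holds the physical base of the kernelcache: either the kernel's
	// own Mach-O header, or (when the kernel is linked into a kernelcache)
	// the unslid link address plus the computed slide.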
	/*
	 * Adjust physical and virtual base addresses to account for physical
	 * memory preceding the xnu Mach-O header
	 *   x22 - Kernel virtual base
	 *   x23 - Kernel physical base
	 *   x24 - Physical memory size
	 */
	sub		x18, x0, x23
	sub		x24, x24, x18
	add		x22, x22, x18
	add		x23, x23, x18

	/*
	 * x0  - V=P virtual cursor
	 * x4  - V=P physical cursor
	 * x14 - KVA virtual cursor
	 * x15 - KVA physical cursor
	 */
	mov		x4, x0
	mov		x14, x22
	mov		x15, x23

	/*
	 * Allocate L1 tables
	 *   x1 - V=P L1 page
	 *   x3 - KVA L1 page
	 *   x2 - free mem pointer from which we allocate a variable number of L2
	 *        pages. The maximum number of bootstrap page table pages is limited
	 *        to BOOTSTRAP_TABLE_SIZE. For a 2G 4k page device, assuming the
	 *        worst-case slide, we need 1xL1 and up to 3xL2 pages (1GB mapped
	 *        per L1 entry), so 8 total pages for V=P and KVA.
	 */
	mov		x1, x25
	add		x3, x1, PGBYTES
	mov		x2, x3

	/*
	 * Setup the V=P bootstrap mapping
	 *   x5 - total number of L2 entries to allocate
	 */
	lsr		x5, x24, #(ARM_TT_L2_SHIFT)
	/* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */
	create_bootstrap_mapping	x0, x4, x5, x1, x2, x6, x10, x11, x12, x13

	/* Setup the KVA bootstrap mapping */
	lsr		x5, x24, #(ARM_TT_L2_SHIFT)
	create_bootstrap_mapping	x14, x15, x5, x3, x2, x9, x10, x11, x12, x13

	/* Ensure TTEs are visible */
	dsb		ish

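	// Both translation trees are now populated: the V=P root at x25 and the
	// KVA root at x25 + PGBYTES. common_start loads them into TTBR0 and
	// TTBR1 respectively.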
	b		common_start

/*
 * Begin common CPU initialization
 *
 * Register state:
 *	x20 - PA of boot args
 *	x21 - zero on cold boot, PA of cpu data on warm reset
 *	x22 - Kernel virtual base
 *	x23 - Kernel physical base
 *	x25 - PA of the V=P pagetable root
 *	lr  - KVA of C init routine
 *	sp  - SP_EL0 selected
 *
 *	SP_EL0 - KVA of CPU's interrupt stack
 *	SP_EL1 - KVA of CPU's exception stack
 *	TPIDRRO_EL0 - CPU number
 */
common_start:

#if HAS_NEX_PG
	mov		x19, lr
	bl		EXT(set_nex_pg)
	mov		lr, x19
#endif

	// Set the translation control register.
	adrp	x0, EXT(sysreg_restore)@page		// Load TCR value from the system register restore structure
	add		x0, x0, EXT(sysreg_restore)@pageoff
	ldr		x1, [x0, SR_RESTORE_TCR_EL1]
	MSR_TCR_EL1_X1

	/* Set up translation table base registers.
	 *	TTBR0 - V=P table @ top of kernel
	 *	TTBR1 - KVA table @ top of kernel + 1 page
	 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Note that for KTRR configurations, the V=P map will be modified by
	 * arm_vm_init.c.
	 */
#endif
	and		x0, x25, #(TTBR_BADDR_MASK)
	mov		x19, lr
	bl		EXT(set_mmu_ttb)
	mov		lr, x19
	add		x0, x25, PGBYTES
	and		x0, x0, #(TTBR_BADDR_MASK)
	MSR_TTBR1_EL1_X0

	// Set up MAIR attr0 for normal memory, attr1 for device memory
	mov		x0, xzr
	mov		x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
	orr		x0, x0, x1
	mov		x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
	orr		x0, x0, x1
	mov		x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
	orr		x0, x0, x1
	mov		x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
	orr		x0, x0, x1
	mov		x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
	orr		x0, x0, x1
	mov		x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
	orr		x0, x0, x1
	mov		x1, #(MAIR_POSTED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_REORDERED))
	orr		x0, x0, x1
	mov		x1, #(MAIR_POSTED_COMBINED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED))
	orr		x0, x0, x1
	msr		MAIR_EL1, x0
	isb
	tlbi	vmalle1
	dsb		ish
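	// MAIR_EL1 holds eight one-byte attribute fields; each page table entry's
	// AttrIdx selects one of them, so the CACHE_ATTRINDX_* indices programmed
	// above are the only memory types mappings can use.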

#if defined(APPLEHURRICANE)
	// <rdar://problem/26726624> Increase Snoop reservation in EDB to reduce starvation risk
	// Needs to be done before MMU is enabled
	HID_INSERT_BITS	ARM64_REG_HID5, ARM64_REG_HID5_CrdEdbSnpRsvd_mask, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE, x12
#endif

#if defined(BCM2837)
	// Setup timer interrupt routing; must be done before MMU is enabled
	mrs		x15, MPIDR_EL1						// Load MPIDR to get CPU number
	and		x15, x15, #0xFF						// CPU number is in MPIDR Affinity Level 0
	mov		x0, #0x4000
	lsl		x0, x0, #16
	add		x0, x0, #0x0040						// x0: 0x4000004X Core Timers interrupt control
	add		x0, x0, x15, lsl #2
	mov		w1, #0xF0							// x1: 0xF0 Route to Core FIQs
	str		w1, [x0]
	isb		sy
#endif

#ifndef __ARM_IC_NOALIAS_ICACHE__
	/* Invalidate the TLB and icache on systems that do not guarantee that the
	 * caches are invalidated on reset.
	 */
	tlbi	vmalle1
	ic		iallu
#endif

	/* If x21 is not 0, then this is either the start_cpu path or
	 * the resume_idle_cpu path. cpu_ttep should already be
	 * populated, so just switch to the kernel_pmap now.
	 */

	cbz		x21, 1f
	adrp	x0, EXT(cpu_ttep)@page
	add		x0, x0, EXT(cpu_ttep)@pageoff
	ldr		x0, [x0]
	MSR_TTBR1_EL1_X0
1:

	// Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
	/* If this is not the first reset of the boot CPU, the alternate mapping
	 * for the exception vectors will be set up, so use it. Otherwise, we
	 * should use the mapping located in the kernelcache mapping.
	 */
	MOV64	x0, ARM_KERNEL_PROTECT_EXCEPTION_START

	cbnz	x21, 1f
#endif /* __ARM_KERNEL_PROTECT__ */
	adrp	x0, EXT(ExceptionVectorsBase)@page	// Load exception vectors base address
	add		x0, x0, EXT(ExceptionVectorsBase)@pageoff
	add		x0, x0, x22							// Convert exception vector address to KVA
	sub		x0, x0, x23
1:
	MSR_VBAR_EL1_X0

1:
#ifdef HAS_APPLE_PAC
#ifdef __APSTS_SUPPORTED__
	mrs		x0, ARM64_REG_APSTS_EL1
	and		x1, x0, #(APSTS_EL1_MKEYVld)
	cbz		x1, 1b								// Poll APSTS_EL1.MKEYVld
	mrs		x0, ARM64_REG_APCTL_EL1
	orr		x0, x0, #(APCTL_EL1_AppleMode)
#ifdef HAS_APCTL_EL1_USERKEYEN
	orr		x0, x0, #(APCTL_EL1_UserKeyEn)
	and		x0, x0, #~(APCTL_EL1_KernKeyEn)
#else /* !HAS_APCTL_EL1_USERKEYEN */
	orr		x0, x0, #(APCTL_EL1_KernKeyEn)
#endif /* HAS_APCTL_EL1_USERKEYEN */
	and		x0, x0, #~(APCTL_EL1_EnAPKey0)
	msr		ARM64_REG_APCTL_EL1, x0

#else
	mrs		x0, ARM64_REG_APCTL_EL1
	and		x1, x0, #(APCTL_EL1_MKEYVld)
	cbz		x1, 1b								// Poll APCTL_EL1.MKEYVld
	orr		x0, x0, #(APCTL_EL1_AppleMode)
	orr		x0, x0, #(APCTL_EL1_KernKeyEn)
	msr		ARM64_REG_APCTL_EL1, x0
#endif /* __APSTS_SUPPORTED__ */

	/* ISB necessary to ensure APCTL_EL1_AppleMode logic enabled before proceeding */
	isb		sy
	/* Load static kernel key diversification values */
	ldr		x0, =KERNEL_ROP_ID
	/* set ROP key. must write at least once to pickup mkey per boot diversification */
	msr		APIBKeyLo_EL1, x0
	add		x0, x0, #1
	msr		APIBKeyHi_EL1, x0
	add		x0, x0, #1
	msr		APDBKeyLo_EL1, x0
	add		x0, x0, #1
	msr		APDBKeyHi_EL1, x0
	add		x0, x0, #1
	msr		ARM64_REG_KERNELKEYLO_EL1, x0
	add		x0, x0, #1
	msr		ARM64_REG_KERNELKEYHI_EL1, x0
	/* set JOP key. must write at least once to pickup mkey per boot diversification */
	add		x0, x0, #1
	msr		APIAKeyLo_EL1, x0
	add		x0, x0, #1
	msr		APIAKeyHi_EL1, x0
	add		x0, x0, #1
	msr		APDAKeyLo_EL1, x0
	add		x0, x0, #1
	msr		APDAKeyHi_EL1, x0
	/* set G key */
	add		x0, x0, #1
	msr		APGAKeyLo_EL1, x0
	add		x0, x0, #1
	msr		APGAKeyHi_EL1, x0
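
	// Every key half is seeded with a distinct constant derived from
	// KERNEL_ROP_ID; the hardware mixes in the per-boot master key (mkey),
	// so the effective keys still differ from boot to boot even though the
	// written values are static.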

	// Enable caches, MMU, ROP and JOP
	MOV64	x0, SCTLR_EL1_DEFAULT
	orr		x0, x0, #(SCTLR_PACIB_ENABLED)		/* IB is ROP */

#if __APCFG_SUPPORTED__
	// for APCFG systems, JOP keys are always on for EL1.
	// JOP keys for EL0 will be toggled on the first time we pmap_switch to a pmap that has JOP enabled
#else /* __APCFG_SUPPORTED__ */
	MOV64	x1, SCTLR_JOP_KEYS_ENABLED
	orr		x0, x0, x1
#endif /* !__APCFG_SUPPORTED__ */
#else /* HAS_APPLE_PAC */

	// Enable caches and MMU
	MOV64	x0, SCTLR_EL1_DEFAULT
#endif /* HAS_APPLE_PAC */
	MSR_SCTLR_EL1_X0
	isb		sy

	// Verify that the SCTLR_EL1 value we just wrote matches the expected
	// configuration; spin rather than continue with the wrong MMU/cache/PAC state.
	MOV64	x1, SCTLR_EL1_DEFAULT
#if HAS_APPLE_PAC
	orr		x1, x1, #(SCTLR_PACIB_ENABLED)
#if !__APCFG_SUPPORTED__
	MOV64	x2, SCTLR_JOP_KEYS_ENABLED
	orr		x1, x1, x2
#endif /* !__APCFG_SUPPORTED__ */
#endif /* HAS_APPLE_PAC */
	cmp		x0, x1
	bne		.

#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
	/* Watchtower
	 *
	 * If we have a Watchtower monitor it will setup CPACR_EL1 for us, touching
	 * it here would trap to EL3.
	 */

	// Enable NEON
	mov		x0, #(CPACR_FPEN_ENABLE)
	msr		CPACR_EL1, x0
#endif

	// Clear thread pointer
	msr		TPIDR_EL1, xzr						// Clear the thread register


#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Initialization common to all Apple targets
	ARM64_IS_PCORE		x15
	ARM64_READ_EP_SPR	x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
	orr		x12, x12, ARM64_REG_HID4_DisDcMVAOps
	orr		x12, x12, ARM64_REG_HID4_DisDcSWL2Ops
	ARM64_WRITE_EP_SPR	x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
#endif // APPLE_ARM64_ARCH_FAMILY

	// Read MIDR before start of per-SoC tunables
	mrs		x12, MIDR_EL1

#if defined(APPLELIGHTNING)
	// Cebu <B0 is deprecated and unsupported (see rdar://problem/42835678)
	EXEC_COREEQ_REVLO	MIDR_CEBU_LIGHTNING, CPU_VERSION_B0, x12, x13
	b		.
	EXEC_END
	EXEC_COREEQ_REVLO	MIDR_CEBU_THUNDER, CPU_VERSION_B0, x12, x13
	b		.
	EXEC_END
#endif

	APPLY_TUNABLES	x12, x13
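	// APPLY_TUNABLES comes from arm64/tunables/tunables.s; it matches the
	// MIDR value in x12 against the per-SoC tunable tables and applies the
	// corresponding implementation-defined register overrides.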

#if HAS_CLUSTER
	// Unmask external IRQs if we're restarting from non-retention WFI
	mrs		x9, ARM64_REG_CYC_OVRD
	and		x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
	msr		ARM64_REG_CYC_OVRD, x9
#endif

	// If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
	cbnz	x21, Ltrampoline

	// Set KVA of boot args as first arg
	add		x0, x20, x22
	sub		x0, x0, x23

#if KASAN
	mov		x20, x0
	mov		x21, lr

	// x0: boot args
	// x1: KVA page table phys base
	mrs		x1, TTBR1_EL1
	bl		EXT(kasan_bootstrap)

	mov		x0, x20
	mov		lr, x21
#endif

	// Return to arm_init()
	ret

Ltrampoline:
	// Load VA of the trampoline
	adrp	x0, arm_init_tramp@page
	add		x0, x0, arm_init_tramp@pageoff
	add		x0, x0, x22
	sub		x0, x0, x23

	// Branch to the trampoline
	br		x0

/*
 * V=P to KVA trampoline.
 *	x0 - KVA of cpu data pointer
 */
	.text
	.align 2
arm_init_tramp:
	/* On a warm boot, the full kernel translation table is initialized in
	 * addition to the bootstrap tables. The layout is as follows:
	 *
	 *  +--Top of Memory--+
	 *         ...
	 *  |                 |
	 *  |  Primary Kernel |
	 *  |   Trans. Table  |
	 *  |                 |
	 *  +--Top + 5 pages--+
	 *  |                 |
	 *  |  Invalid Table  |
	 *  |                 |
	 *  +--Top + 4 pages--+
	 *  |                 |
	 *  |    KVA Table    |
	 *  |                 |
	 *  +--Top + 2 pages--+
	 *  |                 |
	 *  |    V=P Table    |
	 *  |                 |
	 *  +--Top of Kernel--+
	 *  |                 |
	 *  |  Kernel Mach-O  |
	 *  |                 |
	 *         ...
	 *  +---Kernel Base---+
	 */

	mov		x19, lr
#if defined(HAS_VMSA_LOCK)
	bl		EXT(vmsa_lock)
#endif
	// Convert CPU data PA to VA and set as first argument
	mov		x0, x21
	bl		EXT(phystokv)

	mov		lr, x19

	/* Return to arm_init() */
	ret

//#include "globals_asm.h"

/* vim: set ts=4: */