/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include "assym.s"
#include <arm64/tunables/tunables.s>
#include <arm64/exception_asm.h>

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif /* __ARM_KERNEL_PROTECT__ */



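/*
 * The MSR_* macros below wrap writes to security-critical system registers
 * (VBAR_EL1, TCR_EL1, TTBR1_EL1, SCTLR_EL1). On KERNEL_INTEGRITY_KTRR builds
 * the write is routed through the corresponding pinst_set_* routine, which
 * lives in protected text; since those routines are reached with `bl`, the
 * caller's lr is stashed in x1 (with x0/x1 shuffled for the TCR case) around
 * the call and restored afterwards. On other builds each macro degenerates to
 * a plain `msr`. Illustrative expansion for the non-KTRR case, assuming x0
 * already holds the new vector base (this usage appears later in this file):
 *
 *     adrp  x0, EXT(LowExceptionVectorBase)@page
 *     add   x0, x0, EXT(LowExceptionVectorBase)@pageoff
 *     MSR_VBAR_EL1_X0          // expands to: msr VBAR_EL1, x0
 */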
.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_vbar)
	mov	lr, x1
#else
	msr	VBAR_EL1, x0
#endif
.endmacro

.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x0, x1
	mov	x1, lr
	bl	EXT(pinst_set_tcr)
	mov	lr, x1
#else
	msr	TCR_EL1, x1
#endif
.endmacro

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_ttbr1)
	mov	lr, x1
#else
	msr	TTBR1_EL1, x0
#endif
.endmacro

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr

	// This may abort, do so on SP1
	bl	EXT(pinst_spsel_1)

	bl	EXT(pinst_set_sctlr)
	msr	SPSel, #0				// Back to SP0
	mov	lr, x1
#else
	msr	SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro

/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Variables:
 *	x19 - Reset handler data pointer
 *	x20 - Boot args pointer
 *	x21 - CPU data pointer
 */
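/*
 * Roughly equivalent C for the search-and-dispatch code below (illustrative
 * only; CPU_DATA_ENTRIES, CPU_DATA_PADDR, CPU_PHYS_ID, and CPU_RESET_HANDLER
 * are assym.s offsets, and the 16-byte-per-CPU entry layout and field names
 * here are assumptions made for readability):
 *
 *     for (entry = rhd->cpu_data_entries; entry < entry_end; entry++) {
 *         cpu_data_t *cdp = (cpu_data_t *)entry->cpu_data_paddr;
 *         if (cdp == NULL || cdp->cpu_phys_id != (mpidr & affinity_mask))
 *             continue;
 *         handler = cdp->cpu_reset_handler;
 *         if (handler == &resume_idle_cpu || handler == &start_cpu)
 *             handler();          // boot args in x20, cpu data in x21
 *         break;
 *     }
 *     for (;;) ;                  // hang if no entry, bad handler, or return
 */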
	.text
	.align 12
	.globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
	/*
	 * On reset, both RVBAR_EL1 and VBAR_EL1 point here. SPSel.SP is 1,
	 * so on reset the CPU will jump to offset 0x0 and on exceptions
	 * the CPU will jump to offset 0x200, 0x280, 0x300, or 0x380.
	 * In order for both the reset vector and exception vectors to
	 * coexist in the same space, the reset code is moved to the end
	 * of the exception vector area.
	 */
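	/*
	 * The offsets above follow directly from the alignment directives below:
	 * `.align 9` places the first spin vector at +0x200 (the "current EL with
	 * SP1" synchronous slot), and each subsequent `.align 7` starts a new
	 * 0x80-byte vector slot (+0x280, +0x300, +0x380).
	 */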
	b		EXT(reset_vector)

	/* EL1 SP1: These vectors trap errors during early startup on non-boot CPUs. */
	.align	9
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.

	.align	7
	.globl	EXT(reset_vector)
LEXT(reset_vector)
	// Preserve x0 for start_first_cpu, if called
	// Unlock the core for debugging
	msr		OSLAR_EL1, xzr
	msr		DAIFSet, #(DAIFSC_ALL)			// Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add		x0, x0, EXT(LowExceptionVectorBase)@pageoff
	msr		VBAR_EL1, x0
#endif


#if defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Set KTRR registers immediately after wake/resume
	 *
	 * During power on reset, XNU stashed the kernel text region range values
	 * into __DATA,__const which should be protected by AMCC RoRgn at this point.
	 * Read this data and program/lock KTRR registers accordingly.
	 * If either value is zero, we're debugging the kernel, so skip programming KTRR.
	 */

	/* refuse to boot if machine_lockdown() hasn't completed */
	adrp	x17, EXT(lockdown_done)@page
	ldr		w17, [x17, EXT(lockdown_done)@pageoff]
	cbz		w17, .

	// load stashed rorgn_begin
	adrp	x17, EXT(ctrr_begin)@page
	add		x17, x17, EXT(ctrr_begin)@pageoff
	ldr		x17, [x17]
#if DEBUG || DEVELOPMENT || CONFIG_DTRACE
	// if rorgn_begin is zero, we're debugging. skip enabling ktrr
	cbz		x17, Lskip_ktrr
#else
	cbz		x17, .
#endif

	// load stashed rorgn_end
	adrp	x19, EXT(ctrr_end)@page
	add		x19, x19, EXT(ctrr_end)@pageoff
	ldr		x19, [x19]
#if DEBUG || DEVELOPMENT || CONFIG_DTRACE
	cbz		x19, Lskip_ktrr
#else
	cbz		x19, .
#endif

	msr		ARM64_REG_KTRR_LOWER_EL1, x17
	msr		ARM64_REG_KTRR_UPPER_EL1, x19
	mov		x17, #1
	msr		ARM64_REG_KTRR_LOCK_EL1, x17
Lskip_ktrr:
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

	// Process reset handlers
	adrp	x19, EXT(ResetHandlerData)@page		// Get address of the reset handler data
	add		x19, x19, EXT(ResetHandlerData)@pageoff
	mrs		x15, MPIDR_EL1				// Load MPIDR to get CPU number
#if HAS_CLUSTER
	and		x0, x15, #0xFFFF			// CPU number in Affinity0, cluster ID in Affinity1
#else
	and		x0, x15, #0xFF				// CPU number is in MPIDR Affinity Level 0
#endif
	ldr		x1, [x19, CPU_DATA_ENTRIES]		// Load start of data entries
	add		x3, x1, MAX_CPUS * 16			// end addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
	ldr		x21, [x1, CPU_DATA_PADDR]		// Load physical CPU data address
	cbz		x21, Lnext_cpu_data_entry
	ldr		w2, [x21, CPU_PHYS_ID]			// Load ccc cpu phys id
	cmp		x0, x2					// Compare cpu data phys cpu and MPIDR_EL1 phys cpu
	b.eq	Lfound_cpu_data_entry			// Branch if match
Lnext_cpu_data_entry:
	add		x1, x1, #16				// Increment to the next cpu data entry
	cmp		x1, x3
	b.eq	Lskip_cpu_reset_handler			// Not found
	b		Lcheck_cpu_data_entry			// loop
Lfound_cpu_data_entry:
#if defined(KERNEL_INTEGRITY_CTRR)
	/*
	 * Program and lock CTRR if this CPU is a non-boot cluster master. The boot
	 * cluster is locked in machine_lockdown(); the pinst instructions are
	 * protected by VMSA_LOCK. The A_PXN and A_MMUON_WRPROTECT options provide
	 * something close to KTRR behavior.
	 */

	/* refuse to boot if machine_lockdown() hasn't completed */
	adrp	x17, EXT(lockdown_done)@page
	ldr		w17, [x17, EXT(lockdown_done)@pageoff]
	cbz		w17, .

	// load stashed rorgn_begin
	adrp	x17, EXT(ctrr_begin)@page
	add		x17, x17, EXT(ctrr_begin)@pageoff
	ldr		x17, [x17]
#if DEBUG || DEVELOPMENT || CONFIG_DTRACE
	// if rorgn_begin is zero, we're debugging. skip enabling ctrr
	cbz		x17, Lskip_ctrr
#else
	cbz		x17, .
#endif

	// load stashed rorgn_end
	adrp	x19, EXT(ctrr_end)@page
	add		x19, x19, EXT(ctrr_end)@pageoff
	ldr		x19, [x19]
#if DEBUG || DEVELOPMENT || CONFIG_DTRACE
	cbz		x19, Lskip_ctrr
#else
	cbz		x19, .
#endif

	mrs		x18, ARM64_REG_CTRR_LOCK_EL1
	cbnz	x18, Lskip_ctrr			/* don't touch if already locked */
	msr		ARM64_REG_CTRR_A_LWR_EL1, x17
	msr		ARM64_REG_CTRR_A_UPR_EL1, x19
	mov		x18, #(CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT)
	msr		ARM64_REG_CTRR_CTL_EL1, x18
	mov		x18, #1
	msr		ARM64_REG_CTRR_LOCK_EL1, x18


	isb
	tlbi	vmalle1
	dsb		ish
	isb
Lspin_ctrr_unlocked:
	/* We should never reach this spin loop: cpu start is serialized by cluster in
	 * cpu_start(), and the first core started in a cluster is designated cluster
	 * master and locks both core and cluster. Subsequent cores in the same cluster
	 * will run locked from the reset vector. */
	mrs		x18, ARM64_REG_CTRR_LOCK_EL1
	cbz		x18, Lspin_ctrr_unlocked
Lskip_ctrr:
#endif
	adrp	x20, EXT(const_boot_args)@page
	add		x20, x20, EXT(const_boot_args)@pageoff
	ldr		x0, [x21, CPU_RESET_HANDLER]		// Call CPU reset handler
	cbz		x0, Lskip_cpu_reset_handler

	// Validate that our handler is one of the two expected handlers
	adrp	x2, EXT(resume_idle_cpu)@page
	add		x2, x2, EXT(resume_idle_cpu)@pageoff
	cmp		x0, x2
	beq		1f
	adrp	x2, EXT(start_cpu)@page
	add		x2, x2, EXT(start_cpu)@pageoff
	cmp		x0, x2
	bne		Lskip_cpu_reset_handler
1:

#if HAS_BP_RET
	bl		EXT(set_bp_ret)
#endif

#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Populate TPIDR_EL1 (in case the CPU takes an exception while
	 * turning on the MMU).
	 */
	ldr		x13, [x21, CPU_ACTIVE_THREAD]
	msr		TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ */

	blr		x0
Lskip_cpu_reset_handler:
	b		.					// Hang if the handler is NULL or returns

	.align	3
	.global  EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
	.global  EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
	.space	(stSize_NUM),0
#endif

	.section __DATA_CONST,__const
	.align	3
	.globl  EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space  (rhdSize_NUM),0				// (filled with 0s)
	.text


/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point.  Reset vector = (__start & ~0xfff)
 */
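/*
 * Worked example of the computation above: LowResetVectorBase is page (4 KiB)
 * aligned via `.align 12` and _start sits in that same page, so masking the
 * kernel entry point with ~0xfff recovers the page-aligned reset vector
 * address, e.g. an entry point at physical 0x...4080 yields 0x...4000.
 */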
	.align 3
	.globl EXT(_start)
LEXT(_start)
	b	EXT(start_first_cpu)


/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
	.align 12, 0
	.global	EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
	/* EL1 SP 0 */
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	/* EL1 SP1 */
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	/* EL0 64 */
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	/* EL0 32 */
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align	7
	b		.
	.align 12, 0

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
.align ARM_PGSHIFT
.globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
	.align	2
	.globl	EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
	adrp	lr, EXT(arm_init_idle_cpu)@page
	add		lr, lr, EXT(arm_init_idle_cpu)@pageoff
	b		start_cpu

	.align	2
	.globl	EXT(start_cpu)
LEXT(start_cpu)
	adrp	lr, EXT(arm_init_cpu)@page
	add		lr, lr, EXT(arm_init_cpu)@pageoff
	b		start_cpu

	.align	2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	// This is done right away in reset vector for pre-KTRR devices
	// Set low reset vector now that we are in the KTRR-free zone
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add		x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

	// x20 set to BootArgs phys address
	// x21 set to cpu data phys address

	// Get the kernel memory parameters from the boot args
	ldr		x22, [x20, BA_VIRT_BASE]		// Get the kernel virt base
	ldr		x23, [x20, BA_PHYS_BASE]		// Get the kernel phys base
	ldr		x24, [x20, BA_MEM_SIZE]			// Get the physical memory size
	adrp	x25, EXT(bootstrap_pagetables)@page	// Get the start of the page tables
	ldr		x26, [x20, BA_BOOT_FLAGS]		// Get the kernel boot flags


	// Set TPIDRRO_EL0 with the CPU number
	ldr		x0, [x21, CPU_NUMBER_GS]
	msr		TPIDRRO_EL0, x0

	// Set the exception stack pointer
	ldr		x0, [x21, CPU_EXCEPSTACK_TOP]


	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov		x1, lr
	bl		EXT(pinst_spsel_1)
	mov		lr, x1
#else
	msr		SPSel, #1
#endif
	mov		sp, x0

	// Set the interrupt stack pointer
	ldr		x0, [x21, CPU_INTSTACK_TOP]
	msr		SPSel, #0
	mov		sp, x0

	// Convert lr to KVA
	add		lr, lr, x22
	sub		lr, lr, x23

	b		common_start

/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - L1 table address
 *   arg2 - L2 table address
 *   arg3 - Scratch register
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 */
.macro create_l1_table_entry
	and		$3, $0, #(ARM_TT_L1_INDEX_MASK)
	lsr		$3, $3, #(ARM_TT_L1_SHIFT)		// Get index in L1 table for L2 table
	lsl		$3, $3, #(TTE_SHIFT)			// Convert index into pointer offset
	add		$3, $1, $3				// Get L1 entry pointer
	mov		$4, #(ARM_TTE_BOOT_TABLE)		// Get L1 table entry template
	and		$5, $2, #(ARM_TTE_TABLE_MASK)		// Get address bits of L2 table
	orr		$5, $4, $5				// Create table entry for L2 table
	str		$5, [$3]				// Write entry to L1 table
.endmacro

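/*
 * Illustrative use of create_l1_table_entry (this mirrors how
 * create_bootstrap_mapping below invokes it; the register choices here are
 * arbitrary):
 *
 *     // Point the L1 entry covering the VA in x0 at the L2 table page in x4,
 *     // using x6/x7/x8 as scratch. The L1 table base is in x1.
 *     create_l1_table_entry	x0, x1, x4, x6, x7, x8
 *
 * The stored descriptor is ARM_TTE_BOOT_TABLE | (L2 table address bits),
 * i.e. a table descriptor rather than a block mapping.
 */
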
/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - Physical address
 *   arg2 - L2 table address
 *   arg3 - Number of entries
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 *   arg6 - Scratch register
 *   arg7 - Scratch register
 */
.macro create_l2_block_entries
	and		$4, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr		$4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)	// Get index in L2 table for block entry
	lsl		$4, $4, #(TTE_SHIFT)			// Convert index into pointer offset
	add		$4, $2, $4				// Get L2 entry pointer
	mov		$5, #(ARM_TTE_BOOT_BLOCK)		// Get L2 block entry template
	and		$6, $1, #(ARM_TTE_BLOCK_L2_MASK)	// Get address bits of block mapping
	orr		$6, $5, $6
	mov		$5, $3
	mov		$7, #(ARM_TT_L2_SIZE)
1:
	str		$6, [$4], #(1 << TTE_SHIFT)		// Write entry to L2 table and advance
	add		$6, $6, $7				// Increment the output address
	subs	$5, $5, #1				// Decrement the number of entries
	b.ne	1b
.endmacro

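/*
 * Roughly equivalent C for create_l2_block_entries (illustrative only; tte_t
 * and the loop shape are assumptions, the constants are the proc_reg.h
 * definitions used above):
 *
 *     tte_t *tte = &l2_table[(va & ARM_TT_L2_INDEX_MASK) >> ARM_TTE_BLOCK_L2_SHIFT];
 *     tte_t  entry = ARM_TTE_BOOT_BLOCK | (pa & ARM_TTE_BLOCK_L2_MASK);
 *     for (unsigned i = 0; i < num_entries; i++) {
 *         *tte++ = entry;
 *         entry += ARM_TT_L2_SIZE;   // advance the output address by one L2 block
 *     }
 */
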
/*
 * arg0 - virtual start address
 * arg1 - physical start address
 * arg2 - number of entries to map
 * arg3 - L1 table address
 * arg4 - free space pointer
 * arg5 - scratch (entries mapped per loop)
 * arg6 - scratch
 * arg7 - scratch
 * arg8 - scratch
 * arg9 - scratch
 */
.macro create_bootstrap_mapping
	/* calculate entries left in this page */
	and	$5, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr	$5, $5, #(ARM_TT_L2_SHIFT)
	mov	$6, #(TTE_PGENTRIES)
	sub	$5, $6, $5

	/* allocate an L2 table */
3:	add	$4, $4, PGBYTES

	/* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */
	create_l1_table_entry	$0, $3, $4, $6, $7, $8

	/* determine how many entries to map this loop - the smaller of entries
	 * remaining in page and total entries left */
	cmp	$2, $5
	csel	$5, $2, $5, lt

	/* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3, scratch4) */
	create_l2_block_entries	$0, $1, $4, $5, $6, $7, $8, $9

	/* subtract entries just mapped and bail out if we're done */
	subs	$2, $2, $5
	beq	2f

	/* entries left to map - advance base pointers */
	add 	$0, $0, $5, lsl #(ARM_TT_L2_SHIFT)
	add 	$1, $1, $5, lsl #(ARM_TT_L2_SHIFT)

	mov	$5, #(TTE_PGENTRIES)			/* subsequent loops map (up to) a whole L2 page */
	b	3b
2:
.endmacro

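/*
 * In other words (an illustrative sketch of the macro above, not additional
 * behavior): map num_entries L2 blocks starting at (va, pa), allocating a
 * fresh L2 table page from the free-space pointer each time the VA crosses
 * into a new L1 entry:
 *
 *     while (num_entries) {
 *         free_ptr += PGBYTES;                          // new L2 table page
 *         create_l1_table_entry(va, l1_table, free_ptr, ...);
 *         n = min(num_entries, entries_left_in_this_l2_page(va));
 *         create_l2_block_entries(va, pa, free_ptr, n, ...);
 *         num_entries -= n;
 *         va += n << ARM_TT_L2_SHIFT;
 *         pa += n << ARM_TT_L2_SHIFT;
 *     }
 */
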
/*
 * _start_first_cpu
 * Cold boot init routine.  Called from __start
 *   x0 - Boot args
 */
	.align 2
	.globl EXT(start_first_cpu)
LEXT(start_first_cpu)

	// Unlock the core for debugging
	msr		OSLAR_EL1, xzr
	msr		DAIFSet, #(DAIFSC_ALL)			// Disable all interrupts

	mov		x20, x0
	mov		x21, #0

	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add		x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0


	// Get the kernel memory parameters from the boot args
	ldr		x22, [x20, BA_VIRT_BASE]		// Get the kernel virt base
	ldr		x23, [x20, BA_PHYS_BASE]		// Get the kernel phys base
	ldr		x24, [x20, BA_MEM_SIZE]			// Get the physical memory size
	adrp	x25, EXT(bootstrap_pagetables)@page	// Get the start of the page tables
	ldr		x26, [x20, BA_BOOT_FLAGS]		// Get the kernel boot flags

	// Clear the register that will be used to store the userspace thread pointer and CPU number.
	// We may not actually be booting from ordinal CPU 0, so this register will be updated
	// in ml_parse_cpu_topology(), which happens later in bootstrap.
	msr		TPIDRRO_EL0, x21

	// Set up exception stack pointer
	adrp	x0, EXT(excepstack_top)@page		// Load top of exception stack
	add		x0, x0, EXT(excepstack_top)@pageoff
	add		x0, x0, x22				// Convert to KVA
	sub		x0, x0, x23

	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl		EXT(pinst_spsel_1)
#else
	msr		SPSel, #1
#endif

	mov		sp, x0

	// Set up interrupt stack pointer
	adrp	x0, EXT(intstack_top)@page		// Load top of irq stack
	add		x0, x0, EXT(intstack_top)@pageoff
	add		x0, x0, x22				// Convert to KVA
	sub		x0, x0, x23
	msr		SPSel, #0				// Set SP_EL0 to interrupt stack
	mov		sp, x0

	// Load address to the C init routine into link register
	adrp	lr, EXT(arm_init)@page
	add		lr, lr, EXT(arm_init)@pageoff
	add		lr, lr, x22				// Convert to KVA
	sub		lr, lr, x23

	/*
	 * Set up the bootstrap page tables with a single block entry for the V=P
	 * mapping, a single block entry for the trampolined kernel address (KVA),
	 * and all else invalid. This requires four pages:
	 *	Page 1 - V=P L1 table
	 *	Page 2 - V=P L2 table
	 *	Page 3 - KVA L1 table
	 *	Page 4 - KVA L2 table
	 */

	// Invalidate all entries in the bootstrap page tables
	mov		x0, #(ARM_TTE_EMPTY)			// Load invalid entry template
	mov		x1, x25					// Start at V=P pagetable root
	mov		x2, #(TTE_PGENTRIES)			// Load number of entries per page
	lsl		x2, x2, #2				// Shift by 2 for num entries on 4 pages

Linvalidate_bootstrap:						// do {
	str		x0, [x1], #(1 << TTE_SHIFT)		//   Invalidate and advance
	subs	x2, x2, #1				//   entries--
	b.ne	Linvalidate_bootstrap			// } while (entries != 0)

	/*
	 * In order to reclaim memory on targets where TZ0 (or some other entity)
	 * must be located at the base of memory, iBoot may set the virtual and
	 * physical base addresses to immediately follow whatever lies at the
	 * base of physical memory.
	 *
	 * If the base address belongs to TZ0, it may be dangerous for xnu to map
	 * it (as it may be prefetched, despite being technically inaccessible).
	 * In order to avoid this issue while keeping the mapping code simple, we
	 * may continue to use block mappings, but we will map only from the
	 * kernelcache mach header to the end of memory.
	 *
	 * Given that iBoot guarantees that the unslid kernelcache base address
	 * will begin on an L2 boundary, this should prevent us from accidentally
	 * mapping TZ0.
	 */
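	/*
	 * Roughly equivalent C for the header check below (illustrative; the raw
	 * offsets 0x18, 0x20, and 0x38 are the mach_header_64 flags field, the
	 * first load command, and segment_command_64.vmaddr respectively):
	 *
	 *     kernel_mach_header_t *mh = &_mh_execute_header;
	 *     if (mh->flags & MH_DYLIB_IN_CACHE) {
	 *         struct segment_command_64 *seg = (void *)(mh + 1);
	 *         if (seg->cmd != LC_SEGMENT_64)
	 *             for (;;) ;                            // hang
	 *         uint64_t slide = (uintptr_t)mh - seg->vmaddr;
	 *         base = VM_KERNEL_LINK_ADDRESS + slide;    // kernelcache base
	 *     } else {
	 *         base = (uintptr_t)mh;                     // kernel mach header
	 *     }
	 */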
	adrp	x0, EXT(_mh_execute_header)@page	// address of kernel mach header
	add		x0, x0, EXT(_mh_execute_header)@pageoff
	ldr		w1, [x0, #0x18]				// load mach_header->flags
	tbz		w1, #0x1f, Lkernelcache_base_found	// if MH_DYLIB_IN_CACHE unset, base is kernel mach header
	ldr		w1, [x0, #0x20]				// load first segment cmd (offset sizeof(kernel_mach_header_t))
	cmp		w1, #0x19				// must be LC_SEGMENT_64
	bne		.
	ldr		x1, [x0, #0x38]				// load first segment vmaddr
	sub		x1, x0, x1				// compute slide
	MOV64	x0, VM_KERNEL_LINK_ADDRESS
	add		x0, x0, x1				// base is kernel link address + slide

Lkernelcache_base_found:
5ba3f43e 656 /*
d9a64523
A
657 * Adjust physical and virtual base addresses to account for physical
658 * memory preceeding xnu Mach-O header
659 * x22 - Kernel virtual base
660 * x23 - Kernel physical base
661 * x24 - Physical memory size
5ba3f43e 662 */
d9a64523
A
663 sub x18, x0, x23
664 sub x24, x24, x18
665 add x22, x22, x18
666 add x23, x23, x18
667
5ba3f43e 668 /*
d9a64523
A
669 * x0 - V=P virtual cursor
670 * x4 - V=P physical cursor
671 * x14 - KVA virtual cursor
672 * x15 - KVA physical cursor
5ba3f43e 673 */
d9a64523
A
674 mov x4, x0
675 mov x14, x22
676 mov x15, x23
5ba3f43e 677
d9a64523
A
678 /*
679 * Allocate L1 tables
680 * x1 - V=P L1 page
681 * x3 - KVA L1 page
682 * x2 - free mem pointer from which we allocate a variable number of L2
683 * pages. The maximum number of bootstrap page table pages is limited to
684 * BOOTSTRAP_TABLE_SIZE. For a 2G 4k page device, assuming the worst-case
685 * slide, we need 1xL1 and up to 3xL2 pages (1GB mapped per L1 entry), so
686 * 8 total pages for V=P and KVA.
5ba3f43e 687 */
d9a64523
A
688 mov x1, x25
689 add x3, x1, PGBYTES
690 mov x2, x3
5ba3f43e 691
d9a64523
A
692 /*
693 * Setup the V=P bootstrap mapping
694 * x5 - total number of L2 entries to allocate
5ba3f43e 695 */
d9a64523
A
696 lsr x5, x24, #(ARM_TT_L2_SHIFT)
697 /* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */
698 create_bootstrap_mapping x0, x4, x5, x1, x2, x6, x10, x11, x12, x13
5ba3f43e 699
d9a64523
A
700 /* Setup the KVA bootstrap mapping */
701 lsr x5, x24, #(ARM_TT_L2_SHIFT)
702 create_bootstrap_mapping x14, x15, x5, x3, x2, x9, x10, x11, x12, x13
5ba3f43e
A
703
704 /* Ensure TTEs are visible */
705 dsb ish
706
cb323159 707
5ba3f43e
A
708 b common_start
709
/*
 * Begin common CPU initialization
 *
 * Register state:
 *	x20 - PA of boot args
 *	x21 - zero on cold boot, PA of cpu data on warm reset
 *	x22 - Kernel virtual base
 *	x23 - Kernel physical base
 *	x25 - PA of the V=P pagetable root
 *	lr  - KVA of C init routine
 *	sp  - SP_EL0 selected
 *
 *	SP_EL0 - KVA of CPU's interrupt stack
 *	SP_EL1 - KVA of CPU's exception stack
 *	TPIDRRO_EL0 - CPU number
 */
common_start:

#if HAS_NEX_PG
	mov	x19, lr
	bl	EXT(set_nex_pg)
	mov	lr, x19
#endif

	// Set the translation control register.
	adrp	x0, EXT(sysreg_restore)@page		// Load TCR value from the system register restore structure
	add		x0, x0, EXT(sysreg_restore)@pageoff
	ldr		x1, [x0, SR_RESTORE_TCR_EL1]
	MSR_TCR_EL1_X1

	/* Set up translation table base registers.
	 *	TTBR0 - V=P table @ top of kernel
	 *	TTBR1 - KVA table @ top of kernel + 1 page
	 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Note that for KTRR configurations, the V=P map will be modified by
	 * arm_vm_init.c.
	 */
#endif
	and		x0, x25, #(TTBR_BADDR_MASK)
	mov		x19, lr
	bl		EXT(set_mmu_ttb)
	mov		lr, x19
	add		x0, x25, PGBYTES
	and		x0, x0, #(TTBR_BADDR_MASK)
	MSR_TTBR1_EL1_X0

	// Set up MAIR: program an attribute encoding for each pmap cache attribute index
	mov		x0, xzr
	mov		x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
	orr		x0, x0, x1
	mov		x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
	orr		x0, x0, x1
	mov		x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
	orr		x0, x0, x1
	mov		x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
	orr		x0, x0, x1
	mov		x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
	orr		x0, x0, x1
	mov		x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
	orr		x0, x0, x1
	mov		x1, #(MAIR_POSTED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_REORDERED))
	orr		x0, x0, x1
	mov		x1, #(MAIR_POSTED_COMBINED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED))
	orr		x0, x0, x1
	msr		MAIR_EL1, x0
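	/*
	 * Each CACHE_ATTRINDX_* above selects an attribute field in MAIR_EL1
	 * (MAIR_ATTR_SHIFT(n) is assumed to be 8*n, one byte per index), so a
	 * page-table entry that encodes attribute index n gets the paired MAIR_*
	 * memory type, e.g. CACHE_ATTRINDX_WRITEBACK -> MAIR_WRITEBACK (normal
	 * write-back memory) and CACHE_ATTRINDX_DISABLE -> MAIR_DISABLE (device).
	 */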
	isb
	tlbi	vmalle1
	dsb		ish

#if defined(APPLEHURRICANE)
	// <rdar://problem/26726624> Increase Snoop reservation in EDB to reduce starvation risk
	// Needs to be done before MMU is enabled
	HID_INSERT_BITS	ARM64_REG_HID5, ARM64_REG_HID5_CrdEdbSnpRsvd_mask, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE, x12
#endif

#if defined(BCM2837)
	// Setup timer interrupt routing; must be done before MMU is enabled
	mrs		x15, MPIDR_EL1				// Load MPIDR to get CPU number
	and		x15, x15, #0xFF				// CPU number is in MPIDR Affinity Level 0
	mov		x0, #0x4000
	lsl		x0, x0, #16
	add		x0, x0, #0x0040				// x0: 0x4000004X Core Timers interrupt control
	add		x0, x0, x15, lsl #2
	mov		w1, #0xF0				// x1: 0xF0 Route to Core FIQs
	str		w1, [x0]
	isb		sy
#endif

#ifndef __ARM_IC_NOALIAS_ICACHE__
	/* Invalidate the TLB and icache on systems that do not guarantee that the
	 * caches are invalidated on reset.
	 */
	tlbi	vmalle1
	ic		iallu
#endif

	/* If x21 is not 0, then this is either the start_cpu path or
	 * the resume_idle_cpu path.  cpu_ttep should already be
	 * populated, so just switch to the kernel_pmap now.
	 */

	cbz		x21, 1f
	adrp	x0, EXT(cpu_ttep)@page
	add		x0, x0, EXT(cpu_ttep)@pageoff
	ldr		x0, [x0]
	MSR_TTBR1_EL1_X0
1:

	// Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
	/* If this is not the first reset of the boot CPU, the alternate mapping
	 * for the exception vectors will be set up, so use it.  Otherwise, we
	 * should use the mapping located in the kernelcache mapping.
	 */
	MOV64	x0, ARM_KERNEL_PROTECT_EXCEPTION_START

	cbnz	x21, 1f
#endif /* __ARM_KERNEL_PROTECT__ */
	adrp	x0, EXT(ExceptionVectorsBase)@page	// Load exception vectors base address
	add		x0, x0, EXT(ExceptionVectorsBase)@pageoff
	add		x0, x0, x22				// Convert exception vector address to KVA
	sub		x0, x0, x23
1:
	MSR_VBAR_EL1_X0

1:
#ifdef HAS_APPLE_PAC

	// Enable caches, MMU, ROP and JOP
	MOV64	x0, SCTLR_EL1_DEFAULT
	orr		x0, x0, #(SCTLR_PACIB_ENABLED)		/* IB is ROP */

	MOV64	x1, SCTLR_JOP_KEYS_ENABLED
	orr		x0, x0, x1
#else  /* HAS_APPLE_PAC */

	// Enable caches and MMU
	MOV64	x0, SCTLR_EL1_DEFAULT
#endif /* HAS_APPLE_PAC */
	MSR_SCTLR_EL1_X0
	isb		sy

	MOV64	x1, SCTLR_EL1_DEFAULT
#if HAS_APPLE_PAC
	orr		x1, x1, #(SCTLR_PACIB_ENABLED)
	MOV64	x2, SCTLR_JOP_KEYS_ENABLED
	orr		x1, x1, x2
#endif /* HAS_APPLE_PAC */
	cmp		x0, x1
	bne		.
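	/*
	 * Sanity check: the compare above rebuilds the expected SCTLR_EL1 value
	 * (including the PAC bits on HAS_APPLE_PAC configurations) and spins
	 * forever if it does not match the value just programmed through
	 * MSR_SCTLR_EL1_X0, which is still held in x0.
	 */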

#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
	/* Watchtower
	 *
	 * If we have a Watchtower monitor it will set up CPACR_EL1 for us; touching
	 * it here would trap to EL3.
	 */

	// Enable NEON
	mov		x0, #(CPACR_FPEN_ENABLE)
	msr		CPACR_EL1, x0
#endif

	// Clear thread pointer
	msr		TPIDR_EL1, xzr				// Set thread register


#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Initialization common to all Apple targets
	ARM64_IS_PCORE x15
	ARM64_READ_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
	orr		x12, x12, ARM64_REG_HID4_DisDcMVAOps
	orr		x12, x12, ARM64_REG_HID4_DisDcSWL2Ops
	ARM64_WRITE_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
#endif  // APPLE_ARM64_ARCH_FAMILY

	// Read MIDR before start of per-SoC tunables
	mrs		x12, MIDR_EL1

#if defined(APPLELIGHTNING)
	// Cebu <B0 is deprecated and unsupported (see rdar://problem/42835678)
	EXEC_COREEQ_REVLO MIDR_CEBU_LIGHTNING, CPU_VERSION_B0, x12, x13
	b	.
	EXEC_END
	EXEC_COREEQ_REVLO MIDR_CEBU_THUNDER, CPU_VERSION_B0, x12, x13
	b	.
	EXEC_END
#endif

	APPLY_TUNABLES x12, x13



#if HAS_CLUSTER
	// Unmask external IRQs if we're restarting from non-retention WFI
	mrs		x9, ARM64_REG_CYC_OVRD
	and		x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
	msr		ARM64_REG_CYC_OVRD, x9
#endif

	// If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
	cbnz	x21, Ltrampoline

	// Set KVA of boot args as first arg
	add		x0, x20, x22
	sub		x0, x0, x23

#if KASAN
	mov	x20, x0
	mov	x21, lr

	// x0: boot args
	// x1: KVA page table phys base
	mrs	x1, TTBR1_EL1
	bl	EXT(kasan_bootstrap)

	mov	x0, x20
	mov	lr, x21
#endif

	// Return to arm_init()
	ret

Ltrampoline:
	// Load VA of the trampoline
	adrp	x0, arm_init_tramp@page
	add		x0, x0, arm_init_tramp@pageoff
	add		x0, x0, x22
	sub		x0, x0, x23

	// Branch to the trampoline
	br		x0

/*
 * V=P to KVA trampoline.
 *	x0 - KVA of cpu data pointer
 */
	.text
	.align 2
arm_init_tramp:
	/* On a warm boot, the full kernel translation table is initialized in
	 * addition to the bootstrap tables. The layout is as follows:
	 *
	 *  +--Top of Memory--+
	 *         ...
	 *  |                 |
	 *  |  Primary Kernel |
	 *  |   Trans. Table  |
	 *  |                 |
	 *  +--Top + 5 pages--+
	 *  |                 |
	 *  |  Invalid Table  |
	 *  |                 |
	 *  +--Top + 4 pages--+
	 *  |                 |
	 *  |    KVA Table    |
	 *  |                 |
	 *  +--Top + 2 pages--+
	 *  |                 |
	 *  |    V=P Table    |
	 *  |                 |
	 *  +--Top of Kernel--+
	 *  |                 |
	 *  |  Kernel Mach-O  |
	 *  |                 |
	 *         ...
	 *  +---Kernel Base---+
	 */


	mov		x19, lr
#if defined(HAS_VMSA_LOCK)
	bl		EXT(vmsa_lock)
#endif
	// Convert CPU data PA to VA and set as first argument
	mov		x0, x21
	bl		EXT(phystokv)

	mov		lr, x19

	/* Return to arm_init() */
	ret

//#include "globals_asm.h"

/* vim: set ts=4: */