/* osfmk/arm64/start.s (from the xnu-6153.141.1 sources) */
/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include "assym.s"
#include <arm64/exception_asm.h>

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif /* __ARM_KERNEL_PROTECT__ */


#if __APRR_SUPPORTED__

.macro MSR_APRR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl	EXT(pinst_set_aprr_el1)
#else
	msr	APRR_EL1, x0
#endif
.endmacro

.macro MSR_APRR_EL0_X0
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl	EXT(pinst_set_aprr_el0)
#else
	msr	APRR_EL0, x0
#endif
.endmacro

.macro MSR_APRR_SHADOW_MASK_EN_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl	EXT(pinst_set_aprr_shadow_mask_en_el1)
#else
	msr	APRR_SHADOW_MASK_EN_EL1, x0
#endif
.endmacro

#endif /* __APRR_SUPPORTED__ */

.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_vbar)
	mov	lr, x1
#else
	msr	VBAR_EL1, x0
#endif
.endmacro

.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x0, x1
	mov	x1, lr
	bl	EXT(pinst_set_tcr)
	mov	lr, x1
#else
	msr	TCR_EL1, x1
#endif
.endmacro

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_ttbr1)
	mov	lr, x1
#else
	msr	TTBR1_EL1, x0
#endif
.endmacro

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr

	// This may abort, do so on SP1
	bl	EXT(pinst_spsel_1)

	bl	EXT(pinst_set_sctlr)
	msr	SPSel, #0			// Back to SP0
	mov	lr, x1
#else
	msr	SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro
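
/*
 * Note on the MSR_*_X0/X1 wrappers above: when KERNEL_INTEGRITY_KTRR (and, for
 * the APRR variants, KERNEL_INTEGRITY_CTRR) is enabled, writes to these
 * critical system registers are routed through the pinst_set_* helpers instead
 * of being issued inline; otherwise each macro expands to a plain msr. Callers
 * that need lr preserved save and restore it around the call.
 */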

/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Variables:
 *	x19 - Reset handler data pointer
 *	x20 - Boot args pointer
 *	x21 - CPU data pointer
 */
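
/*
 * Rough C sketch of the lookup performed by the reset vector below
 * (illustrative only: the structure layout and field names here are
 * assumptions standing in for the assym.s offsets CPU_DATA_ENTRIES,
 * CPU_DATA_PADDR, CPU_PHYS_ID and CPU_RESET_HANDLER used by the assembly):
 *
 *	phys_id = MPIDR_EL1 & (HAS_CLUSTER ? 0xFFFF : 0xFF);
 *	for (entry = ResetHandlerData.cpu_data_entries;
 *	     entry < &ResetHandlerData.cpu_data_entries[MAX_CPUS]; entry++) {
 *		cpu_data = entry->cpu_data_paddr;
 *		if (cpu_data == NULL || cpu_data->cpu_phys_id != phys_id)
 *			continue;
 *		handler = cpu_data->cpu_reset_handler;
 *		if (handler == resume_idle_cpu || handler == start_cpu)
 *			handler();	// with x20 = boot args PA, x21 = cpu data PA
 *		break;
 *	}
 *	for (;;) ;		// hang if no entry matched or the handler returned
 */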
	.text
	.align 12
	.globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
	/*
	 * On reset, both RVBAR_EL1 and VBAR_EL1 point here. SPSel.SP is 1,
	 * so on reset the CPU will jump to offset 0x0 and on exceptions
	 * the CPU will jump to offset 0x200, 0x280, 0x300, or 0x380.
	 * In order for both the reset vector and exception vectors to
	 * coexist in the same space, the reset code is moved to the end
	 * of the exception vector area.
	 */
	b	EXT(reset_vector)

	/* EL1 SP1: These vectors trap errors during early startup on non-boot CPUs. */
	.align 9
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.

	.align 7
	.globl EXT(reset_vector)
LEXT(reset_vector)
	// Preserve x0 for start_first_cpu, if called
	// Unlock the core for debugging
	msr	OSLAR_EL1, xzr
	msr	DAIFSet, #(DAIFSC_ALL)			// Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	msr	VBAR_EL1, x0
#endif

#if __APRR_SUPPORTED__
	MOV64	x0, APRR_EL1_DEFAULT
#if XNU_MONITOR
	adrp	x4, EXT(pmap_ppl_locked_down)@page
	ldrb	w5, [x4, #EXT(pmap_ppl_locked_down)@pageoff]
	cmp	w5, #0
	b.ne	1f

	// If the PPL is not locked down, we start in PPL mode.
	MOV64	x0, APRR_EL1_PPL
1:
#endif /* XNU_MONITOR */

	MSR_APRR_EL1_X0

	// Load up the default APRR_EL0 value.
	MOV64	x0, APRR_EL0_DEFAULT
	MSR_APRR_EL0_X0
#endif /* __APRR_SUPPORTED__ */

#if defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Set KTRR registers immediately after wake/resume
	 *
	 * During power on reset, XNU stashed the kernel text region range values
	 * into __DATA,__const which should be protected by AMCC RoRgn at this point.
	 * Read this data and program/lock KTRR registers accordingly.
	 * If either value is zero, we're debugging the kernel, so skip programming KTRR.
	 */

	/* spin until bootstrap core has completed machine lockdown */
	adrp	x17, EXT(lockdown_done)@page
1:
	ldr	w18, [x17, EXT(lockdown_done)@pageoff]
	cbz	w18, 1b

	// load stashed rorgn_begin
	adrp	x17, EXT(rorgn_begin)@page
	add	x17, x17, EXT(rorgn_begin)@pageoff
	ldr	x17, [x17]
	// if rorgn_begin is zero, we're debugging. skip enabling ktrr
	cbz	x17, Lskip_ktrr

	// load stashed rorgn_end
	adrp	x19, EXT(rorgn_end)@page
	add	x19, x19, EXT(rorgn_end)@pageoff
	ldr	x19, [x19]
	cbz	x19, Lskip_ktrr

	// program and lock down KTRR
	// subtract one page from rorgn_end to make pinst insns NX
	msr	ARM64_REG_KTRR_LOWER_EL1, x17
	sub	x19, x19, #(1 << (ARM_PTE_SHIFT-12)), lsl #12
	msr	ARM64_REG_KTRR_UPPER_EL1, x19
	mov	x17, #1
	msr	ARM64_REG_KTRR_LOCK_EL1, x17
Lskip_ktrr:
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
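
/*
 * The KTRR sequence above is, roughly, the following C (illustrative only;
 * rorgn_begin/rorgn_end hold the stashed physical bounds of the kernel text
 * region):
 *
 *	while (lockdown_done == 0)
 *		;				// wait for the bootstrap core
 *	if (rorgn_begin != 0 && rorgn_end != 0) {
 *		KTRR_LOWER_EL1 = rorgn_begin;
 *		KTRR_UPPER_EL1 = rorgn_end - PAGE_SIZE;	// make the pinst page NX
 *		KTRR_LOCK_EL1  = 1;		// lock the region registers
 *	}
 */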

	// Process reset handlers
	adrp	x19, EXT(ResetHandlerData)@page		// Get address of the reset handler data
	add	x19, x19, EXT(ResetHandlerData)@pageoff
	mrs	x15, MPIDR_EL1				// Load MPIDR to get CPU number
#if HAS_CLUSTER
	and	x0, x15, #0xFFFF			// CPU number in Affinity0, cluster ID in Affinity1
#else
	and	x0, x15, #0xFF				// CPU number is in MPIDR Affinity Level 0
#endif
	ldr	x1, [x19, CPU_DATA_ENTRIES]		// Load start of data entries
	add	x3, x1, MAX_CPUS * 16			// end addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
	ldr	x21, [x1, CPU_DATA_PADDR]		// Load physical CPU data address
	cbz	x21, Lnext_cpu_data_entry
	ldr	w2, [x21, CPU_PHYS_ID]			// Load ccc cpu phys id
	cmp	x0, x2					// Compare cpu data phys cpu and MPIDR_EL1 phys cpu
	b.eq	Lfound_cpu_data_entry			// Branch if match
Lnext_cpu_data_entry:
	add	x1, x1, #16				// Increment to the next cpu data entry
	cmp	x1, x3
	b.eq	Lskip_cpu_reset_handler			// Not found
	b	Lcheck_cpu_data_entry			// loop
Lfound_cpu_data_entry:
#if defined(KERNEL_INTEGRITY_CTRR)
	/*
	 * Program and lock CTRR if this CPU is a non-boot cluster master. The boot
	 * cluster will be locked in machine_lockdown. pinst insns are protected by
	 * VMSA_LOCK. The A_PXN and A_MMUON_WRPROTECT options provide something
	 * close to KTRR behavior.
	 */

	/* spin until bootstrap core has completed machine lockdown */
	adrp	x17, EXT(lockdown_done)@page
1:
	ldr	w18, [x17, EXT(lockdown_done)@pageoff]
	cbz	w18, 1b

	// load stashed rorgn_begin
	adrp	x17, EXT(rorgn_begin)@page
	add	x17, x17, EXT(rorgn_begin)@pageoff
	ldr	x17, [x17]
	// if rorgn_begin is zero, we're debugging. skip enabling ctrr
	cbz	x17, Lskip_ctrr

	// load stashed rorgn_end
	adrp	x19, EXT(rorgn_end)@page
	add	x19, x19, EXT(rorgn_end)@pageoff
	ldr	x19, [x19]
	cbz	x19, Lskip_ctrr

	mrs	x18, ARM64_REG_CTRR_LOCK_EL1
	cbnz	x18, Lskip_ctrr			/* don't touch if already locked */
	ldr	w18, [x21, CLUSTER_MASTER]	/* cluster master is unsigned int (32bit) */
	cbz	w18, Lspin_ctrr_unlocked	/* non-cluster master spins if CTRR unlocked (unexpected) */
	msr	ARM64_REG_CTRR_A_LWR_EL1, x17
	msr	ARM64_REG_CTRR_A_UPR_EL1, x19
	mov	x18, #(CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT)
	msr	ARM64_REG_CTRR_CTL_EL1, x18
	mov	x18, #1
	msr	ARM64_REG_CTRR_LOCK_EL1, x18


	isb
	tlbi	vmalle1
	dsb	ish
	isb
Lspin_ctrr_unlocked:
	/* we shouldn't ever be here as cpu start is serialized by cluster in cpu_start(),
	 * and the first core started in a cluster is designated cluster master and locks
	 * both core and cluster. subsequent cores in the same cluster will run locked
	 * from the reset vector */
	mrs	x18, ARM64_REG_CTRR_LOCK_EL1
	cbz	x18, Lspin_ctrr_unlocked
Lskip_ctrr:
#endif
	adrp	x20, EXT(const_boot_args)@page
	add	x20, x20, EXT(const_boot_args)@pageoff
	ldr	x0, [x21, CPU_RESET_HANDLER]		// Call CPU reset handler
	cbz	x0, Lskip_cpu_reset_handler

	// Validate that our handler is one of the two expected handlers
	adrp	x2, EXT(resume_idle_cpu)@page
	add	x2, x2, EXT(resume_idle_cpu)@pageoff
	cmp	x0, x2
	beq	1f
	adrp	x2, EXT(start_cpu)@page
	add	x2, x2, EXT(start_cpu)@pageoff
	cmp	x0, x2
	bne	Lskip_cpu_reset_handler
1:

#if HAS_NEX_PG
	bl	EXT(set_nex_pg)
#endif

#if HAS_BP_RET
	bl	EXT(set_bp_ret)
#endif

#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Populate TPIDR_EL1 (in case the CPU takes an exception while
	 * turning on the MMU).
	 */
	ldr	x13, [x21, CPU_ACTIVE_THREAD]
	msr	TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ */

	blr	x0
Lskip_cpu_reset_handler:
	b	.				// Hang if the handler is NULL or returns

	.align 3
	.global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
	.global EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
	.space	(stSize_NUM),0
#endif

	.section __DATA_CONST,__const
	.align	3
	.globl	EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space	(rhdSize_NUM),0		// (filled with 0s)
	.text


/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point. Reset vector = (__start & ~0xfff)
 */
	.align 3
	.globl EXT(_start)
LEXT(_start)
	b	EXT(start_first_cpu)


/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
	.align 12, 0
	.global	EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
	/* EL1 SP 0 */
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	/* EL1 SP1 */
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	/* EL0 64 */
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	/* EL0 32 */
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 12, 0

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
.align ARM_PGSHIFT
.globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
	.align 2
	.globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
	adrp	lr, EXT(arm_init_idle_cpu)@page
	add	lr, lr, EXT(arm_init_idle_cpu)@pageoff
	b	start_cpu

	.align 2
	.globl EXT(start_cpu)
LEXT(start_cpu)
	adrp	lr, EXT(arm_init_cpu)@page
	add	lr, lr, EXT(arm_init_cpu)@pageoff
	b	start_cpu

	.align 2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	// This is done right away in reset vector for pre-KTRR devices
	// Set low reset vector now that we are in the KTRR-free zone
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

	// x20 set to BootArgs phys address
	// x21 set to cpu data phys address

	// Get the kernel memory parameters from the boot args
	ldr	x22, [x20, BA_VIRT_BASE]		// Get the kernel virt base
	ldr	x23, [x20, BA_PHYS_BASE]		// Get the kernel phys base
	ldr	x24, [x20, BA_MEM_SIZE]			// Get the physical memory size
	ldr	x25, [x20, BA_TOP_OF_KERNEL_DATA]	// Get the top of the kernel data
	ldr	x26, [x20, BA_BOOT_FLAGS]		// Get the kernel boot flags


	// Set TPIDRRO_EL0 with the CPU number
	ldr	x0, [x21, CPU_NUMBER_GS]
	msr	TPIDRRO_EL0, x0

	// Set the exception stack pointer
	ldr	x0, [x21, CPU_EXCEPSTACK_TOP]


	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov	x1, lr
	bl	EXT(pinst_spsel_1)
	mov	lr, x1
#else
	msr	SPSel, #1
#endif
	mov	sp, x0

	// Set the interrupt stack pointer
	ldr	x0, [x21, CPU_INTSTACK_TOP]
	msr	SPSel, #0
	mov	sp, x0

	// Convert lr to KVA
	add	lr, lr, x22
	sub	lr, lr, x23

	b	common_start

/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 * arg0 - Virtual address
 * arg1 - L1 table address
 * arg2 - L2 table address
 * arg3 - Scratch register
 * arg4 - Scratch register
 * arg5 - Scratch register
 */
.macro create_l1_table_entry
	and	$3, $0, #(ARM_TT_L1_INDEX_MASK)
	lsr	$3, $3, #(ARM_TT_L1_SHIFT)		// Get index in L1 table for L2 table
	lsl	$3, $3, #(TTE_SHIFT)			// Convert index into pointer offset
	add	$3, $1, $3				// Get L1 entry pointer
	mov	$4, #(ARM_TTE_BOOT_TABLE)		// Get L1 table entry template
	and	$5, $2, #(ARM_TTE_TABLE_MASK)		// Get address bits of L2 table
	orr	$5, $4, $5				// Create table entry for L2 table
	str	$5, [$3]				// Write entry to L1 table
.endmacro
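
/*
 * Roughly equivalent C (illustrative only; the constants are the proc_reg.h
 * masks and shifts used above):
 *
 *	uint64_t *l1_entry = (uint64_t *)l1_table +
 *	    ((va & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
 *	*l1_entry = ARM_TTE_BOOT_TABLE | (l2_table_pa & ARM_TTE_TABLE_MASK);
 */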

/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 * arg0 - Virtual address
 * arg1 - Physical address
 * arg2 - L2 table address
 * arg3 - Number of entries
 * arg4 - Scratch register
 * arg5 - Scratch register
 * arg6 - Scratch register
 * arg7 - Scratch register
 */
.macro create_l2_block_entries
	and	$4, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr	$4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)	// Get index in L2 table for block entry
	lsl	$4, $4, #(TTE_SHIFT)			// Convert index into pointer offset
	add	$4, $2, $4				// Get L2 entry pointer
	mov	$5, #(ARM_TTE_BOOT_BLOCK)		// Get L2 block entry template
	and	$6, $1, #(ARM_TTE_BLOCK_L2_MASK)	// Get address bits of block mapping
	orr	$6, $5, $6
	mov	$5, $3
	mov	$7, #(ARM_TT_L2_SIZE)
1:
	str	$6, [$4], #(1 << TTE_SHIFT)		// Write entry to L2 table and advance
	add	$6, $6, $7				// Increment the output address
	subs	$5, $5, #1				// Decrement the number of entries
	b.ne	1b
.endmacro
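
/*
 * Roughly equivalent C (illustrative only):
 *
 *	uint64_t *l2_entry = (uint64_t *)l2_table +
 *	    ((va & ARM_TT_L2_INDEX_MASK) >> ARM_TTE_BLOCK_L2_SHIFT);
 *	uint64_t tte = ARM_TTE_BOOT_BLOCK | (pa & ARM_TTE_BLOCK_L2_MASK);
 *	for (i = 0; i < num_entries; i++, tte += ARM_TT_L2_SIZE)
 *		*l2_entry++ = tte;
 */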

/*
 * arg0 - virtual start address
 * arg1 - physical start address
 * arg2 - number of entries to map
 * arg3 - L1 table address
 * arg4 - free space pointer
 * arg5 - scratch (entries mapped per loop)
 * arg6 - scratch
 * arg7 - scratch
 * arg8 - scratch
 * arg9 - scratch
 */
.macro create_bootstrap_mapping
	/* calculate entries left in this page */
	and	$5, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr	$5, $5, #(ARM_TT_L2_SHIFT)
	mov	$6, #(TTE_PGENTRIES)
	sub	$5, $6, $5

	/* allocate an L2 table */
3:	add	$4, $4, PGBYTES

	/* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */
	create_l1_table_entry	$0, $3, $4, $6, $7, $8

	/* determine how many entries to map this loop - the smaller of entries
	 * remaining in page and total entries left */
	cmp	$2, $5
	csel	$5, $2, $5, lt

	/* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3) */
	create_l2_block_entries	$0, $1, $4, $5, $6, $7, $8, $9

	/* subtract entries just mapped and bail out if we're done */
	subs	$2, $2, $5
	beq	2f

	/* entries left to map - advance base pointers */
	add	$0, $0, $5, lsl #(ARM_TT_L2_SHIFT)
	add	$1, $1, $5, lsl #(ARM_TT_L2_SHIFT)

	mov	$5, #(TTE_PGENTRIES)	/* subsequent loops map (up to) a whole L2 page */
	b	3b
2:
.endmacro
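
/*
 * Taken together, create_bootstrap_mapping behaves roughly like this C loop
 * (illustrative only; "alloc" is the free-space pointer passed in arg4):
 *
 *	left = TTE_PGENTRIES - ((va & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
 *	while (num_entries != 0) {
 *		l2_table = (alloc += PGBYTES);		// carve out a fresh L2 table page
 *		create_l1_table_entry(va, l1_table, l2_table);
 *		n = MIN(num_entries, left);
 *		create_l2_block_entries(va, pa, l2_table, n);
 *		num_entries -= n;
 *		va += (uint64_t)n << ARM_TT_L2_SHIFT;
 *		pa += (uint64_t)n << ARM_TT_L2_SHIFT;
 *		left = TTE_PGENTRIES;			// later L2 pages are filled from the start
 *	}
 */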

/*
 * _start_first_cpu
 * Cold boot init routine. Called from __start
 * x0 - Boot args
 */
	.align 2
	.globl EXT(start_first_cpu)
LEXT(start_first_cpu)

	// Unlock the core for debugging
	msr	OSLAR_EL1, xzr
	msr	DAIFSet, #(DAIFSC_ALL)			// Disable all interrupts

	mov	x20, x0
	mov	x21, #0

	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0

#if __APRR_SUPPORTED__
	// Save the LR
	mov	x1, lr

#if XNU_MONITOR
	// If the PPL is supported, we start out in PPL mode.
	MOV64	x0, APRR_EL1_PPL
#else
	// Otherwise, we start out in default mode.
	MOV64	x0, APRR_EL1_DEFAULT
#endif

	// Set the APRR state for EL1.
	MSR_APRR_EL1_X0

	// Set the APRR state for EL0.
	MOV64	x0, APRR_EL0_DEFAULT
	MSR_APRR_EL0_X0


	// Restore the LR.
	mov	lr, x1
#endif /* __APRR_SUPPORTED__ */

	// Get the kernel memory parameters from the boot args
	ldr	x22, [x20, BA_VIRT_BASE]		// Get the kernel virt base
	ldr	x23, [x20, BA_PHYS_BASE]		// Get the kernel phys base
	ldr	x24, [x20, BA_MEM_SIZE]			// Get the physical memory size
	ldr	x25, [x20, BA_TOP_OF_KERNEL_DATA]	// Get the top of the kernel data
	ldr	x26, [x20, BA_BOOT_FLAGS]		// Get the kernel boot flags

	// Clear the register that will be used to store the userspace thread pointer and CPU number.
	// We may not actually be booting from ordinal CPU 0, so this register will be updated
	// in ml_parse_cpu_topology(), which happens later in bootstrap.
	msr	TPIDRRO_EL0, x21

	// Set up exception stack pointer
	adrp	x0, EXT(excepstack_top)@page		// Load top of exception stack
	add	x0, x0, EXT(excepstack_top)@pageoff
	add	x0, x0, x22				// Convert to KVA
	sub	x0, x0, x23

	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl	EXT(pinst_spsel_1)
#else
	msr	SPSel, #1
#endif

	mov	sp, x0

	// Set up interrupt stack pointer
	adrp	x0, EXT(intstack_top)@page		// Load top of irq stack
	add	x0, x0, EXT(intstack_top)@pageoff
	add	x0, x0, x22				// Convert to KVA
	sub	x0, x0, x23
	msr	SPSel, #0				// Set SP_EL0 to interrupt stack
	mov	sp, x0

	// Load the address of the C init routine into the link register
	adrp	lr, EXT(arm_init)@page
	add	lr, lr, EXT(arm_init)@pageoff
	add	lr, lr, x22				// Convert to KVA
	sub	lr, lr, x23

	/*
	 * Set up the bootstrap page tables with a single block entry for the V=P
	 * mapping, a single block entry for the trampolined kernel address (KVA),
	 * and all else invalid. This requires four pages:
	 *	Page 1 - V=P L1 table
	 *	Page 2 - V=P L2 table
	 *	Page 3 - KVA L1 table
	 *	Page 4 - KVA L2 table
	 */

	// Invalidate all entries in the bootstrap page tables
	mov	x0, #(ARM_TTE_EMPTY)			// Load invalid entry template
	mov	x1, x25					// Start at top of kernel
	mov	x2, #(TTE_PGENTRIES)			// Load number of entries per page
	lsl	x2, x2, #2				// Shift by 2 for num entries on 4 pages

Linvalidate_bootstrap:					// do {
	str	x0, [x1], #(1 << TTE_SHIFT)		// Invalidate and advance
	subs	x2, x2, #1				// entries--
	b.ne	Linvalidate_bootstrap			// } while (entries != 0)
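
	/*
	 * Equivalent to: for (i = 0; i < 4 * TTE_PGENTRIES; i++) tt[i] = ARM_TTE_EMPTY;
	 * where tt points at the first of the four bootstrap table pages at the
	 * top of the kernel (x25).
	 */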

	/*
	 * In order to reclaim memory on targets where TZ0 (or some other entity)
	 * must be located at the base of memory, iBoot may set the virtual and
	 * physical base addresses to immediately follow whatever lies at the
	 * base of physical memory.
	 *
	 * If the base address belongs to TZ0, it may be dangerous for xnu to map
	 * it (as it may be prefetched, despite being technically inaccessible).
	 * In order to avoid this issue while keeping the mapping code simple, we
	 * may continue to use block mappings, but we will only map xnu's mach
	 * header to the end of memory.
	 *
	 * Given that iBoot guarantees that the unslid kernelcache base address
	 * will begin on an L2 boundary, this should prevent us from accidentally
	 * mapping TZ0.
	 */
	adrp	x0, EXT(_mh_execute_header)@page	// Use xnu's mach header as the start address
	add	x0, x0, EXT(_mh_execute_header)@pageoff

	/*
	 * Adjust physical and virtual base addresses to account for physical
	 * memory preceding the xnu Mach-O header
	 * x22 - Kernel virtual base
	 * x23 - Kernel physical base
	 * x24 - Physical memory size
	 */
	sub	x18, x0, x23
	sub	x24, x24, x18
	add	x22, x22, x18
	add	x23, x23, x18

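	/*
	 * In effect (illustrative C):
	 *
	 *	offset     = mh_execute_header_pa - phys_base;	// memory below xnu's Mach-O header
	 *	mem_size  -= offset;
	 *	virt_base += offset;
	 *	phys_base += offset;
	 */
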
	/*
	 * x0  - V=P virtual cursor
	 * x4  - V=P physical cursor
	 * x14 - KVA virtual cursor
	 * x15 - KVA physical cursor
	 */
	mov	x4, x0
	mov	x14, x22
	mov	x15, x23

	/*
	 * Allocate L1 tables
	 * x1 - V=P L1 page
	 * x3 - KVA L1 page
	 * x2 - free mem pointer from which we allocate a variable number of L2
	 * pages. The maximum number of bootstrap page table pages is limited to
	 * BOOTSTRAP_TABLE_SIZE. For a 2G 4k page device, assuming the worst-case
	 * slide, we need 1xL1 and up to 3xL2 pages (1GB mapped per L1 entry), so
	 * 8 total pages for V=P and KVA.
	 */
	mov	x1, x25
	add	x3, x1, PGBYTES
	mov	x2, x3

	/*
	 * Setup the V=P bootstrap mapping
	 * x5 - total number of L2 entries to allocate
	 */
	lsr	x5, x24, #(ARM_TT_L2_SHIFT)
	/* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */
	create_bootstrap_mapping x0, x4, x5, x1, x2, x6, x10, x11, x12, x13

	/* Setup the KVA bootstrap mapping */
	lsr	x5, x24, #(ARM_TT_L2_SHIFT)
	create_bootstrap_mapping x14, x15, x5, x3, x2, x9, x10, x11, x12, x13

	/* Ensure TTEs are visible */
	dsb	ish


	b	common_start

/*
 * Begin common CPU initialization
 *
 * Register state:
 *	x20 - PA of boot args
 *	x21 - zero on cold boot, PA of cpu data on warm reset
 *	x22 - Kernel virtual base
 *	x23 - Kernel physical base
 *	x25 - PA of the end of the kernel
 *	lr  - KVA of C init routine
 *	sp  - SP_EL0 selected
 *
 *	SP_EL0 - KVA of CPU's interrupt stack
 *	SP_EL1 - KVA of CPU's exception stack
 *	TPIDRRO_EL0 - CPU number
 */
common_start:
	// Set the translation control register.
	adrp	x0, EXT(sysreg_restore)@page		// Load TCR value from the system register restore structure
	add	x0, x0, EXT(sysreg_restore)@pageoff
	ldr	x1, [x0, SR_RESTORE_TCR_EL1]
	MSR_TCR_EL1_X1

	/* Set up translation table base registers.
	 *	TTBR0 - V=P table @ top of kernel
	 *	TTBR1 - KVA table @ top of kernel + 1 page
	 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Note that for KTRR configurations, the V=P map will be modified by
	 * arm_vm_init.c.
	 */
#endif
	and	x0, x25, #(TTBR_BADDR_MASK)
	mov	x19, lr
	bl	EXT(set_mmu_ttb)
	mov	lr, x19
	add	x0, x25, PGBYTES
	and	x0, x0, #(TTBR_BADDR_MASK)
	MSR_TTBR1_EL1_X0

	// Set up MAIR attr0 for normal memory, attr1 for device memory
	mov	x0, xzr
	mov	x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
	orr	x0, x0, x1
	mov	x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
	orr	x0, x0, x1
	mov	x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
	orr	x0, x0, x1
	mov	x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
	orr	x0, x0, x1
	mov	x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_REORDERED))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED_COMBINED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED))
	orr	x0, x0, x1
	msr	MAIR_EL1, x0
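
	/*
	 * The value written to MAIR_EL1 above is, in effect (illustrative C):
	 *
	 *	mair  = MAIR_WRITEBACK      << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK);
	 *	mair |= MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK);
	 *	...and so on for DISABLE, WRITETHRU, WRITECOMB, POSTED, POSTED_REORDERED
	 *	and POSTED_COMBINED_REORDERED: one 8-bit attribute field per cache
	 *	attribute index.
	 */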

#if defined(APPLEHURRICANE)

	// <rdar://problem/26726624> Increase Snoop reservation in EDB to reduce starvation risk
	// Needs to be done before MMU is enabled
	mrs	x12, ARM64_REG_HID5
	and	x12, x12, (~ARM64_REG_HID5_CrdEdbSnpRsvd_mask)
	orr	x12, x12, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE
	msr	ARM64_REG_HID5, x12

#endif

#if defined(BCM2837)
	// Setup timer interrupt routing; must be done before MMU is enabled
	mrs	x15, MPIDR_EL1				// Load MPIDR to get CPU number
	and	x15, x15, #0xFF				// CPU number is in MPIDR Affinity Level 0
	mov	x0, #0x4000
	lsl	x0, x0, #16
	add	x0, x0, #0x0040				// x0: 0x4000004X Core Timers interrupt control
	add	x0, x0, x15, lsl #2
	mov	w1, #0xF0				// x1: 0xF0 Route to Core FIQs
	str	w1, [x0]
	isb	sy
#endif



#ifndef __ARM_IC_NOALIAS_ICACHE__
	/* Invalidate the TLB and icache on systems that do not guarantee that the
	 * caches are invalidated on reset.
	 */
	tlbi	vmalle1
	ic	iallu
#endif

	/* If x21 is not 0, then this is either the start_cpu path or
	 * the resume_idle_cpu path. cpu_ttep should already be
	 * populated, so just switch to the kernel_pmap now.
	 */

	cbz	x21, 1f
	adrp	x0, EXT(cpu_ttep)@page
	add	x0, x0, EXT(cpu_ttep)@pageoff
	ldr	x0, [x0]
	MSR_TTBR1_EL1_X0
1:

	// Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
	/* If this is not the first reset of the boot CPU, the alternate mapping
	 * for the exception vectors will be set up, so use it. Otherwise, we
	 * should use the mapping located in the kernelcache mapping.
	 */
	MOV64	x0, ARM_KERNEL_PROTECT_EXCEPTION_START

	cbnz	x21, 1f
#endif /* __ARM_KERNEL_PROTECT__ */
	adrp	x0, EXT(ExceptionVectorsBase)@page	// Load exception vectors base address
	add	x0, x0, EXT(ExceptionVectorsBase)@pageoff
	add	x0, x0, x22				// Convert exception vector address to KVA
	sub	x0, x0, x23
1:
	MSR_VBAR_EL1_X0

1:
#ifdef HAS_APPLE_PAC
#ifdef __APSTS_SUPPORTED__
	mrs	x0, ARM64_REG_APSTS_EL1
	and	x1, x0, #(APSTS_EL1_MKEYVld)
	cbz	x1, 1b					// Poll APSTS_EL1.MKEYVld
	mrs	x0, ARM64_REG_APCTL_EL1
	orr	x0, x0, #(APCTL_EL1_AppleMode)
	orr	x0, x0, #(APCTL_EL1_KernKeyEn)
	and	x0, x0, #~(APCTL_EL1_EnAPKey0)
	msr	ARM64_REG_APCTL_EL1, x0
#else
	mrs	x0, ARM64_REG_APCTL_EL1
	and	x1, x0, #(APCTL_EL1_MKEYVld)
	cbz	x1, 1b					// Poll APCTL_EL1.MKEYVld
	orr	x0, x0, #(APCTL_EL1_AppleMode)
	orr	x0, x0, #(APCTL_EL1_KernKeyEn)
	msr	ARM64_REG_APCTL_EL1, x0
#endif /* APSTS_SUPPORTED */

	/* ISB necessary to ensure APCTL_EL1_AppleMode logic enabled before proceeding */
	isb	sy
	/* Load static kernel key diversification values */
	ldr	x0, =KERNEL_ROP_ID
	/* set ROP key. must write at least once to pickup mkey per boot diversification */
	msr	APIBKeyLo_EL1, x0
	add	x0, x0, #1
	msr	APIBKeyHi_EL1, x0
	add	x0, x0, #1
	msr	APDBKeyLo_EL1, x0
	add	x0, x0, #1
	msr	APDBKeyHi_EL1, x0
	add	x0, x0, #1
	msr	ARM64_REG_KERNELKEYLO_EL1, x0
	add	x0, x0, #1
	msr	ARM64_REG_KERNELKEYHI_EL1, x0
	/* set JOP key. must write at least once to pickup mkey per boot diversification */
	add	x0, x0, #1
	msr	APIAKeyLo_EL1, x0
	add	x0, x0, #1
	msr	APIAKeyHi_EL1, x0
	add	x0, x0, #1
	msr	APDAKeyLo_EL1, x0
	add	x0, x0, #1
	msr	APDAKeyHi_EL1, x0
	/* set G key */
	add	x0, x0, #1
	msr	APGAKeyLo_EL1, x0
	add	x0, x0, #1
	msr	APGAKeyHi_EL1, x0

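	/*
	 * Key schedule sketch: the B-key (ROP), kernel key, A-key (JOP) and G-key
	 * registers above are all seeded from KERNEL_ROP_ID, each successive Lo/Hi
	 * half one increment apart, so every key register receives a distinct,
	 * statically derived value; the per-boot diversification mentioned in the
	 * comments comes from the hardware master key that these writes pick up.
	 */
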
	// Enable caches, MMU, ROP and JOP
	mov	x0, #(SCTLR_EL1_DEFAULT & 0xFFFF)
	mov	x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000)
	orr	x0, x0, x1
	orr	x0, x0, #(SCTLR_PACIB_ENABLED)		/* IB is ROP */

#if DEBUG || DEVELOPMENT
	and	x2, x26, BA_BOOT_FLAGS_DISABLE_JOP
#if __APCFG_SUPPORTED__
	// for APCFG systems, JOP keys are always on for EL1 unless ELXENKEY is cleared.
	// JOP keys for EL0 will be toggled on the first time we pmap_switch to a pmap that has JOP enabled
	cbz	x2, Lenable_mmu
	mrs	x3, APCFG_EL1
	and	x3, x3, #~(APCFG_EL1_ELXENKEY)
	msr	APCFG_EL1, x3
#else /* __APCFG_SUPPORTED__ */
	cbnz	x2, Lenable_mmu
#endif /* __APCFG_SUPPORTED__ */
#endif /* DEBUG || DEVELOPMENT */

#if !__APCFG_SUPPORTED__
	MOV64	x1, SCTLR_JOP_KEYS_ENABLED
	orr	x0, x0, x1
#endif /* !__APCFG_SUPPORTED__ */
Lenable_mmu:
#else /* HAS_APPLE_PAC */

	// Enable caches and MMU
	mov	x0, #(SCTLR_EL1_DEFAULT & 0xFFFF)
	mov	x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000)
	orr	x0, x0, x1
#endif /* HAS_APPLE_PAC */
	MSR_SCTLR_EL1_X0
	isb	sy

	MOV32	x1, SCTLR_EL1_DEFAULT
#if HAS_APPLE_PAC
	orr	x1, x1, #(SCTLR_PACIB_ENABLED)
#if !__APCFG_SUPPORTED__
	MOV64	x2, SCTLR_JOP_KEYS_ENABLED
#if (DEBUG || DEVELOPMENT)
	// Ignore the JOP bits, since we can't predict at compile time whether BA_BOOT_FLAGS_DISABLE_JOP is set
	bic	x0, x0, x2
#else
	orr	x1, x1, x2
#endif /* (DEBUG || DEVELOPMENT) */
#endif /* !__APCFG_SUPPORTED__ */
#endif /* HAS_APPLE_PAC */
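
	/* Sanity check: x0 (the SCTLR_EL1 value just programmed) should match the
	 * SCTLR_EL1_DEFAULT-based value rebuilt in x1; spin here if the two have
	 * diverged. */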
	cmp	x0, x1
	bne	.

#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
	/* Watchtower
	 *
	 * If we have a Watchtower monitor, it will set up CPACR_EL1 for us;
	 * touching it here would trap to EL3.
	 */

	// Enable NEON
	mov	x0, #(CPACR_FPEN_ENABLE)
	msr	CPACR_EL1, x0
#endif

	// Clear thread pointer
	mov	x0, #0
	msr	TPIDR_EL1, x0				// Set thread register

#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Initialization common to all Apple targets
	ARM64_IS_PCORE x15
	ARM64_READ_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
	orr	x12, x12, ARM64_REG_HID4_DisDcMVAOps
	orr	x12, x12, ARM64_REG_HID4_DisDcSWL2Ops
	ARM64_WRITE_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
#endif // APPLE_ARM64_ARCH_FAMILY

#if defined(APPLETYPHOON)
	//
	// Typhoon-specific initialization
	// For tunable summary, see <rdar://problem/13503621>
	//

	//
	// Disable LSP flush with context switch to work around bug in LSP
	// that can cause Typhoon to wedge when CONTEXTIDR is written.
	// <rdar://problem/12387704>
	//

	mrs	x12, ARM64_REG_HID0
	orr	x12, x12, ARM64_REG_HID0_LoopBuffDisb
	msr	ARM64_REG_HID0, x12

	mrs	x12, ARM64_REG_HID1
	orr	x12, x12, ARM64_REG_HID1_rccDisStallInactiveIexCtl
	msr	ARM64_REG_HID1, x12

	mrs	x12, ARM64_REG_HID3
	orr	x12, x12, ARM64_REG_HID3_DisXmonSnpEvictTriggerL2StarvationMode
	msr	ARM64_REG_HID3, x12

	mrs	x12, ARM64_REG_HID5
	and	x12, x12, (~ARM64_REG_HID5_DisHwpLd)
	and	x12, x12, (~ARM64_REG_HID5_DisHwpSt)
	msr	ARM64_REG_HID5, x12

	// Change the default memcache data set ID from 0 to 15 for all agents
	mrs	x12, ARM64_REG_HID8
	orr	x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
#if ARM64_BOARD_CONFIG_T7001
	orr	x12, x12, ARM64_REG_HID8_DataSetID2_VALUE
#endif // ARM64_BOARD_CONFIG_T7001
	msr	ARM64_REG_HID8, x12
	isb	sy
#endif // APPLETYPHOON

#if defined(APPLETWISTER)

	// rdar://problem/36112905: Set CYC_CFG:skipInit to pull in isAlive by one DCLK
	// to work around potential hang. Must only be applied to Maui C0.
	mrs	x12, MIDR_EL1
	ubfx	x13, x12, #MIDR_EL1_PNUM_SHIFT, #12
	cmp	x13, #4		// Part number 4 => Maui, 5 => Malta/Elba
	bne	Lskip_isalive
	ubfx	x13, x12, #MIDR_EL1_VAR_SHIFT, #4
	cmp	x13, #2		// variant 2 => Maui C0
	b.lt	Lskip_isalive

	mrs	x12, ARM64_REG_CYC_CFG
	orr	x12, x12, ARM64_REG_CYC_CFG_skipInit
	msr	ARM64_REG_CYC_CFG, x12

Lskip_isalive:

	mrs	x12, ARM64_REG_HID11
	and	x12, x12, (~ARM64_REG_HID11_DisFillC1BubOpt)
	msr	ARM64_REG_HID11, x12

	// Change the default memcache data set ID from 0 to 15 for all agents
	mrs	x12, ARM64_REG_HID8
	orr	x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
	orr	x12, x12, (ARM64_REG_HID8_DataSetID2_VALUE | ARM64_REG_HID8_DataSetID3_VALUE)
	msr	ARM64_REG_HID8, x12

	// Use 4-cycle MUL latency to avoid denormal stalls
	mrs	x12, ARM64_REG_HID7
	orr	x12, x12, #ARM64_REG_HID7_disNexFastFmul
	msr	ARM64_REG_HID7, x12

	// disable reporting of TLB-multi-hit-error
	// <rdar://problem/22163216>
	mrs	x12, ARM64_REG_LSU_ERR_STS
	and	x12, x12, (~ARM64_REG_LSU_ERR_STS_L1DTlbMultiHitEN)
	msr	ARM64_REG_LSU_ERR_STS, x12

	isb	sy
#endif // APPLETWISTER

#if defined(APPLEHURRICANE)

	// IC prefetch configuration
	// <rdar://problem/23019425>
	mrs	x12, ARM64_REG_HID0
	and	x12, x12, (~ARM64_REG_HID0_ICPrefDepth_bmsk)
	orr	x12, x12, (1 << ARM64_REG_HID0_ICPrefDepth_bshift)
	orr	x12, x12, ARM64_REG_HID0_ICPrefLimitOneBrn
	msr	ARM64_REG_HID0, x12

	// disable reporting of TLB-multi-hit-error
	// <rdar://problem/22163216>
	mrs	x12, ARM64_REG_LSU_ERR_CTL
	and	x12, x12, (~ARM64_REG_LSU_ERR_CTL_L1DTlbMultiHitEN)
	msr	ARM64_REG_LSU_ERR_CTL, x12

	// disable crypto fusion across decode groups
	// <rdar://problem/27306424>
	mrs	x12, ARM64_REG_HID1
	orr	x12, x12, ARM64_REG_HID1_disAESFuseAcrossGrp
	msr	ARM64_REG_HID1, x12

#if defined(ARM64_BOARD_CONFIG_T8011)
	// Clear DisDcZvaCmdOnly
	// Per Myst A0/B0 tunables document
	// <rdar://problem/27627428> Myst: Confirm ACC Per-CPU Tunables
	mrs	x12, ARM64_REG_HID3
	and	x12, x12, ~ARM64_REG_HID3_DisDcZvaCmdOnly
	msr	ARM64_REG_HID3, x12

	mrs	x12, ARM64_REG_EHID3
	and	x12, x12, ~ARM64_REG_EHID3_DisDcZvaCmdOnly
	msr	ARM64_REG_EHID3, x12
#endif /* defined(ARM64_BOARD_CONFIG_T8011) */

#endif // APPLEHURRICANE

#if defined(APPLEMONSOON)

	/***** Tunables that apply to all skye cores, all chip revs *****/

	// <rdar://problem/28512310> SW WAR/eval: WKdm write ack lost when bif_wke_colorWrAck_XXaH asserts concurrently for both colors
	mrs	x12, ARM64_REG_HID8
	orr	x12, x12, #ARM64_REG_HID8_WkeForceStrictOrder
	msr	ARM64_REG_HID8, x12

	// Skip if not E-core
	ARM64_IS_PCORE x15
	cbnz	x15, Lskip_skye_ecore_only

	/***** Tunables that only apply to skye e-cores, all chip revs *****/

	// <rdar://problem/30423928>: Atomic launch eligibility is erroneously taken away when a store at SMB gets invalidated
	mrs	x12, ARM64_REG_EHID11
	and	x12, x12, ~(ARM64_REG_EHID11_SmbDrainThresh_mask)
	msr	ARM64_REG_EHID11, x12

Lskip_skye_ecore_only:

	SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL x12, MONSOON_CPU_VERSION_B0, Lskip_skye_a0_workarounds

	// Skip if not E-core
	cbnz	x15, Lskip_skye_a0_ecore_only

	/***** Tunables that only apply to skye e-cores, chip revs < B0 *****/

	// Disable downstream fill bypass logic
	// <rdar://problem/28545159> [Tunable] Skye - L2E fill bypass collision from both pipes to ecore
	mrs	x12, ARM64_REG_EHID5
	orr	x12, x12, ARM64_REG_EHID5_DisFillByp
	msr	ARM64_REG_EHID5, x12

	// Disable forwarding of return addresses to the NFP
	// <rdar://problem/30387067> Skye: FED incorrectly taking illegal va exception
	mrs	x12, ARM64_REG_EHID0
	orr	x12, x12, ARM64_REG_EHID0_nfpRetFwdDisb
	msr	ARM64_REG_EHID0, x12

Lskip_skye_a0_ecore_only:

	/***** Tunables that apply to all skye cores, chip revs < B0 *****/

	// Disable clock divider gating
	// <rdar://problem/30854420> [Tunable/Errata][cpu_1p_1e] [CPGV2] ACC power down issue when link FSM switches from GO_DN to CANCEL and at the same time upStreamDrain request is set.
	mrs	x12, ARM64_REG_HID6
	orr	x12, x12, ARM64_REG_HID6_DisClkDivGating
	msr	ARM64_REG_HID6, x12

	// Disable clock dithering
	// <rdar://problem/29022199> [Tunable] Skye A0: Linux: LLC PIO Errors
	mrs	x12, ARM64_REG_ACC_OVRD
	orr	x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr
	msr	ARM64_REG_ACC_OVRD, x12

	mrs	x12, ARM64_REG_ACC_EBLK_OVRD
	orr	x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr
	msr	ARM64_REG_ACC_EBLK_OVRD, x12

Lskip_skye_a0_workarounds:

	SKIP_IF_CPU_VERSION_LESS_THAN x12, MONSOON_CPU_VERSION_B0, Lskip_skye_post_a1_workarounds

	/***** Tunables that apply to all skye cores, chip revs >= B0 *****/

	// <rdar://problem/32512836>: Disable refcount syncing between E and P
	mrs	x12, ARM64_REG_CYC_OVRD
	and	x12, x12, ~ARM64_REG_CYC_OVRD_dsblSnoopTime_mask
	orr	x12, x12, ARM64_REG_CYC_OVRD_dsblSnoopPTime
	msr	ARM64_REG_CYC_OVRD, x12

Lskip_skye_post_a1_workarounds:

#endif /* defined(APPLEMONSOON) */

#if defined(APPLEVORTEX)

	ARM64_IS_PCORE x15

	// Skip if not P-core
	cbz	x15, Lskip_cyprus_pcore_only

	mrs	x12, ARM64_REG_HID1

	mrs	x13, MIDR_EL1
	ubfx	x14, x13, #MIDR_EL1_PNUM_SHIFT, #12
	// Should be applied to all Aruba variants, but only Cyprus variants B0 and later
	cmp	x14, #0xb	// Part number 11 => Cyprus, 16 => Aruba
	bne	Lbr_kill
	ubfx	x14, x13, #MIDR_EL1_VAR_SHIFT, #4
	cbz	x14, Lskip_br_kill	// variant 0 => Cyprus AX, 1 => Cyprus BX

Lbr_kill:

	// rdar://problem/36716477: data corruption due to incorrect branch predictor resolution
	orr	x12, x12, ARM64_REG_HID1_enaBrKillLimit

Lskip_br_kill:

	// rdar://problem/34435356: segfaults due to IEX clock-gating
	orr	x12, x12, ARM64_REG_HID1_rccForceAllIexL3ClksOn
	msr	ARM64_REG_HID1, x12

#if ARM64_BOARD_CONFIG_T8027
	// rdar://problem/40695685: Enable BIF fill buffer stall logic to prevent skid buffer overflow (Aruba A1 only)
	mrs	x12, ARM64_REG_HID5
	orr	x12, x12, ARM64_REG_HID5_EnableDnFIFORdStall
	msr	ARM64_REG_HID5, x12

#endif /* ARM64_BOARD_CONFIG_T8027 */

	// Prevent ordered loads from being dispatched from LSU until all prior loads have completed.
	// rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations
	mrs	x12, ARM64_REG_HID4
	orr	x12, x12, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd
	msr	ARM64_REG_HID4, x12

	// rdar://problem/38482968: [Cyprus Tunable] Poisoned cache line crossing younger load is not redirected by older load-barrier
	mrs	x12, ARM64_REG_HID3
	orr	x12, x12, ARM64_REG_HID3_DisColorOpt
	msr	ARM64_REG_HID3, x12

	// rdar://problem/41056604: disable faster launches of uncacheable unaligned stores to workaround load/load ordering violation
	mrs	x12, ARM64_REG_HID11
	orr	x12, x12, ARM64_REG_HID11_DisX64NTLnchOpt
	msr	ARM64_REG_HID11, x12

	b	Lskip_cyprus_ecore_only

Lskip_cyprus_pcore_only:

	// Prevent ordered loads from being dispatched from LSU until all prior loads have completed.
	// rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations
	mrs	x12, ARM64_REG_EHID4
	orr	x12, x12, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd
	msr	ARM64_REG_EHID4, x12

	// rdar://problem/36595004: Poisoned younger load is not redirected by older load-acquire
	mrs	x12, ARM64_REG_EHID3
	orr	x12, x12, ARM64_REG_EHID3_DisColorOpt
	msr	ARM64_REG_EHID3, x12

	// rdar://problem/37949166: Disable the extension of prefetcher training pipe clock gating, revert to default gating
	mrs	x12, ARM64_REG_EHID10
	orr	x12, x12, ARM64_REG_EHID10_rccDisPwrSavePrfClkOff
	msr	ARM64_REG_EHID10, x12

Lskip_cyprus_ecore_only:

#endif /* defined(APPLEVORTEX) */

#if defined(ARM64_BOARD_CONFIG_T8030)
	// Cebu <B0 is deprecated and unsupported (see rdar://problem/42835678)
	SKIP_IF_CPU_VERSION_LESS_THAN x12, LIGHTNING_CPU_VERSION_B0, .

	ARM64_IS_PCORE x15

	// Skip if not P-core
	cbz	x15, Lskip_cebu_pcore_only

	// rdar://problem/50664291: [Cebu B0/B1 Tunables][PerfVerif][LSU] Post-silicon tuning of STNT widget contiguous counter threshold
	mrs	x12, ARM64_REG_HID4
	and	x12, x12, ~ARM64_REG_HID4_CnfCntrThresh_mask
	orr	x12, x12, 3 << ARM64_REG_HID4_CnfCntrThresh_shift
	msr	ARM64_REG_HID4, x12

	mrs	x12, ARM64_REG_HID9
	// rdar://problem/47744434: Barrier Load Ordering property is not satisfied for x64-loads
	orr	x12, x12, ARM64_REG_HID9_EnableFixBug47221499
	// rdar://problem/50664291: [Cebu B0/B1 Tunables][PerfVerif][LSU] Post-silicon tuning of STNT widget contiguous counter threshold
	orr	x12, x12, ARM64_REG_HID9_DisSTNTWidgetForUnalign
	msr	ARM64_REG_HID9, x12

	// rdar://problem/47865629: RF bank and Multipass conflict forward progress widget does not handle 3+ cycle livelock
	mrs	x12, ARM64_REG_HID16
	orr	x12, x12, ARM64_REG_HID16_EnRs4Sec
	and	x12, x12, ~ARM64_REG_HID16_DisxPickRs45
	orr	x12, x12, ARM64_REG_HID16_EnMPxPick45
	orr	x12, x12, ARM64_REG_HID16_EnMPCyc7
	msr	ARM64_REG_HID16, x12

	mrs	x12, ARM64_REG_HID4
	// Prevent ordered loads from being dispatched from LSU until all prior loads have completed.
	// rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations
	orr	x12, x12, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd
	// rdar://problem/51690962: Disable Store-Non-Temporal downgrade widget
	orr	x12, x12, ARM64_REG_HID4_DisSTNTWidget
	msr	ARM64_REG_HID4, x12

	// rdar://problem/41056604: disable faster launches of uncacheable unaligned stores to workaround load/load ordering violation
	mrs	x12, ARM64_REG_HID11
	orr	x12, x12, ARM64_REG_HID11_DisX64NTLnchOpt
	msr	ARM64_REG_HID11, x12

	// rdar://problem/41029832: configure dummy cycles to work around incorrect temp sensor readings on NEX power gating
	mrs	x12, ARM64_REG_HID13
	and	x12, x12, ~ARM64_REG_HID13_PreCyc_mask
	orr	x12, x12, 4 << ARM64_REG_HID13_PreCyc_shift
	msr	ARM64_REG_HID13, x12

	// rdar://problem/45024523: enable aggressive LEQ throttling to work around LEQ credit leak
	mrs	x12, ARM64_REG_HID16
	orr	x12, x12, ARM64_REG_HID16_leqThrottleAggr
	msr	ARM64_REG_HID16, x12

	b	Lskip_cebu_ecore_only

Lskip_cebu_pcore_only:

	// Prevent ordered loads from being dispatched from LSU until all prior loads have completed.
	// rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations
	mrs	x12, ARM64_REG_EHID4
	orr	x12, x12, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd
	msr	ARM64_REG_EHID4, x12

	// rdar://problem/37949166: Disable the extension of prefetcher training pipe clock gating, revert to default gating
	mrs	x12, ARM64_REG_EHID10
	orr	x12, x12, ARM64_REG_EHID10_rccDisPwrSavePrfClkOff
	msr	ARM64_REG_EHID10, x12

Lskip_cebu_ecore_only:
#endif /* defined(ARM64_BOARD_CONFIG_T8030) */

#if defined(APPLELIGHTNING)
	// rdar://54225210 (Incorrect fusing of a direct branch with AMX/EAS instruction at cross-beat location)
	ARM64_IS_PCORE x15
	cbz	x15, not_cebu_pcore

	mrs	x12, ARM64_REG_HID0
	orr	x12, x12, ARM64_REG_HID0_CacheFusionDisable
	msr	ARM64_REG_HID0, x12

not_cebu_pcore:
#endif /* defined(APPLELIGHTNING) */

#if defined(APPLELIGHTNING)

	// rdar://53907283 ([Cebu ACC Errata] Sibling Merge in LLC can cause UC load to violate ARM Memory Ordering Rules.)
	mrs	x12, ARM64_REG_HID5
	orr	x12, x12, ARM64_REG_HID5_DisFill2cMerge
	msr	ARM64_REG_HID5, x12

	// Skip if not E-core or not a two-cluster CPU
#if defined(CPU_CLUSTER_OFFSETS)
	ARM64_IS_PCORE x15
	cbnz	x15, Lskip_h12_h13_ecore_only

	// rdar://problem/48476033: Prevent store-to-load forwarding for UC memory to avoid barrier ordering violation
	mrs	x12, ARM64_REG_EHID10
	orr	x12, x12, ARM64_REG_EHID10_ForceWStDrainUc
	msr	ARM64_REG_EHID10, x12

Lskip_h12_h13_ecore_only:
#endif /* defined(CPU_CLUSTER_OFFSETS) */
#endif /* defined(APPLELIGHTNING) */



	// If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
	cbnz	x21, Ltrampoline

	// Set KVA of boot args as first arg
	add	x0, x20, x22
	sub	x0, x0, x23

#if KASAN
	mov	x20, x0
	mov	x21, lr

	// x0: boot args
	// x1: KVA page table phys base
	mrs	x1, TTBR1_EL1
	bl	EXT(kasan_bootstrap)

	mov	x0, x20
	mov	lr, x21
#endif

	// Return to arm_init()
	ret

Ltrampoline:
	// Load VA of the trampoline
	adrp	x0, arm_init_tramp@page
	add	x0, x0, arm_init_tramp@pageoff
	add	x0, x0, x22
	sub	x0, x0, x23

	// Branch to the trampoline
	br	x0

/*
 * V=P to KVA trampoline.
 *	x0 - KVA of cpu data pointer
 */
	.text
	.align 2
arm_init_tramp:
	/* On a warm boot, the full kernel translation table is initialized in
	 * addition to the bootstrap tables. The layout is as follows:
	 *
	 *  +--Top of Memory--+
	 *         ...
	 *  |                 |
	 *  |  Primary Kernel |
	 *  |   Trans. Table  |
	 *  |                 |
	 *  +--Top + 5 pages--+
	 *  |                 |
	 *  |  Invalid Table  |
	 *  |                 |
	 *  +--Top + 4 pages--+
	 *  |                 |
	 *  |    KVA Table    |
	 *  |                 |
	 *  +--Top + 2 pages--+
	 *  |                 |
	 *  |    V=P Table    |
	 *  |                 |
	 *  +--Top of Kernel--+
	 *  |                 |
	 *  |  Kernel Mach-O  |
	 *  |                 |
	 *         ...
	 *  +---Kernel Base---+
	 */


	mov	x19, lr
#if defined(HAS_VMSA_LOCK)
	bl	EXT(vmsa_lock)
#endif
	// Convert CPU data PA to VA and set as first argument
	mov	x0, x21
	bl	EXT(phystokv)

	mov	lr, x19

	/* Return to arm_init() */
	ret

//#include "globals_asm.h"

/* vim: set ts=4: */