/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include <arm64/exception_asm.h>

#if __ARM_KERNEL_PROTECT__
#endif /* __ARM_KERNEL_PROTECT__ */
#if __APRR_SUPPORTED__

.macro MSR_APRR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
    bl      EXT(pinst_set_aprr_el1)
#else
    msr     APRR_EL1, x0
#endif
.endmacro

.macro MSR_APRR_EL0_X0
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
    bl      EXT(pinst_set_aprr_el0)
#else
    msr     APRR_EL0, x0
#endif
.endmacro

.macro MSR_APRR_SHADOW_MASK_EN_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
    bl      EXT(pinst_set_aprr_shadow_mask_en_el1)
#else
    msr     APRR_SHADOW_MASK_EN_EL1, x0
#endif
.endmacro

#endif /* __APRR_SUPPORTED__ */
.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    mov     x1, lr
    bl      EXT(pinst_set_vbar)
    mov     lr, x1
#else
    msr     VBAR_EL1, x0
#endif
.endmacro
.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
    mov     x0, x1
    mov     x1, lr
    bl      EXT(pinst_set_tcr)
    mov     lr, x1
#else
    msr     TCR_EL1, x1
#endif
.endmacro
.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    mov     x1, lr
    bl      EXT(pinst_set_ttbr1)
    mov     lr, x1
#else
    msr     TTBR1_EL1, x0
#endif
.endmacro
.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    mov     x1, lr

    // This may abort, so do it on SP1
    bl      EXT(pinst_spsel_1)

    bl      EXT(pinst_set_sctlr)
    msr     SPSel, #0                           // Back to SP0
    mov     lr, x1
#else
    msr     SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro
/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Registers:
 *   x19 - Reset handler data pointer
 *   x20 - Boot args pointer
 *   x21 - CPU data pointer
 */
    .text
    .align 12
    .globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
    /*
     * On reset, both RVBAR_EL1 and VBAR_EL1 point here. SPSel.SP is 1,
     * so on reset the CPU will jump to offset 0x0 and on exceptions
     * the CPU will jump to offset 0x200, 0x280, 0x300, or 0x380.
     * In order for both the reset vector and exception vectors to
     * coexist in the same space, the reset code is moved to the end
     * of the exception vector area.
     */
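    /*
     * (Illustrative note: with SPSel.SP == 1, exceptions taken from EL1 use
     * the "current EL with SP_ELx" quadrant of the AArch64 vector table,
     * which is why the offsets cited above are VBAR + 0x200 (synchronous),
     * +0x280 (IRQ), +0x300 (FIQ), and +0x380 (SError).)
     */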
    /* EL1 SP1: These vectors trap errors during early startup on non-boot CPUs. */

    .globl EXT(reset_vector)
LEXT(reset_vector)
    // Preserve x0 for start_first_cpu, if called

    // Unlock the core for debugging
    msr     OSLAR_EL1, xzr
    msr     DAIFSet, #(DAIFSC_ALL)              // Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
    // Set low reset vector before attempting any loads
    adrp    x0, EXT(LowExceptionVectorBase)@page
    add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
    msr     VBAR_EL1, x0
#endif
#if __APRR_SUPPORTED__
    MOV64   x0, APRR_EL1_DEFAULT
#if XNU_MONITOR
    adrp    x4, EXT(pmap_ppl_locked_down)@page
    ldrb    w5, [x4, #EXT(pmap_ppl_locked_down)@pageoff]
    cbnz    w5, 1f

    // If the PPL is not locked down, we start in PPL mode.
    MOV64   x0, APRR_EL1_PPL
1:
#endif /* XNU_MONITOR */

    MSR_APRR_EL1_X0

    // Load up the default APRR_EL0 value.
    MOV64   x0, APRR_EL0_DEFAULT
    MSR_APRR_EL0_X0
#endif /* __APRR_SUPPORTED__ */
#if defined(KERNEL_INTEGRITY_KTRR)
    /*
     * Set KTRR registers immediately after wake/resume
     *
     * During power-on reset, XNU stashed the kernel text region range values
     * into __DATA,__const, which should be protected by the AMCC RoRgn at
     * this point. Read this data and program/lock the KTRR registers
     * accordingly. If either value is zero, we're debugging the kernel, so
     * skip programming KTRR.
     */

    /* spin until bootstrap core has completed machine lockdown */
    adrp    x17, EXT(lockdown_done)@page
1:
    ldr     x18, [x17, EXT(lockdown_done)@pageoff]
    cbz     x18, 1b

    // load stashed rorgn_begin
    adrp    x17, EXT(rorgn_begin)@page
    add     x17, x17, EXT(rorgn_begin)@pageoff
    ldr     x17, [x17]
    // if rorgn_begin is zero, we're debugging. skip enabling ktrr
    cbz     x17, Lskip_ktrr

    // load stashed rorgn_end
    adrp    x19, EXT(rorgn_end)@page
    add     x19, x19, EXT(rorgn_end)@pageoff
    ldr     x19, [x19]
    cbz     x19, Lskip_ktrr

    // program and lock down KTRR
    // subtract one page from rorgn_end to make the pinst insns NX
    // (one page is 0x1000 bytes with a 4KB granule, 0x4000 with 16KB)
    msr     ARM64_REG_KTRR_LOWER_EL1, x17
    sub     x19, x19, #(1 << (ARM_PTE_SHIFT-12)), lsl #12
    msr     ARM64_REG_KTRR_UPPER_EL1, x19
    mov     x17, #1
    msr     ARM64_REG_KTRR_LOCK_EL1, x17
Lskip_ktrr:
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
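    /*
     * Note: the KTRR lock register is sticky; once ARM64_REG_KTRR_LOCK_EL1
     * has been written above, the KTRR bounds registers cannot be
     * reprogrammed until the core next resets.
     */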
    // Process reset handlers
    adrp    x19, EXT(ResetHandlerData)@page     // Get address of the reset handler data
    add     x19, x19, EXT(ResetHandlerData)@pageoff
    mrs     x15, MPIDR_EL1                      // Load MPIDR to get CPU number
#if HAS_CLUSTER
    and     x0, x15, #0xFFFF                    // CPU number in Affinity0, cluster ID in Affinity1
#else
    and     x0, x15, #0xFF                      // CPU number is in MPIDR Affinity Level 0
#endif
    ldr     x1, [x19, CPU_DATA_ENTRIES]         // Load start of data entries
    add     x3, x1, MAX_CPUS * 16               // end addr of data entries = start + (16 * MAX_CPUS)
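    /*
     * The loop below scans the 16-byte cpu data entries for the record whose
     * physical CPU ID matches this core's MPIDR-derived ID. Illustrative C
     * sketch (hypothetical names; fields mirror the offsets used here):
     *
     *	for (e = entries; e < end; e++) {
     *		cpu_data_t *cd = e->cpu_data_paddr;
     *		if (cd != NULL && cd->cpu_phys_id == phys_id)
     *			goto found;
     *	}
     *	goto skip_handler;
     */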
Lcheck_cpu_data_entry:
    ldr     x21, [x1, CPU_DATA_PADDR]           // Load physical CPU data address
    cbz     x21, Lnext_cpu_data_entry
    ldr     w2, [x21, CPU_PHYS_ID]              // Load ccc cpu phys id
    cmp     x0, x2                              // Compare cpu data phys cpu and MPIDR_EL1 phys cpu
    b.eq    Lfound_cpu_data_entry               // Branch if match
Lnext_cpu_data_entry:
    add     x1, x1, #16                         // Increment to the next cpu data entry
    cmp     x1, x3
    b.eq    Lskip_cpu_reset_handler             // Not found
    b       Lcheck_cpu_data_entry               // loop
Lfound_cpu_data_entry:
#if defined(KERNEL_INTEGRITY_CTRR)
    /*
     * Program and lock CTRR if this CPU is a non-boot cluster master; the
     * boot cluster will be locked in machine_lockdown. pinst insns are
     * protected by VMSA_LOCK. The A_PXN and A_MMUON_WRPROTECT options
     * provide something close to KTRR behavior.
     */

    /* spin until bootstrap core has completed machine lockdown */
    adrp    x17, EXT(lockdown_done)@page
1:
    ldr     x18, [x17, EXT(lockdown_done)@pageoff]
    cbz     x18, 1b

    // load stashed rorgn_begin
    adrp    x17, EXT(rorgn_begin)@page
    add     x17, x17, EXT(rorgn_begin)@pageoff
    ldr     x17, [x17]
    // if rorgn_begin is zero, we're debugging. skip enabling ctrr
    cbz     x17, Lskip_ctrr

    // load stashed rorgn_end
    adrp    x19, EXT(rorgn_end)@page
    add     x19, x19, EXT(rorgn_end)@pageoff
    ldr     x19, [x19]
    cbz     x19, Lskip_ctrr

    mrs     x18, ARM64_REG_CTRR_LOCK_EL1
    cbnz    x18, Lskip_ctrr                     /* don't touch if already locked */
    ldr     w18, [x21, CLUSTER_MASTER]          /* cluster master is unsigned int (32bit) */
    cbz     w18, Lspin_ctrr_unlocked            /* non-cluster master spins if CTRR unlocked (unexpected) */
    msr     ARM64_REG_CTRR_A_LWR_EL1, x17
    msr     ARM64_REG_CTRR_A_UPR_EL1, x19
    mov     x18, #(CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT)
    msr     ARM64_REG_CTRR_CTL_EL1, x18
    mov     x18, #1
    msr     ARM64_REG_CTRR_LOCK_EL1, x18

Lspin_ctrr_unlocked:
    /* we shouldn't ever be here, as cpu start is serialized by cluster in
     * cpu_start(), and the first core started in a cluster is designated
     * cluster master and locks both core and cluster. subsequent cores in
     * the same cluster will run locked from the reset vector. */
    mrs     x18, ARM64_REG_CTRR_LOCK_EL1
    cbz     x18, Lspin_ctrr_unlocked
Lskip_ctrr:
#endif /* defined(KERNEL_INTEGRITY_CTRR) */
    adrp    x20, EXT(const_boot_args)@page
    add     x20, x20, EXT(const_boot_args)@pageoff
    ldr     x0, [x21, CPU_RESET_HANDLER]        // Call CPU reset handler
    cbz     x0, Lskip_cpu_reset_handler

    // Validate that our handler is one of the two expected handlers
    adrp    x2, EXT(resume_idle_cpu)@page
    add     x2, x2, EXT(resume_idle_cpu)@pageoff
    cmp     x0, x2
    b.eq    1f
    adrp    x2, EXT(start_cpu)@page
    add     x2, x2, EXT(start_cpu)@pageoff
    cmp     x0, x2
    b.ne    Lskip_cpu_reset_handler
1:

#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
    /*
     * Populate TPIDR_EL1 (in case the CPU takes an exception while
     * turning on the MMU).
     */
    ldr     x13, [x21, CPU_ACTIVE_THREAD]
    msr     TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ */

    blr     x0
Lskip_cpu_reset_handler:
    b       .                                   // Hang if the handler is NULL or returns
    .global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
    .global EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
    .space  (stSize_NUM),0
#endif

    .section __DATA_CONST,__const
    .align  3
    .globl  EXT(ResetHandlerData)
LEXT(ResetHandlerData)
    .space  (rhdSize_NUM),0                     // (filled with 0s)
/*
 * The __start trampoline is located at a position relative to
 * LowResetVectorBase so that iBoot can compute the reset vector position to
 * set IORVBAR using only the kernel entry point. Reset vector = (__start & ~0xfff)
 */
    .text
    .align 12
    .globl EXT(_start)
LEXT(_start)
    b       EXT(start_first_cpu)
/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
    .align 12
    .global EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
    .align ARM_PGSHIFT
    .globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
    .align 2
    .globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
    adrp    lr, EXT(arm_init_idle_cpu)@page
    add     lr, lr, EXT(arm_init_idle_cpu)@pageoff
    b       start_cpu

    .align 2
    .globl EXT(start_cpu)
LEXT(start_cpu)
    adrp    lr, EXT(arm_init_cpu)@page
    add     lr, lr, EXT(arm_init_cpu)@pageoff
    b       start_cpu

    .align 2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
    // This is done right away in the reset vector for pre-KTRR devices
    // Set the low reset vector now that we are in the KTRR-free zone
    adrp    x0, EXT(LowExceptionVectorBase)@page
    add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
    MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
    // x20 set to BootArgs phys address
    // x21 set to cpu data phys address

    // Get the kernel memory parameters from the boot args
    ldr     x22, [x20, BA_VIRT_BASE]            // Get the kernel virt base
    ldr     x23, [x20, BA_PHYS_BASE]            // Get the kernel phys base
    ldr     x24, [x20, BA_MEM_SIZE]             // Get the physical memory size
    ldr     x25, [x20, BA_TOP_OF_KERNEL_DATA]   // Get the top of the kernel data
    ldr     x26, [x20, BA_BOOT_FLAGS]           // Get the kernel boot flags

    // Set TPIDRRO_EL0 with the CPU number
    ldr     x0, [x21, CPU_NUMBER_GS]
    msr     TPIDRRO_EL0, x0

    // Set the exception stack pointer
    ldr     x0, [x21, CPU_EXCEPSTACK_TOP]

    // Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
    mov     x1, lr
    bl      EXT(pinst_spsel_1)
    mov     lr, x1
#else
    msr     SPSel, #1
#endif
    mov     sp, x0

    // Set the interrupt stack pointer
    ldr     x0, [x21, CPU_INTSTACK_TOP]
    msr     SPSel, #0
    mov     sp, x0

    // The C init routine (arm_init_cpu or arm_init_idle_cpu) is already in lr
    b       common_start
/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - L1 table address
 *   arg2 - L2 table address
 *   arg3 - Scratch register
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 */
.macro create_l1_table_entry
    and     $3, $0, #(ARM_TT_L1_INDEX_MASK)
    lsr     $3, $3, #(ARM_TT_L1_SHIFT)          // Get index in L1 table for L2 table
    lsl     $3, $3, #(TTE_SHIFT)                // Convert index into pointer offset
    add     $3, $1, $3                          // Get L1 entry pointer
    mov     $4, #(ARM_TTE_BOOT_TABLE)           // Get L1 table entry template
    and     $5, $2, #(ARM_TTE_TABLE_MASK)       // Get address bits of L2 table
    orr     $5, $4, $5                          // Create table entry for L2 table
    str     $5, [$3]                            // Write entry to L1 table
.endmacro
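/*
 * Worked example (illustrative, assuming a 4KB translation granule, where
 * ARM_TT_L1_SHIFT == 30 and TTE_SHIFT == 3): for a virtual address of
 * 0x0000000080000000, the L1 index is 0x80000000 >> 30 == 2, giving a byte
 * offset of 2 << 3 == 16, so the table entry is written at L1_base + 16.
 */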
/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - Physical address
 *   arg2 - L2 table address
 *   arg3 - Number of entries
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 *   arg6 - Scratch register
 *   arg7 - Scratch register
 */
.macro create_l2_block_entries
    and     $4, $0, #(ARM_TT_L2_INDEX_MASK)
    lsr     $4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)   // Get index in L2 table for block entry
    lsl     $4, $4, #(TTE_SHIFT)                // Convert index into pointer offset
    add     $4, $2, $4                          // Get L2 entry pointer
    mov     $5, #(ARM_TTE_BOOT_BLOCK)           // Get L2 block entry template
    and     $6, $1, #(ARM_TTE_BLOCK_L2_MASK)    // Get address bits of block mapping
    orr     $6, $5, $6                          // Create block entry
    mov     $5, $3                              // Number of entries to write
    mov     $7, #(ARM_TT_L2_SIZE)
1:
    str     $6, [$4], #(1 << TTE_SHIFT)         // Write entry to L2 table and advance
    add     $6, $6, $7                          // Increment the output address
    subs    $5, $5, #1                          // Decrement the number of entries
    b.ne    1b
.endmacro
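/*
 * Each pass of the loop above stores one 8-byte block TTE and advances the
 * output address by ARM_TT_L2_SIZE (2MB with a 4KB granule, 32MB with 16KB),
 * so arg3 consecutive entries map a physically contiguous, block-aligned
 * region.
 */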
/*
 * create_bootstrap_mapping
 *
 *   arg0 - virtual start address
 *   arg1 - physical start address
 *   arg2 - number of entries to map
 *   arg3 - L1 table address
 *   arg4 - free space pointer
 *   arg5 - scratch (entries mapped per loop)
 *   arg6 - scratch
 *   arg7 - scratch
 *   arg8 - scratch
 *   arg9 - scratch
 */
.macro create_bootstrap_mapping
    /* calculate entries left in this page */
    and     $5, $0, #(ARM_TT_L2_INDEX_MASK)
    lsr     $5, $5, #(ARM_TT_L2_SHIFT)
    mov     $6, #(TTE_PGENTRIES)
    sub     $5, $6, $5

    /* allocate an L2 table */
3:  add     $4, $4, PGBYTES

    /* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */
    create_l1_table_entry $0, $3, $4, $6, $7, $8

    /* determine how many entries to map this loop - the smaller of entries
     * remaining in the page and total entries left */
    cmp     $2, $5
    csel    $5, $2, $5, lt

    /* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3) */
    create_l2_block_entries $0, $1, $4, $5, $6, $7, $8, $9

    /* subtract entries just mapped and bail out if we're done */
    subs    $2, $2, $5
    b.eq    2f

    /* entries left to map - advance base pointers */
    add     $0, $0, $5, lsl #(ARM_TT_L2_SHIFT)
    add     $1, $1, $5, lsl #(ARM_TT_L2_SHIFT)

    mov     $5, #(TTE_PGENTRIES)                /* subsequent loops map (up to) a whole L2 page */
    b       3b
2:
.endmacro
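/*
 * Illustrative C sketch of the macro above (hypothetical names):
 *
 *	while (entries_left != 0) {
 *		ents = min(entries_left, entries_remaining_in_l2_page);
 *		l2_table = (free_ptr += PGBYTES);
 *		create_l1_table_entry(va, l1_table, l2_table);
 *		create_l2_block_entries(va, pa, l2_table, ents);
 *		entries_left -= ents;
 *		va += ents << ARM_TT_L2_SHIFT;
 *		pa += ents << ARM_TT_L2_SHIFT;
 *		entries_remaining_in_l2_page = TTE_PGENTRIES;
 *	}
 */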
/*
 * Cold boot init routine. Called from __start
 *   x0 - Boot args
 */
    .align 2
    .globl EXT(start_first_cpu)
LEXT(start_first_cpu)

    // Unlock the core for debugging
    msr     OSLAR_EL1, xzr
    msr     DAIFSet, #(DAIFSC_ALL)              // Disable all interrupts

    // Save the boot args pointer; x21 == 0 marks a cold boot
    mov     x20, x0
    mov     x21, #0

    // Set low reset vector before attempting any loads
    adrp    x0, EXT(LowExceptionVectorBase)@page
    add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
    MSR_VBAR_EL1_X0

#if __APRR_SUPPORTED__
    // Save the LR
    mov     x1, lr

#if XNU_MONITOR
    // If the PPL is supported, we start out in PPL mode.
    MOV64   x0, APRR_EL1_PPL
#else
    // Otherwise, we start out in default mode.
    MOV64   x0, APRR_EL1_DEFAULT
#endif

    // Set the APRR state for EL1.
    MSR_APRR_EL1_X0

    // Set the APRR state for EL0.
    MOV64   x0, APRR_EL0_DEFAULT
    MSR_APRR_EL0_X0

    // Restore the LR.
    mov     lr, x1
#endif /* __APRR_SUPPORTED__ */
    // Get the kernel memory parameters from the boot args
    ldr     x22, [x20, BA_VIRT_BASE]            // Get the kernel virt base
    ldr     x23, [x20, BA_PHYS_BASE]            // Get the kernel phys base
    ldr     x24, [x20, BA_MEM_SIZE]             // Get the physical memory size
    ldr     x25, [x20, BA_TOP_OF_KERNEL_DATA]   // Get the top of the kernel data
    ldr     x26, [x20, BA_BOOT_FLAGS]           // Get the kernel boot flags

    // Clear the register that will be used to store the userspace thread pointer and CPU number.
    // We may not actually be booting from ordinal CPU 0, so this register will be updated
    // in ml_parse_cpu_topology(), which happens later in bootstrap.
    msr     TPIDRRO_EL0, xzr
    // Set up the exception stack pointer
    adrp    x0, EXT(excepstack_top)@page        // Load top of exception stack
    add     x0, x0, EXT(excepstack_top)@pageoff
    add     x0, x0, x22                         // Convert to KVA

    // Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
    bl      EXT(pinst_spsel_1)
#else
    msr     SPSel, #1
#endif
    mov     sp, x0

    // Set up the interrupt stack pointer
    adrp    x0, EXT(intstack_top)@page          // Load top of irq stack
    add     x0, x0, EXT(intstack_top)@pageoff
    add     x0, x0, x22                         // Convert to KVA
    msr     SPSel, #0                           // Set SP_EL0 to interrupt stack
    mov     sp, x0

    // Load the address of the C init routine into the link register
    adrp    lr, EXT(arm_init)@page
    add     lr, lr, EXT(arm_init)@pageoff
    add     lr, lr, x22                         // Convert to KVA
    /*
     * Set up the bootstrap page tables with a single block entry for the V=P
     * mapping, a single block entry for the trampolined kernel address (KVA),
     * and all else invalid. This requires four pages:
     *   Page 1 - V=P L1 table
     *   Page 2 - V=P L2 table
     *   Page 3 - KVA L1 table
     *   Page 4 - KVA L2 table
     */
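    /*
     * (Two parallel table sets are needed: the V=P mapping keeps the program
     * counter valid at the instant the MMU is enabled, since fetches are
     * still physical at that point, while the KVA mapping is what the kernel
     * runs on once it branches to virtual addresses.)
     */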
    // Invalidate all entries in the bootstrap page tables
    mov     x0, #(ARM_TTE_EMPTY)                // Load invalid entry template
    mov     x1, x25                             // Start at top of kernel
    mov     x2, #(TTE_PGENTRIES)                // Load number of entries per page
    lsl     x2, x2, #2                          // Shift by 2 for num entries on 4 pages

Linvalidate_bootstrap:                          // do {
    str     x0, [x1], #(1 << TTE_SHIFT)         //   Invalidate and advance
    subs    x2, x2, #1                          //   entries--
    b.ne    Linvalidate_bootstrap               // } while (entries != 0)
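    /*
     * Equivalent C sketch — all four bootstrap table pages are filled with
     * invalid TTEs:
     *
     *	for (i = 0; i < 4 * TTE_PGENTRIES; i++)
     *		tt[i] = ARM_TTE_EMPTY;
     */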
    /*
     * In order to reclaim memory on targets where TZ0 (or some other entity)
     * must be located at the base of memory, iBoot may set the virtual and
     * physical base addresses to immediately follow whatever lies at the
     * base of physical memory.
     *
     * If the base address belongs to TZ0, it may be dangerous for xnu to map
     * it (as it may be prefetched, despite being technically inaccessible).
     * In order to avoid this issue while keeping the mapping code simple, we
     * may continue to use block mappings, but we will only map xnu's mach
     * header to the end of memory.
     *
     * Given that iBoot guarantees that the unslid kernelcache base address
     * will begin on an L2 boundary, this should prevent us from accidentally
     * mapping TZ0.
     */
    adrp    x0, EXT(_mh_execute_header)@page    // Use xnu's mach header as the start address
    add     x0, x0, EXT(_mh_execute_header)@pageoff
    /*
     * Adjust the physical and virtual base addresses to account for physical
     * memory preceding xnu's Mach-O header:
     *   x22 - Kernel virtual base
     *   x23 - Kernel physical base
     *   x24 - Physical memory size
     */

    /*
     * x0  - V=P virtual cursor
     * x4  - V=P physical cursor
     * x14 - KVA virtual cursor
     * x15 - KVA physical cursor
     */

    /*
     * x2 - free mem pointer from which we allocate a variable number of L2
     * pages. The maximum number of bootstrap page table pages is limited to
     * BOOTSTRAP_TABLE_SIZE. For a 2GB, 4K-page device, assuming the
     * worst-case slide, we need 1x L1 and up to 3x L2 pages (1GB mapped per
     * L1 entry), so 8 total pages for V=P and KVA.
     */

    /*
     * Set up the V=P bootstrap mapping
     * x5 - total number of L2 entries to allocate
     */
    lsr     x5, x24, #(ARM_TT_L2_SHIFT)
    /* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */
    create_bootstrap_mapping x0, x4, x5, x1, x2, x6, x10, x11, x12, x13

    /* Set up the KVA bootstrap mapping */
    lsr     x5, x24, #(ARM_TT_L2_SHIFT)
    create_bootstrap_mapping x14, x15, x5, x3, x2, x9, x10, x11, x12, x13

    /* Ensure TTEs are visible */
    dsb     ish
    /*
     * Begin common CPU initialization
     *
     * Register state:
     *   x20 - PA of boot args
     *   x21 - zero on cold boot, PA of cpu data on warm reset
     *   x22 - Kernel virtual base
     *   x23 - Kernel physical base
     *   x25 - PA of the end of the kernel
     *    lr - KVA of C init routine
     *    sp - SP_EL0 selected
     *
     *   SP_EL0 - KVA of CPU's interrupt stack
     *   SP_EL1 - KVA of CPU's exception stack
     *   TPIDRRO_EL0 - CPU number
     */
common_start:
    // Set the translation control register.
    adrp    x0, EXT(sysreg_restore)@page        // Load TCR value from the system register restore structure
    add     x0, x0, EXT(sysreg_restore)@pageoff
    ldr     x1, [x0, SR_RESTORE_TCR_EL1]
    MSR_TCR_EL1_X1
    /* Set up translation table base registers.
     *   TTBR0 - V=P table @ top of kernel
     *   TTBR1 - KVA table @ top of kernel + 1 page
     */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
    /* Note that for KTRR configurations, the V=P map will be modified by
     * arm_vm_init.c.
     */
#endif
    and     x0, x25, #(TTBR_BADDR_MASK)
    mov     x19, lr
    bl      EXT(set_mmu_ttb)
    mov     lr, x19
    add     x0, x25, PGBYTES
    and     x0, x0, #(TTBR_BADDR_MASK)
    MSR_TTBR1_EL1_X0
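    /*
     * MAIR_EL1 packs eight 8-bit memory-attribute encodings; the
     * MAIR_ATTR_SHIFT(idx) macro places an encoding at bits [8*idx+7:8*idx],
     * and page-table entries select an attribute by its index. Each value
     * built below is OR'd into x0 before the final write to MAIR_EL1.
     */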
    // Set up MAIR attr0 for normal memory, attr1 for device memory
    mov     x0, xzr
    mov     x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
    orr     x0, x0, x1
    mov     x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
    orr     x0, x0, x1
    mov     x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
    orr     x0, x0, x1
    mov     x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
    orr     x0, x0, x1
    mov     x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
    orr     x0, x0, x1
    mov     x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
    orr     x0, x0, x1
    mov     x1, #(MAIR_POSTED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_REORDERED))
    orr     x0, x0, x1
    mov     x1, #(MAIR_POSTED_COMBINED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED))
    orr     x0, x0, x1
    msr     MAIR_EL1, x0
#if defined(APPLEHURRICANE)

    // <rdar://problem/26726624> Increase Snoop reservation in EDB to reduce starvation risk
    // Needs to be done before the MMU is enabled
    mrs     x12, ARM64_REG_HID5
    and     x12, x12, (~ARM64_REG_HID5_CrdEdbSnpRsvd_mask)
    orr     x12, x12, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE
    msr     ARM64_REG_HID5, x12

#endif
#if defined(BCM2837)
    // Set up timer interrupt routing; must be done before the MMU is enabled
    mrs     x15, MPIDR_EL1                      // Load MPIDR to get CPU number
    and     x15, x15, #0xFF                     // CPU number is in MPIDR Affinity Level 0
    mov     x0, #0x4000
    lsl     x0, x0, #16
    add     x0, x0, #0x0040                     // x0: 0x4000004X Core Timers interrupt control
    add     x0, x0, x15, lsl #2
    mov     w1, #0xF0                           // w1: 0xF0 Route to Core FIQs
    str     w1, [x0]
    isb     sy
#endif
#ifndef __ARM_IC_NOALIAS_ICACHE__
    /* Invalidate the TLB and icache on systems that do not guarantee that the
     * caches are invalidated on reset.
     */
    tlbi    vmalle1
    ic      iallu
#endif
    /* If x21 is not 0, then this is either the start_cpu path or
     * the resume_idle_cpu path. cpu_ttep should already be
     * populated, so just switch to the kernel_pmap now.
     */
    cbz     x21, 1f
    adrp    x0, EXT(cpu_ttep)@page
    add     x0, x0, EXT(cpu_ttep)@pageoff
    ldr     x0, [x0]
    MSR_TTBR1_EL1_X0
1:
    // Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
    /* If this is not the first reset of the boot CPU, the alternate mapping
     * for the exception vectors will be set up, so use it. Otherwise, we
     * should use the mapping located in the kernelcache mapping.
     */
    MOV64   x0, ARM_KERNEL_PROTECT_EXCEPTION_START

    cbnz    x21, 1f
#endif /* __ARM_KERNEL_PROTECT__ */
    adrp    x0, EXT(ExceptionVectorsBase)@page  // Load exception vectors base address
    add     x0, x0, EXT(ExceptionVectorsBase)@pageoff
    add     x0, x0, x22                         // Convert exception vector address to KVA
1:
    MSR_VBAR_EL1_X0
#if defined(HAS_APPLE_PAC)
1:
#ifdef __APSTS_SUPPORTED__
    mrs     x0, ARM64_REG_APSTS_EL1
    and     x1, x0, #(APSTS_EL1_MKEYVld)
    cbz     x1, 1b                              // Poll APSTS_EL1.MKEYVld
    mrs     x0, ARM64_REG_APCTL_EL1
    orr     x0, x0, #(APCTL_EL1_AppleMode)
    orr     x0, x0, #(APCTL_EL1_KernKeyEn)
    and     x0, x0, #~(APCTL_EL1_EnAPKey0)
    msr     ARM64_REG_APCTL_EL1, x0
#else
    mrs     x0, ARM64_REG_APCTL_EL1
    and     x1, x0, #(APCTL_EL1_MKEYVld)
    cbz     x1, 1b                              // Poll APCTL_EL1.MKEYVld
    orr     x0, x0, #(APCTL_EL1_AppleMode)
    orr     x0, x0, #(APCTL_EL1_KernKeyEn)
    msr     ARM64_REG_APCTL_EL1, x0
#endif /* __APSTS_SUPPORTED__ */
    /* ISB necessary to ensure APCTL_EL1_AppleMode logic is enabled before proceeding */
    isb     sy
    /* Load static kernel key diversification values */
    ldr     x0, =KERNEL_ROP_ID
    /* Set the ROP key. Must write at least once to pick up the per-boot mkey diversification */
    msr     APIBKeyLo_EL1, x0
    add     x0, x0, #1
    msr     APIBKeyHi_EL1, x0
    add     x0, x0, #1
    msr     APDBKeyLo_EL1, x0
    add     x0, x0, #1
    msr     APDBKeyHi_EL1, x0
    add     x0, x0, #1
    msr     ARM64_REG_KERNELKEYLO_EL1, x0
    add     x0, x0, #1
    msr     ARM64_REG_KERNELKEYHI_EL1, x0
    /* Set the JOP key. Must write at least once to pick up the per-boot mkey diversification */
    add     x0, x0, #1
    msr     APIAKeyLo_EL1, x0
    add     x0, x0, #1
    msr     APIAKeyHi_EL1, x0
    add     x0, x0, #1
    msr     APDAKeyLo_EL1, x0
    add     x0, x0, #1
    msr     APDAKeyHi_EL1, x0
    add     x0, x0, #1
    msr     APGAKeyLo_EL1, x0
    add     x0, x0, #1
    msr     APGAKeyHi_EL1, x0
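    /*
     * (The writes above program the AArch64 pointer-authentication key
     * registers — APIB for kernel ROP/return-address signing, plus APDB,
     * APIA, APDA, and APGA, each as a 128-bit Lo/Hi pair — along with
     * Apple's KERNELKEY registers. As the comments note, each key must be
     * written at least once so that the per-boot master-key diversification
     * takes effect.)
     */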
    // Enable caches, MMU, ROP and JOP
    mov     x0, #(SCTLR_EL1_DEFAULT & 0xFFFF)
    mov     x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000)
    orr     x0, x0, x1
#if defined(HAS_APPLE_PAC)
    orr     x0, x0, #(SCTLR_PACIB_ENABLED)      /* IB is ROP */

#if DEBUG || DEVELOPMENT
    and     x2, x26, BA_BOOT_FLAGS_DISABLE_JOP
#if __APCFG_SUPPORTED__
    // For APCFG systems, JOP keys are always on for EL1 unless ELXENKEY is cleared.
    // JOP keys for EL0 will be toggled on the first time we pmap_switch to a pmap that has JOP enabled.
    cbz     x2, 1f
    mrs     x3, APCFG_EL1
    and     x3, x3, #~(APCFG_EL1_ELXENKEY)
    msr     APCFG_EL1, x3
#else /* __APCFG_SUPPORTED__ */
    cbnz    x2, 1f
#endif /* __APCFG_SUPPORTED__ */
#endif /* DEBUG || DEVELOPMENT */

#if !__APCFG_SUPPORTED__
    MOV64   x1, SCTLR_JOP_KEYS_ENABLED
    orr     x0, x0, x1
#endif /* !__APCFG_SUPPORTED__ */
1:
#else /* HAS_APPLE_PAC */

    // Enable caches and MMU
    mov     x0, #(SCTLR_EL1_DEFAULT & 0xFFFF)
    mov     x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000)
    orr     x0, x0, x1
#endif /* HAS_APPLE_PAC */

    msr     SCTLR_EL1, x0
    isb     sy
    MOV32   x1, SCTLR_EL1_DEFAULT
#if defined(HAS_APPLE_PAC)
    orr     x1, x1, #(SCTLR_PACIB_ENABLED)
#if !__APCFG_SUPPORTED__
    MOV64   x2, SCTLR_JOP_KEYS_ENABLED
#if (DEBUG || DEVELOPMENT)
    // Ignore the JOP bits, since we can't predict at compile time whether BA_BOOT_FLAGS_DISABLE_JOP is set
    bic     x0, x0, x2
#else
    orr     x1, x1, x2
#endif /* (DEBUG || DEVELOPMENT) */
#endif /* !__APCFG_SUPPORTED__ */
#endif /* HAS_APPLE_PAC */
    cmp     x0, x1
    bne     .
#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
    /* Watchtower
     *
     * If we have a Watchtower monitor it will set up CPACR_EL1 for us; touching
     * it here would trap to EL3.
     */
    mov     x0, #(CPACR_FPEN_ENABLE)
    msr     CPACR_EL1, x0
#endif
    // Clear the thread pointer
    mov     x0, #0
    msr     TPIDR_EL1, x0                       // Set thread register
#if defined(APPLE_ARM64_ARCH_FAMILY)
    // Initialization common to all Apple targets
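    // Note: x15 holds the core type (set just below by ARM64_IS_PCORE); the
    // ARM64_{READ,WRITE}_EP_SPR macros use it to select the E-core (EHID) or
    // P-core (HID) variant of the implementation-defined register.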
    ARM64_IS_PCORE x15
    ARM64_READ_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
    orr     x12, x12, ARM64_REG_HID4_DisDcMVAOps
    orr     x12, x12, ARM64_REG_HID4_DisDcSWL2Ops
    ARM64_WRITE_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
#endif // APPLE_ARM64_ARCH_FAMILY
#if defined(APPLETYPHOON)
    //
    // Typhoon-specific initialization
    // For tunable summary, see <rdar://problem/13503621>
    //

    //
    // Disable LSP flush with context switch to work around a bug in LSP
    // that can cause Typhoon to wedge when CONTEXTIDR is written.
    // <rdar://problem/12387704>
    //

    mrs     x12, ARM64_REG_HID0
    orr     x12, x12, ARM64_REG_HID0_LoopBuffDisb
    msr     ARM64_REG_HID0, x12

    mrs     x12, ARM64_REG_HID1
    orr     x12, x12, ARM64_REG_HID1_rccDisStallInactiveIexCtl
    msr     ARM64_REG_HID1, x12

    mrs     x12, ARM64_REG_HID3
    orr     x12, x12, ARM64_REG_HID3_DisXmonSnpEvictTriggerL2StarvationMode
    msr     ARM64_REG_HID3, x12

    mrs     x12, ARM64_REG_HID5
    and     x12, x12, (~ARM64_REG_HID5_DisHwpLd)
    and     x12, x12, (~ARM64_REG_HID5_DisHwpSt)
    msr     ARM64_REG_HID5, x12

    // Change the default memcache data set ID from 0 to 15 for all agents
    mrs     x12, ARM64_REG_HID8
    orr     x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
#if ARM64_BOARD_CONFIG_T7001
    orr     x12, x12, ARM64_REG_HID8_DataSetID2_VALUE
#endif // ARM64_BOARD_CONFIG_T7001
    msr     ARM64_REG_HID8, x12
    isb     sy
#endif // APPLETYPHOON
#if defined(APPLETWISTER)

    // rdar://problem/36112905: Set CYC_CFG:skipInit to pull in isAlive by one DCLK
    // to work around a potential hang. Must only be applied to Maui C0.
    mrs     x12, MIDR_EL1
    ubfx    x13, x12, #MIDR_EL1_PNUM_SHIFT, #12
    cmp     x13, #4                             // Part number 4 => Maui, 5 => Malta/Elba
    b.ne    Lskip_isalive
    ubfx    x13, x12, #MIDR_EL1_VAR_SHIFT, #4
    cmp     x13, #2                             // variant 2 => Maui C0
    b.lt    Lskip_isalive

    mrs     x12, ARM64_REG_CYC_CFG
    orr     x12, x12, ARM64_REG_CYC_CFG_skipInit
    msr     ARM64_REG_CYC_CFG, x12

Lskip_isalive:

    mrs     x12, ARM64_REG_HID11
    and     x12, x12, (~ARM64_REG_HID11_DisFillC1BubOpt)
    msr     ARM64_REG_HID11, x12

    // Change the default memcache data set ID from 0 to 15 for all agents
    mrs     x12, ARM64_REG_HID8
    orr     x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
    orr     x12, x12, (ARM64_REG_HID8_DataSetID2_VALUE | ARM64_REG_HID8_DataSetID3_VALUE)
    msr     ARM64_REG_HID8, x12

    // Use 4-cycle MUL latency to avoid denormal stalls
    mrs     x12, ARM64_REG_HID7
    orr     x12, x12, #ARM64_REG_HID7_disNexFastFmul
    msr     ARM64_REG_HID7, x12

    // disable reporting of TLB-multi-hit-error
    // <rdar://problem/22163216>
    mrs     x12, ARM64_REG_LSU_ERR_STS
    and     x12, x12, (~ARM64_REG_LSU_ERR_STS_L1DTlbMultiHitEN)
    msr     ARM64_REG_LSU_ERR_STS, x12

    isb     sy
#endif // APPLETWISTER
#if defined(APPLEHURRICANE)

    // IC prefetch configuration
    // <rdar://problem/23019425>
    mrs     x12, ARM64_REG_HID0
    and     x12, x12, (~ARM64_REG_HID0_ICPrefDepth_bmsk)
    orr     x12, x12, (1 << ARM64_REG_HID0_ICPrefDepth_bshift)
    orr     x12, x12, ARM64_REG_HID0_ICPrefLimitOneBrn
    msr     ARM64_REG_HID0, x12

    // disable reporting of TLB-multi-hit-error
    // <rdar://problem/22163216>
    mrs     x12, ARM64_REG_LSU_ERR_CTL
    and     x12, x12, (~ARM64_REG_LSU_ERR_CTL_L1DTlbMultiHitEN)
    msr     ARM64_REG_LSU_ERR_CTL, x12

    // disable crypto fusion across decode groups
    // <rdar://problem/27306424>
    mrs     x12, ARM64_REG_HID1
    orr     x12, x12, ARM64_REG_HID1_disAESFuseAcrossGrp
    msr     ARM64_REG_HID1, x12

#if defined(ARM64_BOARD_CONFIG_T8011)
    // Clear DisDcZvaCmdOnly
    // Per Myst A0/B0 tunables document
    // <rdar://problem/27627428> Myst: Confirm ACC Per-CPU Tunables
    mrs     x12, ARM64_REG_HID3
    and     x12, x12, ~ARM64_REG_HID3_DisDcZvaCmdOnly
    msr     ARM64_REG_HID3, x12

    mrs     x12, ARM64_REG_EHID3
    and     x12, x12, ~ARM64_REG_EHID3_DisDcZvaCmdOnly
    msr     ARM64_REG_EHID3, x12
#endif /* defined(ARM64_BOARD_CONFIG_T8011) */

#endif // APPLEHURRICANE
#if defined(APPLEMONSOON)

    /***** Tunables that apply to all Skye cores, all chip revs *****/

    // <rdar://problem/28512310> SW WAR/eval: WKdm write ack lost when bif_wke_colorWrAck_XXaH asserts concurrently for both colors
    mrs     x12, ARM64_REG_HID8
    orr     x12, x12, #ARM64_REG_HID8_WkeForceStrictOrder
    msr     ARM64_REG_HID8, x12

    // Skip if not E-core
    ARM64_IS_PCORE x15
    cbnz    x15, Lskip_skye_ecore_only

    /***** Tunables that only apply to Skye E-cores, all chip revs *****/

    // <rdar://problem/30423928>: Atomic launch eligibility is erroneously taken away when a store at SMB gets invalidated
    mrs     x12, ARM64_REG_EHID11
    and     x12, x12, ~(ARM64_REG_EHID11_SmbDrainThresh_mask)
    msr     ARM64_REG_EHID11, x12

Lskip_skye_ecore_only:

    SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL x12, MONSOON_CPU_VERSION_B0, Lskip_skye_a0_workarounds

    // Skip if not E-core
    cbnz    x15, Lskip_skye_a0_ecore_only

    /***** Tunables that only apply to Skye E-cores, chip revs < B0 *****/

    // Disable downstream fill bypass logic
    // <rdar://problem/28545159> [Tunable] Skye - L2E fill bypass collision from both pipes to ecore
    mrs     x12, ARM64_REG_EHID5
    orr     x12, x12, ARM64_REG_EHID5_DisFillByp
    msr     ARM64_REG_EHID5, x12

    // Disable forwarding of return addresses to the NFP
    // <rdar://problem/30387067> Skye: FED incorrectly taking illegal va exception
    mrs     x12, ARM64_REG_EHID0
    orr     x12, x12, ARM64_REG_EHID0_nfpRetFwdDisb
    msr     ARM64_REG_EHID0, x12

Lskip_skye_a0_ecore_only:

    /***** Tunables that apply to all Skye cores, chip revs < B0 *****/

    // Disable clock divider gating
    // <rdar://problem/30854420> [Tunable/Errata][cpu_1p_1e] [CPGV2] ACC power down issue when link FSM switches from GO_DN to CANCEL and at the same time upStreamDrain request is set.
    mrs     x12, ARM64_REG_HID6
    orr     x12, x12, ARM64_REG_HID6_DisClkDivGating
    msr     ARM64_REG_HID6, x12

    // Disable clock dithering
    // <rdar://problem/29022199> [Tunable] Skye A0: Linux: LLC PIO Errors
    mrs     x12, ARM64_REG_ACC_OVRD
    orr     x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr
    msr     ARM64_REG_ACC_OVRD, x12

    mrs     x12, ARM64_REG_ACC_EBLK_OVRD
    orr     x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr
    msr     ARM64_REG_ACC_EBLK_OVRD, x12

Lskip_skye_a0_workarounds:

    SKIP_IF_CPU_VERSION_LESS_THAN x12, MONSOON_CPU_VERSION_B0, Lskip_skye_post_a1_workarounds

    /***** Tunables that apply to all Skye cores, chip revs >= B0 *****/

    // <rdar://problem/32512836>: Disable refcount syncing between E and P
    mrs     x12, ARM64_REG_CYC_OVRD
    and     x12, x12, ~ARM64_REG_CYC_OVRD_dsblSnoopTime_mask
    orr     x12, x12, ARM64_REG_CYC_OVRD_dsblSnoopPTime
    msr     ARM64_REG_CYC_OVRD, x12

Lskip_skye_post_a1_workarounds:

#endif /* defined(APPLEMONSOON) */
#if defined(APPLEVORTEX)

    ARM64_IS_PCORE x15

    // Skip if not P-core
    cbz     x15, Lskip_cyprus_pcore_only

    mrs     x12, ARM64_REG_HID1

    mrs     x13, MIDR_EL1
    ubfx    x14, x13, #MIDR_EL1_PNUM_SHIFT, #12
    // Should be applied to all Aruba variants, but only Cyprus variants B0 and later
    cmp     x14, #0xb                           // Part number 11 => Cyprus, 16 => Aruba
    b.ne    Lbr_kill
    ubfx    x14, x13, #MIDR_EL1_VAR_SHIFT, #4
    cbz     x14, Lskip_br_kill                  // variant 0 => Cyprus AX, 1 => Cyprus BX

Lbr_kill:

    // rdar://problem/36716477: data corruption due to incorrect branch predictor resolution
    orr     x12, x12, ARM64_REG_HID1_enaBrKillLimit

Lskip_br_kill:

    // rdar://problem/34435356: segfaults due to IEX clock-gating
    orr     x12, x12, ARM64_REG_HID1_rccForceAllIexL3ClksOn
    msr     ARM64_REG_HID1, x12

#if ARM64_BOARD_CONFIG_T8027
    // rdar://problem/40695685: Enable BIF fill buffer stall logic to prevent skid buffer overflow (Aruba A1 only)
    mrs     x12, ARM64_REG_HID5
    orr     x12, x12, ARM64_REG_HID5_EnableDnFIFORdStall
    msr     ARM64_REG_HID5, x12

#endif /* ARM64_BOARD_CONFIG_T8027 */

    // Prevent ordered loads from being dispatched from the LSU until all prior loads have completed.
    // rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations
    mrs     x12, ARM64_REG_HID4
    orr     x12, x12, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd
    msr     ARM64_REG_HID4, x12

    // rdar://problem/38482968: [Cyprus Tunable] Poisoned cache line crossing younger load is not redirected by older load-barrier
    mrs     x12, ARM64_REG_HID3
    orr     x12, x12, ARM64_REG_HID3_DisColorOpt
    msr     ARM64_REG_HID3, x12

    // rdar://problem/41056604: disable faster launches of uncacheable unaligned stores to work around a load/load ordering violation
    mrs     x12, ARM64_REG_HID11
    orr     x12, x12, ARM64_REG_HID11_DisX64NTLnchOpt
    msr     ARM64_REG_HID11, x12

    b       Lskip_cyprus_ecore_only

Lskip_cyprus_pcore_only:

    // Prevent ordered loads from being dispatched from the LSU until all prior loads have completed.
    // rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations
    mrs     x12, ARM64_REG_EHID4
    orr     x12, x12, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd
    msr     ARM64_REG_EHID4, x12

    // rdar://problem/36595004: Poisoned younger load is not redirected by older load-acquire
    mrs     x12, ARM64_REG_EHID3
    orr     x12, x12, ARM64_REG_EHID3_DisColorOpt
    msr     ARM64_REG_EHID3, x12

    // rdar://problem/37949166: Disable the extension of prefetcher training pipe clock gating, revert to default gating
    mrs     x12, ARM64_REG_EHID10
    orr     x12, x12, ARM64_REG_EHID10_rccDisPwrSavePrfClkOff
    msr     ARM64_REG_EHID10, x12

Lskip_cyprus_ecore_only:

#endif /* defined(APPLEVORTEX) */
#if defined(ARM64_BOARD_CONFIG_T8030)
    // Cebu <B0 is deprecated and unsupported (see rdar://problem/42835678)
    SKIP_IF_CPU_VERSION_LESS_THAN x12, LIGHTNING_CPU_VERSION_B0, .

    ARM64_IS_PCORE x15

    // Skip if not P-core
    cbz     x15, Lskip_cebu_pcore_only

    // rdar://problem/50664291: [Cebu B0/B1 Tunables][PerfVerif][LSU] Post-silicon tuning of STNT widget contiguous counter threshold
    mrs     x12, ARM64_REG_HID4
    and     x12, x12, ~ARM64_REG_HID4_CnfCntrThresh_mask
    orr     x12, x12, 3 << ARM64_REG_HID4_CnfCntrThresh_shift
    msr     ARM64_REG_HID4, x12

    mrs     x12, ARM64_REG_HID9
    // rdar://problem/47744434: Barrier Load Ordering property is not satisfied for x64-loads
    orr     x12, x12, ARM64_REG_HID9_EnableFixBug47221499
    // rdar://problem/50664291: [Cebu B0/B1 Tunables][PerfVerif][LSU] Post-silicon tuning of STNT widget contiguous counter threshold
    orr     x12, x12, ARM64_REG_HID9_DisSTNTWidgetForUnalign
    msr     ARM64_REG_HID9, x12

    // rdar://problem/47865629: RF bank and Multipass conflict forward progress widget does not handle 3+ cycle livelock
    mrs     x12, ARM64_REG_HID16
    orr     x12, x12, ARM64_REG_HID16_EnRs4Sec
    and     x12, x12, ~ARM64_REG_HID16_DisxPickRs45
    orr     x12, x12, ARM64_REG_HID16_EnMPxPick45
    orr     x12, x12, ARM64_REG_HID16_EnMPCyc7
    msr     ARM64_REG_HID16, x12

    mrs     x12, ARM64_REG_HID4
    // Prevent ordered loads from being dispatched from the LSU until all prior loads have completed.
    // rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations
    orr     x12, x12, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd
    // rdar://problem/51690962: Disable Store-Non-Temporal downgrade widget
    orr     x12, x12, ARM64_REG_HID4_DisSTNTWidget
    msr     ARM64_REG_HID4, x12

    // rdar://problem/41056604: disable faster launches of uncacheable unaligned stores to work around a load/load ordering violation
    mrs     x12, ARM64_REG_HID11
    orr     x12, x12, ARM64_REG_HID11_DisX64NTLnchOpt
    msr     ARM64_REG_HID11, x12

    // rdar://problem/41029832: configure dummy cycles to work around incorrect temp sensor readings on NEX power gating
    mrs     x12, ARM64_REG_HID13
    and     x12, x12, ~ARM64_REG_HID13_PreCyc_mask
    orr     x12, x12, 4 << ARM64_REG_HID13_PreCyc_shift
    msr     ARM64_REG_HID13, x12

    // rdar://problem/45024523: enable aggressive LEQ throttling to work around LEQ credit leak
    mrs     x12, ARM64_REG_HID16
    orr     x12, x12, ARM64_REG_HID16_leqThrottleAggr
    msr     ARM64_REG_HID16, x12

    b       Lskip_cebu_ecore_only

Lskip_cebu_pcore_only:

    // Prevent ordered loads from being dispatched from the LSU until all prior loads have completed.
    // rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations
    mrs     x12, ARM64_REG_EHID4
    orr     x12, x12, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd
    msr     ARM64_REG_EHID4, x12

    // rdar://problem/37949166: Disable the extension of prefetcher training pipe clock gating, revert to default gating
    mrs     x12, ARM64_REG_EHID10
    orr     x12, x12, ARM64_REG_EHID10_rccDisPwrSavePrfClkOff
    msr     ARM64_REG_EHID10, x12

Lskip_cebu_ecore_only:
#endif /* defined(ARM64_BOARD_CONFIG_T8030) */
#if defined(APPLELIGHTNING)
    // rdar://54225210 (Incorrect fusing of a direct branch with AMX/EAS instruction at cross-beat location)
    ARM64_IS_PCORE x15
    cbz     x15, not_cebu_pcore

    mrs     x12, ARM64_REG_HID0
    orr     x12, x12, ARM64_REG_HID0_CacheFusionDisable
    msr     ARM64_REG_HID0, x12

not_cebu_pcore:
#endif /* defined(APPLELIGHTNING) */

#if defined(APPLELIGHTNING)

    // rdar://53907283 ([Cebu ACC Errata] Sibling Merge in LLC can cause UC load to violate ARM Memory Ordering Rules.)
    mrs     x12, ARM64_REG_HID5
    orr     x12, x12, ARM64_REG_HID5_DisFill2cMerge
    msr     ARM64_REG_HID5, x12

    // Skip if not E-core or not a two-cluster CPU
#if defined(CPU_CLUSTER_OFFSETS)
    ARM64_IS_PCORE x15
    cbnz    x15, Lskip_h12_h13_ecore_only

    // rdar://problem/48476033: Prevent store-to-load forwarding for UC memory to avoid barrier ordering violation
    mrs     x12, ARM64_REG_EHID10
    orr     x12, x12, ARM64_REG_EHID10_ForceWStDrainUc
    msr     ARM64_REG_EHID10, x12

Lskip_h12_h13_ecore_only:
#endif /* defined(CPU_CLUSTER_OFFSETS) */
#endif /* defined(APPLELIGHTNING) */
    // If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
    cbnz    x21, Ltrampoline

    // Set KVA of boot args as first arg
    add     x0, x20, x22

#if KASAN
    mov     x20, x0
    mov     x21, lr

    // x0: boot args
    // x1: KVA page table phys base
    mrs     x1, TTBR1_EL1
    bl      EXT(kasan_bootstrap)

    mov     x0, x20
    mov     lr, x21
#endif

    // Return to arm_init()
    ret

Ltrampoline:
    // Load VA of the trampoline
    adrp    x0, arm_init_tramp@page
    add     x0, x0, arm_init_tramp@pageoff
    add     x0, x0, x22

    // Branch to the trampoline
    br      x0
/*
 * V=P to KVA trampoline.
 *   x0 - KVA of cpu data pointer
 */
    .text
    .align 2
arm_init_tramp:
    /* On a warm boot, the full kernel translation table is initialized in
     * addition to the bootstrap tables. The layout is as follows:
     *
     *  +--Top of Memory--+
     *         ...
     *  |                 |
     *  |  Primary Kernel |
     *  |   Trans. Table  |
     *  |                 |
     *  +--Top + 5 pages--+
     *  |                 |
     *  |  Invalid Table  |
     *  |                 |
     *  +--Top + 4 pages--+
     *  |                 |
     *  |    KVA Table    |
     *  |                 |
     *  +--Top + 2 pages--+
     *  |                 |
     *  |    V=P Table    |
     *  |                 |
     *  +--Top of Kernel--+
     *  |                 |
     *  |  Kernel Mach-O  |
     *  |                 |
     *  +---Kernel Base---+
     */
#if defined(HAS_VMSA_LOCK)
    mov     x19, lr
    bl      EXT(vmsa_lock)
    mov     lr, x19
#endif
    // Convert CPU data PA to VA and set it as the first argument
    add     x0, x0, x22

    /* Return to arm_init() */
    ret
//#include "globals_asm.h"

/* vim: set ts=4: */