/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>

#if __ARM_KERNEL_PROTECT__
#endif /* __ARM_KERNEL_PROTECT__ */
.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    bl      EXT(pinst_set_vbar)
#if defined(KERNEL_INTEGRITY_KTRR)

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    // This may abort, so do it on SP1
    msr     SPSel, #0               // Back to SP0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * x19 - Reset handler data pointer
 * x20 - Boot args pointer
 * x21 - CPU data pointer
 */
    .globl  EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
    // Preserve x0 for start_first_cpu, if called

    // Unlock the core for debugging
    msr     DAIFSet, #(DAIFSC_ALL)              // Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR))
    // Set low reset vector before attempting any loads
    adrp    x0, EXT(LowExceptionVectorBase)@page
    add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
#if defined(KERNEL_INTEGRITY_KTRR)
    /*
     * Set KTRR registers immediately after wake/resume
     *
     * During power-on reset, XNU stashed the kernel text region range values
     * into __DATA,__const, which should be protected by the AMCC RoRgn at this point.
     * Read this data and program/lock the KTRR registers accordingly.
     * If either value is zero, we are debugging the kernel, so skip programming KTRR.
     */

    // load stashed rorgn_begin
    adrp    x17, EXT(rorgn_begin)@page
    add     x17, x17, EXT(rorgn_begin)@pageoff
    // if rorgn_begin is zero, we're debugging; skip enabling KTRR

    // load stashed rorgn_end
    adrp    x19, EXT(rorgn_end)@page
    add     x19, x19, EXT(rorgn_end)@pageoff

    // program and lock down KTRR
    // subtract one page from rorgn_end to make pinst insns NX
    msr     ARM64_REG_KTRR_LOWER_EL1, x17
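    // Note on the immediate below: (1 << (ARM_PTE_SHIFT-12)) << 12 works out to
    // 1 << ARM_PTE_SHIFT, i.e. one page (assuming ARM_PTE_SHIFT is the page
    // shift); the split form keeps the value inside SUB's 12-bit
    // shifted-immediate encoding even on 16K-page configurations.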
    sub     x19, x19, #(1 << (ARM_PTE_SHIFT-12)), lsl #12
    msr     ARM64_REG_KTRR_UPPER_EL1, x19
    msr     ARM64_REG_KTRR_LOCK_EL1, x17
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
    // Process reset handlers
    adrp    x19, EXT(ResetHandlerData)@page     // Get address of the reset handler data
    add     x19, x19, EXT(ResetHandlerData)@pageoff
    mrs     x15, MPIDR_EL1                      // Load MPIDR to get CPU number
    and     x0, x15, #0xFF                      // CPU number is in MPIDR Affinity Level 0
    ldr     x1, [x19, CPU_DATA_ENTRIES]         // Load start of data entries
    add     x3, x1, MAX_CPUS * 16               // End addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
    ldr     x21, [x1, CPU_DATA_PADDR]           // Load physical CPU data address
    cbz     x21, Lnext_cpu_data_entry
    ldr     w2, [x21, CPU_PHYS_ID]              // Load ccc cpu phys id
    cmp     x0, x2                              // Compare cpu data phys cpu and MPIDR_EL1 phys cpu
    b.eq    Lfound_cpu_data_entry               // Branch if match
Lnext_cpu_data_entry:
    add     x1, x1, #16                         // Increment to the next cpu data entry
    cmp     x1, x3                              // Check whether we have walked past the last entry
    b.eq    Lskip_cpu_reset_handler             // Not found
    b       Lcheck_cpu_data_entry               // loop
Lfound_cpu_data_entry:
    adrp    x20, EXT(const_boot_args)@page
    add     x20, x20, EXT(const_boot_args)@pageoff
    ldr     x0, [x21, CPU_RESET_HANDLER]        // Call CPU reset handler
    cbz     x0, Lskip_cpu_reset_handler

    // Validate that our handler is one of the two expected handlers
    adrp    x2, EXT(resume_idle_cpu)@page
    add     x2, x2, EXT(resume_idle_cpu)@pageoff
    adrp    x2, EXT(start_cpu)@page
    add     x2, x2, EXT(start_cpu)@pageoff
    bne     Lskip_cpu_reset_handler

#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
    /*
     * Populate TPIDR_EL1 (in case the CPU takes an exception while
     * turning on the MMU).
     */
    ldr     x13, [x21, CPU_ACTIVE_THREAD]
#endif /* __ARM_KERNEL_PROTECT__ */

Lskip_cpu_reset_handler:
    b       .                                   // Hang if the handler is NULL or returns
    .globl  EXT(ResetHandlerData)
LEXT(ResetHandlerData)
    .space  (rhdSize_NUM),0                     // (filled with 0s)

    .global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)

    .global EXT(SleepToken)
    .space  (stSize_NUM),0

/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point. Reset vector = (__start & ~0xfff)
 */
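/*
 * Worked example (hypothetical address, for illustration only): if the kernel
 * entry point __start sat at physical 0x800004080, iBoot would derive
 * 0x800004080 & ~0xfff = 0x800004000 for IORVBAR, i.e. the base of the page
 * in which LowResetVectorBase is expected to reside.
 */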
    b       EXT(start_first_cpu)

/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
    .global EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
#if defined(KERNEL_INTEGRITY_KTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
    .globl  EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

    .globl  EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
    adrp    lr, EXT(arm_init_idle_cpu)@page
    add     lr, lr, EXT(arm_init_idle_cpu)@pageoff

    .globl  EXT(start_cpu)
    adrp    lr, EXT(arm_init_cpu)@page
    add     lr, lr, EXT(arm_init_cpu)@pageoff

#if defined(KERNEL_INTEGRITY_KTRR)
    // This is done right away in the reset vector for pre-KTRR devices
    // Set low reset vector now that we are in the KTRR-free zone
    adrp    x0, EXT(LowExceptionVectorBase)@page
    add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
    // x20 set to BootArgs phys address
    // x21 set to cpu data phys address

    // Get the kernel memory parameters from the boot args
    ldr     x22, [x20, BA_VIRT_BASE]            // Get the kernel virt base
    ldr     x23, [x20, BA_PHYS_BASE]            // Get the kernel phys base
    ldr     x24, [x20, BA_MEM_SIZE]             // Get the physical memory size
    ldr     x25, [x20, BA_TOP_OF_KERNEL_DATA]   // Get the top of the kernel data
    ldr     x26, [x20, BA_BOOT_FLAGS]           // Get the kernel boot flags

    // Set TPIDRRO_EL0 with the CPU number
    ldr     x0, [x21, CPU_NUMBER_GS]

    // Set the exception stack pointer
    ldr     x0, [x21, CPU_EXCEPSTACK_TOP]

    // Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)

    // Set the interrupt stack pointer
    ldr     x0, [x21, CPU_INTSTACK_TOP]
/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - L1 table address
 *   arg2 - L2 table address
 *   arg3 - Scratch register
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 */
.macro create_l1_table_entry
    and     $3, $0, #(ARM_TT_L1_INDEX_MASK)
    lsr     $3, $3, #(ARM_TT_L1_SHIFT)          // Get index in L1 table for L2 table
    lsl     $3, $3, #(TTE_SHIFT)                // Convert index into pointer offset
    add     $3, $1, $3                          // Get L1 entry pointer
    mov     $4, #(ARM_TTE_BOOT_TABLE)           // Get L1 table entry template
    and     $5, $2, #(ARM_TTE_TABLE_MASK)       // Get address bits of L2 table
    orr     $5, $4, $5                          // Create table entry for L2 table
    str     $5, [$3]                            // Write entry to L1 table
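/*
 * Illustrative invocation (register choices are arbitrary, not taken from the
 * original source): point the L1 entry covering the virtual address in x0,
 * within the L1 table whose address is in x1, at the L2 table whose address
 * is in x2, using x10-x12 as scratch:
 *
 *     create_l1_table_entry x0, x1, x2, x10, x11, x12
 */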
/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - Physical address
 *   arg2 - L2 table address
 *   arg3 - Number of entries
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 *   arg6 - Scratch register
 *   arg7 - Scratch register
 */
.macro create_l2_block_entries
    and     $4, $0, #(ARM_TT_L2_INDEX_MASK)
    lsr     $4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)   // Get index in L2 table for block entry
    lsl     $4, $4, #(TTE_SHIFT)                // Convert index into pointer offset
    add     $4, $2, $4                          // Get L2 entry pointer
    mov     $5, #(ARM_TTE_BOOT_BLOCK)           // Get L2 block entry template
    and     $6, $1, #(ARM_TTE_BLOCK_L2_MASK)    // Get address bits of block mapping
    mov     $7, #(ARM_TT_L2_SIZE)
    str     $6, [$4], #(1 << TTE_SHIFT)         // Write entry to L2 table and advance
    add     $6, $6, $7                          // Increment the output address
    subs    $5, $5, #1                          // Decrement the number of entries
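/*
 * Illustrative invocation (register choices are arbitrary, not taken from the
 * original source): create x3 consecutive block entries (each ARM_TT_L2_SIZE
 * bytes) mapping the physical address in x1 at the virtual address in x0,
 * inside the L2 table whose address is in x2, using x10-x13 as scratch:
 *
 *     create_l2_block_entries x0, x1, x2, x3, x10, x11, x12, x13
 */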
/*
 * arg0 - virtual start address
 * arg1 - physical start address
 * arg2 - number of entries to map
 * arg3 - L1 table address
 * arg4 - free space pointer
 * arg5 - scratch (entries mapped per loop)
 */
.macro create_bootstrap_mapping
    /* calculate entries left in this page */
    and     $5, $0, #(ARM_TT_L2_INDEX_MASK)
    lsr     $5, $5, #(ARM_TT_L2_SHIFT)
    mov     $6, #(TTE_PGENTRIES)

    /* allocate an L2 table */
3:  add     $4, $4, PGBYTES

    /* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */
    create_l1_table_entry   $0, $3, $4, $6, $7, $8

    /* determine how many entries to map this loop - the smaller of entries
     * remaining in page and total entries left */

    /* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3, scratch4) */
    create_l2_block_entries $0, $1, $4, $5, $6, $7, $8, $9

    /* subtract entries just mapped and bail out if we're done */

    /* entries left to map - advance base pointers */
    add     $0, $0, $5, lsl #(ARM_TT_L2_SHIFT)
    add     $1, $1, $5, lsl #(ARM_TT_L2_SHIFT)

    mov     $5, #(TTE_PGENTRIES)                /* subsequent loops map (up to) a whole L2 page */
/*
 * Cold boot init routine. Called from __start
 */
    .globl  EXT(start_first_cpu)
LEXT(start_first_cpu)

    // Unlock the core for debugging
    msr     DAIFSet, #(DAIFSC_ALL)              // Disable all interrupts

    // Set low reset vector before attempting any loads
    adrp    x0, EXT(LowExceptionVectorBase)@page
    add     x0, x0, EXT(LowExceptionVectorBase)@pageoff

    // Get the kernel memory parameters from the boot args
    ldr     x22, [x20, BA_VIRT_BASE]            // Get the kernel virt base
    ldr     x23, [x20, BA_PHYS_BASE]            // Get the kernel phys base
    ldr     x24, [x20, BA_MEM_SIZE]             // Get the physical memory size
    ldr     x25, [x20, BA_TOP_OF_KERNEL_DATA]   // Get the top of the kernel data
    ldr     x26, [x20, BA_BOOT_FLAGS]           // Get the kernel boot flags

    // Clear the register that will be used to store the userspace thread pointer and CPU number.
    // We may not actually be booting from ordinal CPU 0, so this register will be updated
    // in ml_parse_cpu_topology(), which happens later in bootstrap.

    // Set up exception stack pointer
    adrp    x0, EXT(excepstack_top)@page        // Load top of exception stack
    add     x0, x0, EXT(excepstack_top)@pageoff
    add     x0, x0, x22                         // Convert to KVA

    // Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)

    // Set up interrupt stack pointer
    adrp    x0, EXT(intstack_top)@page          // Load top of irq stack
    add     x0, x0, EXT(intstack_top)@pageoff
    add     x0, x0, x22                         // Convert to KVA

    msr     SPSel, #0                           // Set SP_EL0 to interrupt stack

    // Load address to the C init routine into link register
    adrp    lr, EXT(arm_init)@page
    add     lr, lr, EXT(arm_init)@pageoff
    add     lr, lr, x22                         // Convert to KVA
/*
 * Set up the bootstrap page tables with a single block entry for the V=P
 * mapping, a single block entry for the trampolined kernel address (KVA),
 * and all else invalid. This requires four pages:
 *   Page 1 - V=P L1 table
 *   Page 2 - V=P L2 table
 *   Page 3 - KVA L1 table
 *   Page 4 - KVA L2 table
 */
#if __ARM64_TWO_LEVEL_PMAP__
/*
 * If we are using a two level scheme, we don't need the L1 entries, so:
 *   Page 1 - V=P L2 table
 *   Page 2 - KVA L2 table
 */
#endif

    // Invalidate all entries in the bootstrap page tables
    mov     x0, #(ARM_TTE_EMPTY)                // Load invalid entry template
    mov     x1, x25                             // Start at top of kernel
    mov     x2, #(TTE_PGENTRIES)                // Load number of entries per page
#if __ARM64_TWO_LEVEL_PMAP__
    lsl     x2, x2, #1                          // Shift by 1 for num entries on 2 pages
#else
    lsl     x2, x2, #2                          // Shift by 2 for num entries on 4 pages
#endif
Linvalidate_bootstrap:                          // do {
    str     x0, [x1], #(1 << TTE_SHIFT)         //   Invalidate and advance
    subs    x2, x2, #1                          //   entries--
    b.ne    Linvalidate_bootstrap               // } while (entries != 0)
/*
 * In order to reclaim memory on targets where TZ0 (or some other entity)
 * must be located at the base of memory, iBoot may set the virtual and
 * physical base addresses to immediately follow whatever lies at the
 * base of physical memory.
 *
 * If the base address belongs to TZ0, it may be dangerous for xnu to map
 * it (as it may be prefetched, despite being technically inaccessible).
 * In order to avoid this issue while keeping the mapping code simple, we
 * may continue to use block mappings, but we will only map from xnu's mach
 * header to the end of memory.
 *
 * Given that iBoot guarantees that the unslid kernelcache base address
 * will begin on an L2 boundary, this should prevent us from accidentally
 * mapping TZ0.
 */
    adrp    x0, EXT(_mh_execute_header)@page    // Use xnu's mach header as the start address
    add     x0, x0, EXT(_mh_execute_header)@pageoff

    /*
     * Adjust physical and virtual base addresses to account for physical
     * memory preceding xnu's Mach-O header
     * x22 - Kernel virtual base
     * x23 - Kernel physical base
     * x24 - Physical memory size
     */
    /*
     * x0  - V=P virtual cursor
     * x4  - V=P physical cursor
     * x14 - KVA virtual cursor
     * x15 - KVA physical cursor
     */

    /*
     * x2 - free mem pointer from which we allocate a variable number of L2
     * pages. The maximum number of bootstrap page table pages is limited to
     * BOOTSTRAP_TABLE_SIZE. For a 2G 4k page device, assuming the worst-case
     * slide, we need 1xL1 and up to 3xL2 pages (1GB mapped per L1 entry), so
     * 8 total pages for V=P and KVA.
     */
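    /*
     * Illustrative arithmetic behind that bound, assuming a 4K granule: one
     * L2 table page holds 512 entries of 2MB each, i.e. it maps 1GB. A 2GB
     * device therefore needs 2 L2 pages per mapping, or 3 when a worst-case
     * slide straddles an extra 1GB boundary; adding the L1 page gives 4
     * pages each for the V=P and KVA mappings, 8 pages in total.
     */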
    /*
     * Set up the V=P bootstrap mapping
     * x5 - total number of L2 entries to allocate
     */
    lsr     x5,  x24, #(ARM_TT_L2_SHIFT)
    /* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */
    create_bootstrap_mapping x0,  x4,  x5, x1, x2, x6, x10, x11, x12, x13

    /* Set up the KVA bootstrap mapping */
    lsr     x5,  x24, #(ARM_TT_L2_SHIFT)
    create_bootstrap_mapping x14, x15, x5, x3, x2, x9, x10, x11, x12, x13

    /* Ensure TTEs are visible */
/*
 * Begin common CPU initialization
 *
 * x20 - PA of boot args
 * x21 - zero on cold boot, PA of cpu data on warm reset
 * x22 - Kernel virtual base
 * x23 - Kernel physical base
 * x25 - PA of the end of the kernel
 * lr  - KVA of C init routine
 * sp  - SP_EL0 selected
 *
 * SP_EL0 - KVA of CPU's interrupt stack
 * SP_EL1 - KVA of CPU's exception stack
 * TPIDRRO_EL0 - CPU number
 */

    // Set the translation control register.
    adrp    x0, EXT(sysreg_restore)@page        // Load TCR value from the system register restore structure
    add     x0, x0, EXT(sysreg_restore)@pageoff
    ldr     x1, [x0, SR_RESTORE_TCR_EL1]

    /* Set up translation table base registers.
     *   TTBR0 - V=P table @ top of kernel
     *   TTBR1 - KVA table @ top of kernel + 1 page
     */
#if defined(KERNEL_INTEGRITY_KTRR)
    /* Note that for KTRR configurations, the V=P map will be modified by
     * arm_vm_init.
     */
    and     x0, x25, #(TTBR_BADDR_MASK)
    and     x0, x0, #(TTBR_BADDR_MASK)
    // Set up MAIR attr0 for normal memory, attr1 for device memory
    mov     x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
    mov     x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
    mov     x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
    mov     x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
    mov     x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
    mov     x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
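    // Each Attr<n> field of MAIR_EL1 is one byte wide, so (assuming
    // MAIR_ATTR_SHIFT(n) expands to 8*n) each template above places its
    // attribute value in the byte selected by the corresponding
    // CACHE_ATTRINDX_* index before the accumulated value is written to
    // MAIR_EL1.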
#if defined(APPLEHURRICANE)

    // <rdar://problem/26726624> Increase Snoop reservation in EDB to reduce starvation risk
    // Needs to be done before MMU is enabled
    mrs     x12, ARM64_REG_HID5
    and     x12, x12, (~ARM64_REG_HID5_CrdEdbSnpRsvd_mask)
    orr     x12, x12, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE
    msr     ARM64_REG_HID5, x12

    // Set up timer interrupt routing; must be done before the MMU is enabled
    mrs     x15, MPIDR_EL1                      // Load MPIDR to get CPU number
    and     x15, x15, #0xFF                     // CPU number is in MPIDR Affinity Level 0
    add     x0, x0, #0x0040                     // x0: 0x4000004X Core Timers interrupt control
    add     x0, x0, x15, lsl #2
    mov     w1, #0xF0                           // x1: 0xF0 Route to Core FIQs
#ifndef __ARM_IC_NOALIAS_ICACHE__
    /* Invalidate the TLB and icache on systems that do not guarantee that the
     * caches are invalidated on reset.
     */

    /* If x21 is not 0, then this is either the start_cpu path or
     * the resume_idle_cpu path. cpu_ttep should already be
     * populated, so just switch to the kernel_pmap now.
     */
    adrp    x0, EXT(cpu_ttep)@page
    add     x0, x0, EXT(cpu_ttep)@pageoff

    // Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
    /* If this is not the first reset of the boot CPU, the alternate mapping
     * for the exception vectors will be set up, so use it. Otherwise, we
     * should use the mapping located in the kernelcache mapping.
     */
    MOV64   x0, ARM_KERNEL_PROTECT_EXCEPTION_START
#endif /* __ARM_KERNEL_PROTECT__ */
    adrp    x0, EXT(ExceptionVectorsBase)@page  // Load exception vectors base address
    add     x0, x0, EXT(ExceptionVectorsBase)@pageoff
    add     x0, x0, x22                         // Convert exception vector address to KVA
    // Enable caches and MMU
    mov     x0, #(SCTLR_EL1_DEFAULT & 0xFFFF)
    mov     x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000)

#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
    /*
     * If we have a Watchtower monitor it will set up CPACR_EL1 for us; touching
     * it here would trap to EL3.
     */
    mov     x0, #(CPACR_FPEN_ENABLE)

    // Clear thread pointer
    msr     TPIDR_EL1, x0                       // Set thread register

#if defined(APPLE_ARM64_ARCH_FAMILY)
    // Initialization common to all Apple targets
    ARM64_READ_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
    orr     x12, x12, ARM64_REG_HID4_DisDcMVAOps
    orr     x12, x12, ARM64_REG_HID4_DisDcSWL2Ops
    ARM64_WRITE_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4
#endif // APPLE_ARM64_ARCH_FAMILY
#if defined(APPLECYCLONE) || defined(APPLETYPHOON)

    // Cyclone/Typhoon-Specific initialization
    // For tunable summary, see <rdar://problem/13503621> Alcatraz/H6: Confirm Cyclone CPU tunables have been set

    // Disable LSP flush with context switch to work around bug in LSP
    // that can cause Cyclone to wedge when CONTEXTIDR is written.
    // <rdar://problem/12387704> Innsbruck11A175: panic(cpu 0 caller 0xffffff800024e30c): "wait queue deadlock - wq=0xffffff805a7a63c0, cpu=0\n"
    mrs     x12, ARM64_REG_HID0
    orr     x12, x12, ARM64_REG_HID0_LoopBuffDisb
    msr     ARM64_REG_HID0, x12

    mrs     x12, ARM64_REG_HID1
    orr     x12, x12, ARM64_REG_HID1_rccDisStallInactiveIexCtl
#if defined(APPLECYCLONE)
    orr     x12, x12, ARM64_REG_HID1_disLspFlushWithContextSwitch
#endif
    msr     ARM64_REG_HID1, x12

    mrs     x12, ARM64_REG_HID3
    orr     x12, x12, ARM64_REG_HID3_DisXmonSnpEvictTriggerL2StarvationMode
    msr     ARM64_REG_HID3, x12

    mrs     x12, ARM64_REG_HID5
    and     x12, x12, (~ARM64_REG_HID5_DisHwpLd)
    and     x12, x12, (~ARM64_REG_HID5_DisHwpSt)
    msr     ARM64_REG_HID5, x12

    // Change the default memcache data set ID from 0 to 15 for all agents
    mrs     x12, ARM64_REG_HID8
    orr     x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
#if ARM64_BOARD_CONFIG_T7001
    orr     x12, x12, ARM64_REG_HID8_DataSetID2_VALUE
#endif // ARM64_BOARD_CONFIG_T7001
    msr     ARM64_REG_HID8, x12
#endif // APPLECYCLONE || APPLETYPHOON
#if defined(APPLETWISTER)

    // rdar://problem/36112905: Set CYC_CFG:skipInit to pull in isAlive by one DCLK
    // to work around potential hang. Must only be applied to Maui C0.
    ubfx    x13, x12, #MIDR_EL1_PNUM_SHIFT, #12
    cmp     x13, #4                             // Part number 4 => Maui, 5 => Malta/Elba
    ubfx    x13, x12, #MIDR_EL1_VAR_SHIFT, #4
    cmp     x13, #2                             // variant 2 => Maui C0
    mrs     x12, ARM64_REG_CYC_CFG
    orr     x12, x12, ARM64_REG_CYC_CFG_skipInit
    msr     ARM64_REG_CYC_CFG, x12

    mrs     x12, ARM64_REG_HID11
    and     x12, x12, (~ARM64_REG_HID11_DisFillC1BubOpt)
    msr     ARM64_REG_HID11, x12

    // Change the default memcache data set ID from 0 to 15 for all agents
    mrs     x12, ARM64_REG_HID8
    orr     x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
    orr     x12, x12, (ARM64_REG_HID8_DataSetID2_VALUE | ARM64_REG_HID8_DataSetID3_VALUE)
    msr     ARM64_REG_HID8, x12

    // Use 4-cycle MUL latency to avoid denormal stalls
    mrs     x12, ARM64_REG_HID7
    orr     x12, x12, #ARM64_REG_HID7_disNexFastFmul
    msr     ARM64_REG_HID7, x12

    // disable reporting of TLB-multi-hit-error
    // <rdar://problem/22163216>
    mrs     x12, ARM64_REG_LSU_ERR_STS
    and     x12, x12, (~ARM64_REG_LSU_ERR_STS_L1DTlbMultiHitEN)
    msr     ARM64_REG_LSU_ERR_STS, x12

#endif // APPLETWISTER
#if defined(APPLEHURRICANE)

    // IC prefetch configuration
    // <rdar://problem/23019425>
    mrs     x12, ARM64_REG_HID0
    and     x12, x12, (~ARM64_REG_HID0_ICPrefDepth_bmsk)
    orr     x12, x12, (1 << ARM64_REG_HID0_ICPrefDepth_bshift)
    orr     x12, x12, ARM64_REG_HID0_ICPrefLimitOneBrn
    msr     ARM64_REG_HID0, x12

    // disable reporting of TLB-multi-hit-error
    // <rdar://problem/22163216>
    mrs     x12, ARM64_REG_LSU_ERR_CTL
    and     x12, x12, (~ARM64_REG_LSU_ERR_CTL_L1DTlbMultiHitEN)
    msr     ARM64_REG_LSU_ERR_CTL, x12

    // disable crypto fusion across decode groups
    // <rdar://problem/27306424>
    mrs     x12, ARM64_REG_HID1
    orr     x12, x12, ARM64_REG_HID1_disAESFuseAcrossGrp
    msr     ARM64_REG_HID1, x12

#if defined(ARM64_BOARD_CONFIG_T8011)
    // Clear DisDcZvaCmdOnly
    // Per Myst A0/B0 tunables document
    // <rdar://problem/27627428> Myst: Confirm ACC Per-CPU Tunables
    mrs     x12, ARM64_REG_HID3
    and     x12, x12, ~ARM64_REG_HID3_DisDcZvaCmdOnly
    msr     ARM64_REG_HID3, x12

    mrs     x12, ARM64_REG_EHID3
    and     x12, x12, ~ARM64_REG_EHID3_DisDcZvaCmdOnly
    msr     ARM64_REG_EHID3, x12
#endif /* defined(ARM64_BOARD_CONFIG_T8011) */

#endif // APPLEHURRICANE
#if defined(APPLEMONSOON)

    /***** Tunables that apply to all skye cores, all chip revs *****/

    // <rdar://problem/28512310> SW WAR/eval: WKdm write ack lost when bif_wke_colorWrAck_XXaH asserts concurrently for both colors
    mrs     x12, ARM64_REG_HID8
    orr     x12, x12, #ARM64_REG_HID8_WkeForceStrictOrder
    msr     ARM64_REG_HID8, x12

    // Skip if not E-core
    cbnz    x15, Lskip_skye_ecore_only

    /***** Tunables that only apply to skye e-cores, all chip revs *****/

    // <rdar://problem/30423928>: Atomic launch eligibility is erroneously taken away when a store at SMB gets invalidated
    mrs     x12, ARM64_REG_EHID11
    and     x12, x12, ~(ARM64_REG_EHID11_SmbDrainThresh_mask)
    msr     ARM64_REG_EHID11, x12

Lskip_skye_ecore_only:

    SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL x12, MONSOON_CPU_VERSION_B0, Lskip_skye_a0_workarounds

    // Skip if not E-core
    cbnz    x15, Lskip_skye_a0_ecore_only

    /***** Tunables that only apply to skye e-cores, chip revs < B0 *****/

    // Disable downstream fill bypass logic
    // <rdar://problem/28545159> [Tunable] Skye - L2E fill bypass collision from both pipes to ecore
    mrs     x12, ARM64_REG_EHID5
    orr     x12, x12, ARM64_REG_EHID5_DisFillByp
    msr     ARM64_REG_EHID5, x12

    // Disable forwarding of return addresses to the NFP
    // <rdar://problem/30387067> Skye: FED incorrectly taking illegal va exception
    mrs     x12, ARM64_REG_EHID0
    orr     x12, x12, ARM64_REG_EHID0_nfpRetFwdDisb
    msr     ARM64_REG_EHID0, x12

Lskip_skye_a0_ecore_only:

    /***** Tunables that apply to all skye cores, chip revs < B0 *****/

    // Disable clock divider gating
    // <rdar://problem/30854420> [Tunable/Errata][cpu_1p_1e] [CPGV2] ACC power down issue when link FSM switches from GO_DN to CANCEL and at the same time upStreamDrain request is set.
    mrs     x12, ARM64_REG_HID6
    orr     x12, x12, ARM64_REG_HID6_DisClkDivGating
    msr     ARM64_REG_HID6, x12

    // Disable clock dithering
    // <rdar://problem/29022199> [Tunable] Skye A0: Linux: LLC PIO Errors
    mrs     x12, ARM64_REG_ACC_OVRD
    orr     x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr
    msr     ARM64_REG_ACC_OVRD, x12

    mrs     x12, ARM64_REG_ACC_EBLK_OVRD
    orr     x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr
    msr     ARM64_REG_ACC_EBLK_OVRD, x12

Lskip_skye_a0_workarounds:

    SKIP_IF_CPU_VERSION_LESS_THAN x12, MONSOON_CPU_VERSION_B0, Lskip_skye_post_a1_workarounds

    /***** Tunables that apply to all skye cores, chip revs >= B0 *****/

    // <rdar://problem/32512836>: Disable refcount syncing between E and P
    mrs     x12, ARM64_REG_CYC_OVRD
    and     x12, x12, ~ARM64_REG_CYC_OVRD_dsblSnoopTime_mask
    orr     x12, x12, ARM64_REG_CYC_OVRD_dsblSnoopPTime
    msr     ARM64_REG_CYC_OVRD, x12

Lskip_skye_post_a1_workarounds:

#endif /* defined(APPLEMONSOON) */
    // If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
    cbnz    x21, Ltrampoline

    // Set KVA of boot args as first arg

    // x1: KVA page table phys base

    // Return to arm_init()

    // Load VA of the trampoline
    adrp    x0, arm_init_tramp@page
    add     x0, x0, arm_init_tramp@pageoff

    // Branch to the trampoline

/*
 * V=P to KVA trampoline.
 *   x0 - KVA of cpu data pointer
 */

    /* On a warm boot, the full kernel translation table is initialized in
     * addition to the bootstrap tables. The layout is as follows:
     *
     *  +--Top of Memory--+
     *  | Primary Kernel  |
     *  +--Top + 5 pages--+
     *  +--Top + 4 pages--+
     *  +--Top + 2 pages--+
     *  +--Top of Kernel--+
     *  +---Kernel Base---+
     */

    // Convert CPU data PA to VA and set as first argument

    /* Return to arm_init() */

//#include "globals_asm.h"

/* vim: set ts=4: */