/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <pexpert/arm64/cyclone.h>
#include <pexpert/arm64/hurricane.h>
#include <mach_assert.h>
#include <machine/asm.h>

.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	bl	EXT(pinst_set_vbar)
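	// Under KTRR, writes to VBAR_EL1 (and the other registers wrapped by
	// these MSR_*_X0 macros) go through pinst_* helpers, which presumably
	// reside in the protected, locked-down text region; non-KTRR builds use
	// a plain msr instead.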
#if defined(KERNEL_INTEGRITY_KTRR)

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	// This may abort, do so on SP1
	msr	SPSel, #0				// Back to SP0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * x19 - Reset handler data pointer
 * x20 - Boot args pointer
 * x21 - CPU data pointer
 */
	.globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
	// Preserve x0 for start_first_cpu, if called

	// Unlock the core for debugging

#if !(defined(KERNEL_INTEGRITY_KTRR))
	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
#endif

#if defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Set KTRR registers immediately after wake/resume
	 *
	 * During power-on reset, XNU stashed the kernel text region range values
	 * into __DATA,__const, which should be protected by the AMCC RoRgn at this
	 * point. Read this data and program/lock the KTRR registers accordingly.
	 * If either value is zero, we're debugging the kernel, so skip programming
	 * KTRR.
	 */
	// load stashed rorgn_begin
	adrp	x17, EXT(rorgn_begin)@page
	add	x17, x17, EXT(rorgn_begin)@pageoff

	// if rorgn_begin is zero, we're debugging; skip enabling KTRR

	// load stashed rorgn_end
	adrp	x19, EXT(rorgn_end)@page
	add	x19, x19, EXT(rorgn_end)@pageoff

	// program and lock down KTRR
	// subtract one page from rorgn_end to make pinst insns NX
	msr	ARM64_REG_KTRR_LOWER_EL1, x17
	sub	x19, x19, #(1 << (ARM_PTE_SHIFT-12)), lsl #12
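	// Arithmetic note: the operand above encodes one page. Assuming
	// ARM_PTE_SHIFT is the page shift (e.g. 14 for 16KB pages), the value is
	// (1 << (14 - 12)) << 12 = 16384 bytes; the "lsl #12" form is used
	// because the constant does not fit a plain 12-bit immediate.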
	msr	ARM64_REG_KTRR_UPPER_EL1, x19
	msr	ARM64_REG_KTRR_LOCK_EL1, x17
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

	// Process reset handlers
	adrp	x19, EXT(ResetHandlerData)@page			// Get address of the reset handler data
	add	x19, x19, EXT(ResetHandlerData)@pageoff
	mrs	x15, MPIDR_EL1					// Load MPIDR to get CPU number
	and	x0, x15, #0xFF					// CPU number is in MPIDR Affinity Level 0
	ldr	x1, [x19, CPU_DATA_ENTRIES]			// Load start of data entries
	add	x3, x1, MAX_CPUS * 16				// end addr of data entries = start + (16 * MAX_CPUS)
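	// Each cpu data entry appears to be 16 bytes: a physical CPU id plus the
	// physical address of that CPU's cpu_data (inferred from CPU_PHYS_ID /
	// CPU_DATA_PADDR and the #16 stride in the loop below).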
Lcheck_cpu_data_entry:
	ldr	x21, [x1, CPU_DATA_PADDR]			// Load physical CPU data address
	cbz	x21, Lnext_cpu_data_entry
	ldr	w2, [x21, CPU_PHYS_ID]				// Load ccc cpu phys id
	cmp	x0, x2						// Compare cpu data phys cpu and MPIDR_EL1 phys cpu
	b.eq	Lfound_cpu_data_entry				// Branch if match
Lnext_cpu_data_entry:
	add	x1, x1, #16					// Increment to the next cpu data entry
	cmp	x1, x3						// Check if we have reached the end of the data entries
	b.eq	Lskip_cpu_reset_handler				// Not found
	b	Lcheck_cpu_data_entry				// loop
Lfound_cpu_data_entry:
	adrp	x20, EXT(const_boot_args)@page
	add	x20, x20, EXT(const_boot_args)@pageoff
	ldr	x0, [x21, CPU_RESET_HANDLER]			// Call CPU reset handler
	cbz	x0, Lskip_cpu_reset_handler

	// Validate that our handler is one of the two expected handlers
	adrp	x2, EXT(resume_idle_cpu)@page
	add	x2, x2, EXT(resume_idle_cpu)@pageoff

	adrp	x2, EXT(start_cpu)@page
	add	x2, x2, EXT(start_cpu)@pageoff
	bne	Lskip_cpu_reset_handler

Lskip_cpu_reset_handler:
	b	.						// Hang if the handler is NULL or returns
	.globl EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space	(rhdSize_NUM), 0				// (filled with 0s)

	.global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
	.global EXT(SleepToken)
	.space	(stSize_NUM), 0

/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point. Reset vector = (__start & ~0xfff)
 */
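/*
 * Worked example (hypothetical addresses): if __start ends up at physical
 * 0x800004010, iBoot programs IORVBAR = 0x800004010 & ~0xfff = 0x800004000,
 * i.e. the base of the page containing LowResetVectorBase.
 */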
	b	EXT(start_first_cpu)

/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
	.global EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)

#if defined(KERNEL_INTEGRITY_KTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
	.globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	.globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
	adrp	lr, EXT(arm_init_idle_cpu)@page
	add	lr, lr, EXT(arm_init_idle_cpu)@pageoff

	.globl EXT(start_cpu)
LEXT(start_cpu)
	adrp	lr, EXT(arm_init_cpu)@page
	add	lr, lr, EXT(arm_init_cpu)@pageoff

#if defined(KERNEL_INTEGRITY_KTRR)
	// This is done right away in the reset vector for pre-KTRR devices
	// Set the low reset vector now that we are in the KTRR-free zone
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

	// x20 set to BootArgs phys address
	// x21 set to cpu data phys address
	msr	DAIFSet, #(DAIFSC_ALL)				// Disable all interrupts
	// Get the kernel memory parameters from the boot args
	ldr	x22, [x20, BA_VIRT_BASE]			// Get the kernel virt base
	ldr	x23, [x20, BA_PHYS_BASE]			// Get the kernel phys base
	ldr	x24, [x20, BA_MEM_SIZE]				// Get the physical memory size
	ldr	x25, [x20, BA_TOP_OF_KERNEL_DATA]		// Get the top of the kernel data

	// Set TPIDRRO_EL0 with the CPU number
	ldr	x0, [x21, CPU_NUMBER_GS]

	// Set the exception stack pointer
	ldr	x0, [x21, CPU_EXCEPSTACK_TOP]

	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)

	// Set the interrupt stack pointer
	ldr	x0, [x21, CPU_INTSTACK_TOP]
/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 *	arg0 - Virtual address
 *	arg1 - L1 table address
 *	arg2 - L2 table address
 *	arg3 - Scratch register
 *	arg4 - Scratch register
 *	arg5 - Scratch register
 */
.macro create_l1_table_entry
	and	$3, $0, #(ARM_TT_L1_INDEX_MASK)
	lsr	$3, $3, #(ARM_TT_L1_SHIFT)			// Get index in L1 table for L2 table
	lsl	$3, $3, #(TTE_SHIFT)				// Convert index into pointer offset
	add	$3, $1, $3					// Get L1 entry pointer
	mov	$4, #(ARM_TTE_BOOT_TABLE)			// Get L1 table entry template
	and	$5, $2, #(ARM_TTE_TABLE_MASK)			// Get address bits of L2 table
	orr	$5, $4, $5					// Create table entry for L2 table
	str	$5, [$3]					// Write entry to L1 table
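	// Example use (matches the bootstrap mapping code below):
	//   create_l1_table_entry x0, x1, x2, x10, x11, x12
	// points the L1 entry covering the VA in x0 (L1 table at x1) at the L2
	// table whose address is in x2, using x10-x12 as scratch.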
/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 *	arg0 - Virtual address
 *	arg1 - Physical address
 *	arg2 - L2 table address
 *	arg3 - Number of entries
 *	arg4 - Scratch register
 *	arg5 - Scratch register
 *	arg6 - Scratch register
 *	arg7 - Scratch register
 */
.macro create_l2_block_entries
	and	$4, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr	$4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)		// Get index in L2 table for block entry
	lsl	$4, $4, #(TTE_SHIFT)				// Convert index into pointer offset
	add	$4, $2, $4					// Get L2 entry pointer
	mov	$5, #(ARM_TTE_BOOT_BLOCK)			// Get L2 block entry template
	and	$6, $1, #(ARM_TTE_BLOCK_L2_MASK)		// Get address bits of block mapping

	mov	$7, #(ARM_TT_L2_SIZE)

	str	$6, [$4], #(1 << TTE_SHIFT)			// Write entry to L2 table and advance
	add	$6, $6, $7					// Increment the output address
	subs	$5, $5, #1					// Decrement the number of entries
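	// Example use (matches the bootstrap mapping code below):
	//   create_l2_block_entries x0, x0, x2, x5, x10, x11, x12, x13
	// writes x5 consecutive L2 block entries into the table at x2, mapping
	// VA == PA starting at the address in x0.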
/*
 * Cold boot init routine. Called from __start
 */
	.globl EXT(start_first_cpu)
LEXT(start_first_cpu)

	// Unlock the core for debugging

	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff

	// Get the kernel memory parameters from the boot args
	ldr	x22, [x20, BA_VIRT_BASE]			// Get the kernel virt base
	ldr	x23, [x20, BA_PHYS_BASE]			// Get the kernel phys base
	ldr	x24, [x20, BA_MEM_SIZE]				// Get the physical memory size
	ldr	x25, [x20, BA_TOP_OF_KERNEL_DATA]		// Get the top of the kernel data
	// Set CPU number to 0

	// Set up exception stack pointer
	adrp	x0, EXT(excepstack_top)@page			// Load top of exception stack
	add	x0, x0, EXT(excepstack_top)@pageoff
	add	x0, x0, x22					// Convert to KVA

	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)

	// Set up interrupt stack pointer
	adrp	x0, EXT(intstack_top)@page			// Load top of irq stack
	add	x0, x0, EXT(intstack_top)@pageoff
	add	x0, x0, x22					// Convert to KVA

	msr	SPSel, #0					// Set SP_EL0 to interrupt stack
	// Load the address of the C init routine into the link register
	adrp	lr, EXT(arm_init)@page
	add	lr, lr, EXT(arm_init)@pageoff
	add	lr, lr, x22					// Convert to KVA
/*
 * Set up the bootstrap page tables with a single block entry for the V=P
 * mapping, a single block entry for the trampolined kernel address (KVA),
 * and all else invalid. This requires four pages:
 *	Page 1 - V=P L1 table
 *	Page 2 - V=P L2 table
 *	Page 3 - KVA L1 table
 *	Page 4 - KVA L2 table
 */
#if __ARM64_TWO_LEVEL_PMAP__
/*
 * If we are using a two level scheme, we don't need the L1 entries, so:
 *	Page 1 - V=P L2 table
 *	Page 2 - KVA L2 table
 */
#endif
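	// These bootstrap table pages are carved out starting at x25
	// (BA_TOP_OF_KERNEL_DATA, loaded above); that is why x25 is the base for
	// the invalidation loop and for the TTBR values programmed later.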
	// Invalidate all entries in the bootstrap page tables
	mov	x0, #(ARM_TTE_EMPTY)				// Load invalid entry template
	mov	x1, x25						// Start at top of kernel
	mov	x2, #(TTE_PGENTRIES)				// Load number of entries per page
#if __ARM64_TWO_LEVEL_PMAP__
	lsl	x2, x2, #1					// Shift by 1 for num entries on 2 pages
#else
	lsl	x2, x2, #2					// Shift by 2 for num entries on 4 pages
#endif
	sub	x2, x2, #1					// Subtract one to terminate on last entry
Linvalidate_bootstrap:						// do {
	str	x0, [x1], #(1 << TTE_SHIFT)			//   Invalidate and advance
	subs	x2, x2, #1					//   entries--
	b.ne	Linvalidate_bootstrap				// } while (entries != 0)
/*
 * Load addresses for page table construction macros
 *	x0 - Physical base (used to identify V=P section to set up)
 *	x1 - V=P L1 table base
 *	x2 - V=P L2 table base
 *	x3 - KVA L1 table base
 *	x4 - KVA L2 table base
 *	x5 - Mem size in entries (up to 1GB)
 *
 * In order to reclaim memory on targets where TZ0 (or some other entity)
 * must be located at the base of memory, iBoot may set the virtual and
 * physical base addresses to immediately follow whatever lies at the
 * base of physical memory.
 *
 * If the base address belongs to TZ0, it may be dangerous for xnu to map
 * it (as it may be prefetched, despite being technically inaccessible).
 * In order to avoid this issue while keeping the mapping code simple, we
 * may continue to use block mappings, but we will only map xnu's mach
 * header to the end of memory.
 *
 * Given that iBoot guarantees that the unslid kernelcache base address
 * will begin on an L2 boundary, this should prevent us from accidentally
 * mapping TZ0.
 */
	adrp	x0, EXT(_mh_execute_header)@page		// Use xnu's mach header as the start address
	add	x0, x0, EXT(_mh_execute_header)@pageoff
#if __ARM64_TWO_LEVEL_PMAP__
	/*
	 * We don't need the L1 entries in this case, so skip them.
	 */
	mov	x2, x25						// Load V=P L2 table address
	add	x4, x2, PGBYTES					// Load KVA L2 table address
#else
	mov	x1, x25						// Load V=P L1 table address
	add	x2, x1, PGBYTES					// Load V=P L2 table address
	add	x3, x2, PGBYTES					// Load KVA L1 table address
	add	x4, x3, PGBYTES					// Load KVA L2 table address
#endif

	/*
	 * We must adjust the amount we wish to map in order to account for the
	 * memory preceding xnu's mach header.
	 */
	sub	x5, x0, x23					// Map from the mach header up to the end of our memory

	lsr	x5, x5, #(ARM_TT_L2_SHIFT)
	mov	x6, #(TTE_PGENTRIES)				// Load number of L2 entries per page
	cmp	x5, x6						// If memsize requires more than 1 page of entries
	csel	x5, x5, x6, lt					// ... round down to a single page (first 1GB)
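	// Rough arithmetic, assuming a 4KB translation granule: each L2 block
	// maps 2MB (ARM_TT_L2_SHIFT == 21) and one table page holds 512 entries
	// (TTE_PGENTRIES), so the clamp above limits the bootstrap mapping to
	// 512 * 2MB = 1GB, matching the "up to 1GB" note earlier.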
#if !__ARM64_TWO_LEVEL_PMAP__
	/*
	 * Create entry for L2 table in V=P L1 table
	 * create_l1_table_entry(V=P, L1 table, L2 table, scratch1, scratch2, scratch3)
	 */
	create_l1_table_entry	x0, x1, x2, x10, x11, x12
#endif

	/*
	 * Create block entry in V=P L2 table
	 * create_l2_block_entries(V=P virt, V=P phys, L2 table, num_ents, scratch1, scratch2, scratch3, scratch4)
	 */
	create_l2_block_entries	x0, x0, x2, x5, x10, x11, x12, x13

#if !__ARM64_TWO_LEVEL_PMAP__
	/*
	 * Create entry for L2 table in KVA L1 table
	 * create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3)
	 */
	create_l1_table_entry	x22, x3, x4, x10, x11, x12
#endif

	/*
	 * Create block entries in KVA L2 table
	 * create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3, scratch4)
	 */
	create_l2_block_entries	x22, x23, x4, x5, x10, x11, x12, x13

	/* Ensure TTEs are visible */
/*
 * Begin common CPU initialization
 *
 *	x20 - PA of boot args
 *	x21 - zero on cold boot, PA of cpu data on warm reset
 *	x22 - Kernel virtual base
 *	x23 - Kernel physical base
 *	x24 - Physical memory size
 *	x25 - PA of the end of the kernel
 *	lr  - KVA of C init routine
 *	sp  - SP_EL0 selected
 *
 *	SP_EL0 - KVA of CPU's interrupt stack
 *	SP_EL1 - KVA of CPU's exception stack
 *	TPIDRRO_EL0 - CPU number
 */
	// Set the translation control register.
	adrp	x0, EXT(sysreg_restore)@page			// Load TCR value from the system register restore structure
	add	x0, x0, EXT(sysreg_restore)@pageoff
	ldr	x1, [x0, SR_RESTORE_TCR_EL1]

	/*
	 * Set up translation table base registers.
	 *	TTBR0 - V=P table @ top of kernel
	 *	TTBR1 - KVA table @ top of kernel + 2 pages
	 */
#if defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Note that for KTRR configurations, the V=P map will be modified by
	 * arm_vm_init.
	 */
#endif
	and	x0, x25, #(TTBR_BADDR_MASK)

#if __ARM64_TWO_LEVEL_PMAP__
	/*
	 * If we're using a two level pmap, we'll only need a
	 * single page per bootstrap pmap.
	 */
#else
	/*
	 * If we're using a three level pmap, we'll need two
	 * pages per bootstrap pmap.
	 */
#endif
	add	x0, x25, x12, lsl PGSHIFT
	and	x0, x0, #(TTBR_BADDR_MASK)
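	// TTBR0 receives the V=P bootstrap table at x25; TTBR1 receives the KVA
	// table at x25 plus x12 pages, where x12 is expected to hold the
	// per-bootstrap-pmap page count described above (one page for a
	// two-level pmap, two pages for a three-level pmap).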
	// Set up MAIR attr0 for normal memory, attr1 for device memory
	mov	x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
	mov	x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
	mov	x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
	mov	x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
	mov	x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
	mov	x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
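	// Each attribute above occupies its own 8-bit field of MAIR_EL1, selected
	// by MAIR_ATTR_SHIFT(index); the per-index values are combined into a
	// single 64-bit value that is then written to MAIR_EL1.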
	// Disable interrupts
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)

#if defined(APPLEHURRICANE)
	// <rdar://problem/26726624> Increase Snoop reservation in EDB to reduce starvation risk
	// Needs to be done before MMU is enabled
	mrs	x12, ARM64_REG_HID5
	and	x12, x12, (~ARM64_REG_HID5_CrdEdbSnpRsvd_mask)
	orr	x12, x12, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE
	msr	ARM64_REG_HID5, x12
#endif
#ifndef __ARM_IC_NOALIAS_ICACHE__
	/*
	 * Invalidate the TLB and icache on systems that do not guarantee that the
	 * caches are invalidated on reset.
	 */

	/*
	 * If x21 is not 0, then this is either the start_cpu path or
	 * the resume_idle_cpu path. cpu_ttep should already be
	 * populated, so just switch to the kernel_pmap now.
	 */

	adrp	x0, EXT(cpu_ttep)@page
	add	x0, x0, EXT(cpu_ttep)@pageoff

	// Set up the exception vectors
	adrp	x0, EXT(ExceptionVectorsBase)@page		// Load exception vectors base address
	add	x0, x0, EXT(ExceptionVectorsBase)@pageoff
	add	x0, x0, x22					// Convert exception vector address to KVA

	// Enable caches and MMU
	mov	x0, #(SCTLR_EL1_DEFAULT & 0xFFFF)
	mov	x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000)
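	// SCTLR_EL1_DEFAULT generally cannot be encoded as a single MOV
	// immediate, so it is assembled in 16-bit halves (low half in x0, high
	// half in x1) before being combined and written to SCTLR_EL1.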
#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
	/*
	 * If we have a Watchtower monitor, it will set up CPACR_EL1 for us;
	 * touching it here would trap to EL3.
	 */
	mov	x0, #(CPACR_FPEN_ENABLE)

	// Clear thread pointer
	msr	TPIDR_EL1, x0					// Set thread register
#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
	// Cyclone/Typhoon-Specific initialization
	// For tunable summary, see <rdar://problem/13503621> Alcatraz/H6: Confirm Cyclone CPU tunables have been set

	// Disable LSP flush with context switch to work around bug in LSP
	// that can cause Cyclone to wedge when CONTEXTIDR is written.
	// <rdar://problem/12387704> Innsbruck11A175: panic(cpu 0 caller 0xffffff800024e30c): "wait queue deadlock - wq=0xffffff805a7a63c0, cpu=0\n"

	mrs	x12, ARM64_REG_HID0
	orr	x12, x12, ARM64_REG_HID0_LoopBuffDisb
	msr	ARM64_REG_HID0, x12

	mrs	x12, ARM64_REG_HID1
	orr	x12, x12, ARM64_REG_HID1_rccDisStallInactiveIexCtl
#if defined(APPLECYCLONE)
	orr	x12, x12, ARM64_REG_HID1_disLspFlushWithContextSwitch
#endif
	msr	ARM64_REG_HID1, x12
	mrs	x12, ARM64_REG_HID3
	orr	x12, x12, ARM64_REG_HID3_DisXmonSnpEvictTriggerL2StarvationMode
	msr	ARM64_REG_HID3, x12
	// Disable cache ops (DisDcMVAOps / DisDcSWL2Ops) -- XNU's cache operations are already no-op'ed for Cyclone, but explicit _Force variants are provided
	// for when we really do need the L2 cache to be cleaned: <rdar://problem/14350417> Innsbruck11A416: Panic logs not preserved on h6
	mrs	x12, ARM64_REG_HID4
	orr	x12, x12, ARM64_REG_HID4_DisDcMVAOps
	orr	x12, x12, ARM64_REG_HID4_DisDcSWL2Ops
	msr	ARM64_REG_HID4, x12

	mrs	x12, ARM64_REG_HID5
	and	x12, x12, (~ARM64_REG_HID5_DisHwpLd)
	and	x12, x12, (~ARM64_REG_HID5_DisHwpSt)
	msr	ARM64_REG_HID5, x12

	// Change the default memcache data set ID from 0 to 15 for all agents
	mrs	x12, ARM64_REG_HID8
	orr	x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
#if ARM64_BOARD_CONFIG_T7001
	orr	x12, x12, ARM64_REG_HID8_DataSetID2_VALUE
#endif // ARM64_BOARD_CONFIG_T7001
	msr	ARM64_REG_HID8, x12
#endif // APPLECYCLONE || APPLETYPHOON
#if defined(APPLETWISTER)
	mrs	x12, ARM64_REG_HID11
	and	x12, x12, (~ARM64_REG_HID11_DisFillC1BubOpt)
	msr	ARM64_REG_HID11, x12

	// Change the default memcache data set ID from 0 to 15 for all agents
	mrs	x12, ARM64_REG_HID8
	orr	x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE)
	orr	x12, x12, (ARM64_REG_HID8_DataSetID2_VALUE | ARM64_REG_HID8_DataSetID3_VALUE)
	msr	ARM64_REG_HID8, x12

	// Use 4-cycle MUL latency to avoid denormal stalls
	mrs	x12, ARM64_REG_HID7
	orr	x12, x12, #ARM64_REG_HID7_disNexFastFmul
	msr	ARM64_REG_HID7, x12

	// disable reporting of TLB-multi-hit-error
	// <rdar://problem/22163216>
	mrs	x12, ARM64_REG_LSU_ERR_STS
	and	x12, x12, (~ARM64_REG_LSU_ERR_STS_L1DTlbMultiHitEN)
	msr	ARM64_REG_LSU_ERR_STS, x12

#endif // APPLETWISTER
#if defined(APPLEHURRICANE)

	// IC prefetch configuration
	// <rdar://problem/23019425>
	mrs	x12, ARM64_REG_HID0
	and	x12, x12, (~ARM64_REG_HID0_ICPrefDepth_bmsk)
	orr	x12, x12, (1 << ARM64_REG_HID0_ICPrefDepth_bshift)
	orr	x12, x12, ARM64_REG_HID0_ICPrefLimitOneBrn
	msr	ARM64_REG_HID0, x12

	// disable reporting of TLB-multi-hit-error
	// <rdar://problem/22163216>
	mrs	x12, ARM64_REG_LSU_ERR_CTL
	and	x12, x12, (~ARM64_REG_LSU_ERR_CTL_L1DTlbMultiHitEN)
	msr	ARM64_REG_LSU_ERR_CTL, x12

	// disable crypto fusion across decode groups
	// <rdar://problem/27306424>
	mrs	x12, ARM64_REG_HID1
	orr	x12, x12, ARM64_REG_HID1_disAESFuseAcrossGrp
	msr	ARM64_REG_HID1, x12

#if defined(ARM64_BOARD_CONFIG_T8011)
	// Clear DisDcZvaCmdOnly
	// Per Myst A0/B0 tunables document
	// https://seg-docs.csg.apple.com/projects/myst//release/UserManual/tunables_a0/ACC.html
	// <rdar://problem/27627428> Myst: Confirm ACC Per-CPU Tunables
	mrs	x12, ARM64_REG_HID3
	and	x12, x12, ~ARM64_REG_HID3_DisDcZvaCmdOnly
	msr	ARM64_REG_HID3, x12

	mrs	x12, ARM64_REG_EHID3
	and	x12, x12, ~ARM64_REG_EHID3_DisDcZvaCmdOnly
	msr	ARM64_REG_EHID3, x12
#endif /* defined(ARM64_BOARD_CONFIG_T8011) */

#endif // APPLEHURRICANE
	// If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
	cbnz	x21, Ltrampoline

	// Set KVA of boot args as first arg

	// x1: KVA page table phys base

	// Return to arm_init()

Ltrampoline:
	// Load VA of the trampoline
	adrp	x0, arm_init_tramp@page
	add	x0, x0, arm_init_tramp@pageoff

	// Branch to the trampoline

/*
 * V=P to KVA trampoline.
 *	x0 - KVA of cpu data pointer
 */
	/*
	 * On a warm boot, the full kernel translation table is initialized in
	 * addition to the bootstrap tables. The layout is as follows:
	 *
	 *  +--Top of Memory--+
	 *          ...
	 *  +--Top + 5 pages--+
	 *  |  Invalid table  |
	 *  +--Top + 4 pages--+
	 *  |    KVA table    |
	 *  |    (2 pages)    |
	 *  +--Top + 2 pages--+
	 *  |    V=P table    |
	 *  |    (2 pages)    |
	 *  +--Top of Kernel--+
	 *  |  Kernel Mach-O  |
	 *  +---Kernel Base---+
	 */
	adrp	x0, EXT(invalid_ttep)@page
	add	x0, x0, EXT(invalid_ttep)@pageoff
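	// Presumably, once execution continues at its KVA, the V=P window is no
	// longer needed, so TTBR0 is repointed at the empty table referenced by
	// invalid_ttep (inferred from the symbol name and the layout above).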
	// Convert CPU data PA to VA and set as first argument

	// Make sure that the TLB flush happens after the registers are set!

	// Synchronize system for TTBR updates

	/* Return to arm_init() */

//#include	"globals_asm.h"