 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
#include <machine/asm.h>
#include <arm64/exception_asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <sys/errno.h>
 * void set_bp_ret(void)
 * Helper function to enable branch predictor state retention
	.globl EXT(set_bp_ret)
	// Load bpret boot-arg
	adrp	x14, EXT(bp_ret)@page
	add	x14, x14, EXT(bp_ret)@pageoff
	mrs	x13, ARM64_REG_ACC_CFG
	and	x13, x13, (~(ARM64_REG_ACC_CFG_bpSlp_mask << ARM64_REG_ACC_CFG_bpSlp_shift))
	and	x14, x14, #(ARM64_REG_ACC_CFG_bpSlp_mask)
	orr	x13, x13, x14, lsl #(ARM64_REG_ACC_CFG_bpSlp_shift)
	msr	ARM64_REG_ACC_CFG, x13
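/*
 * Illustrative C sketch (not part of this file) of the read-modify-write field
 * insert performed above; "bp_ret" stands for the boot-arg value loaded through x14:
 *
 *	acc_cfg &= ~(ARM64_REG_ACC_CFG_bpSlp_mask << ARM64_REG_ACC_CFG_bpSlp_shift);
 *	acc_cfg |= (bp_ret & ARM64_REG_ACC_CFG_bpSlp_mask) << ARM64_REG_ACC_CFG_bpSlp_shift;
 *
 * Only the branch-predictor sleep-retention field changes; every other bit of
 * ACC_CFG is preserved.
 */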
	.globl EXT(set_nex_pg)
	// Skip if this isn't a p-core; NEX powergating isn't available for e-cores
	and	x14, x14, #(MPIDR_PNE)
	// Set the SEG-recommended value of 12 additional reset cycles
	HID_INSERT_BITS	ARM64_REG_HID13, ARM64_REG_HID13_RstCyc_mask, ARM64_REG_HID13_RstCyc_val, x13
	HID_SET_BITS	ARM64_REG_HID14, ARM64_REG_HID14_NexPwgEn, x13
/* uint32_t get_fpscr(void):
 * Returns (FPSR | FPCR).
	mrs	x1, FPSR			// Grab FPSR
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	and	x1, x1, x0			// Be paranoid, and clear bits we expect to be clear
	mrs	x2, FPCR			// Grab FPCR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	and	x2, x2, x0			// Be paranoid, and clear bits we expect to be clear
	orr	x0, x1, x2			// OR them to get FPSCR equivalent state
	.globl EXT(set_fpscr)
/* void set_fpscr(uint32_t value):
 * Set the FPCR and FPSR registers, based on the given value; a
 * noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
 * and FPCR are not responsible for condition codes.
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	and	x1, x1, x0			// Clear the bits that don't apply to FPSR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	and	x2, x2, x0			// Clear the bits that don't apply to FPCR
	msr	FPSR, x1			// Write FPSR
	msr	FPCR, x2			// Write FPCR
	dsb	ish				// FPCR requires synchronization
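/*
 * Illustrative sketch (not part of this file) of how a caller might use the
 * pair above; the FPCR bit shown is the architectural Default NaN bit (DN,
 * bit 25) and is only an example:
 *
 *	uint32_t fpscr = get_fpscr();          // merged (FPSR | FPCR) view
 *	set_fpscr(fpscr | (1u << 25));         // e.g. turn on Default NaN mode
 *
 * In AArch64 the status flags (FPSR) and control bits (FPCR) live in separate
 * system registers, so get_fpscr() ORs them together and set_fpscr() masks the
 * combined value back apart before writing each register.
 */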
 * void update_mdscr(unsigned long clear, unsigned long set)
 * Clears and sets the specified bits in MDSCR_EL1.
 * Setting breakpoints in EL1 is effectively a KTRR bypass. The ability to do so is
 * controlled by MDSCR.KDE. The MSR to set MDSCR must be present to allow
 * self-hosted user mode debug. Any checks before the MSR can be skipped with ROP,
 * so we need to put the checks after the MSR where they can't be skipped. That
 * still leaves a small window if a breakpoint is set on the instruction
 * immediately after the MSR. To handle that, we also do a check and then set of
 * the breakpoint control registers. This allows us to guarantee that a given
 * core will never have both KDE set and a breakpoint targeting EL1.
 * If KDE gets set, unset it and then panic
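 *
 * Hedged C-like sketch of the check-after-write ordering described above
 * (pseudocode only, not the literal instruction sequence; KDE is MDSCR_EL1
 * bit 13):
 *
 *	mdscr = (MRS(MDSCR_EL1) & ~clear) | set;
 *	MSR(MDSCR_EL1, mdscr);              // the write an attacker would target
 *	if (MRS(MDSCR_EL1) & MDSCR_KDE)     // verification *after* the write
 *		panic("MDSCR.KDE was set");
 *
 * Because the check follows the MSR, a ROP chain that jumps straight to the
 * write still falls through the verification before KDE can be abused.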
	.globl EXT(update_mdscr)
#if defined(CONFIG_KERNEL_INTEGRITY)
 * verify KDE didn't get set (including via ROP)
 * If set, clear it and then panic
	b.ne	Lupdate_mdscr_panic
	adrp	x0, Lupdate_mdscr_panic_str@page
	add	x0, x0, Lupdate_mdscr_panic_str@pageoff
Lupdate_mdscr_panic_str:
	.asciz "MDSCR.KDE was set"
 * Set MMU Translation Table Base Alternate
	.globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
#if defined(KERNEL_INTEGRITY_KTRR)
	bl	EXT(pinst_set_ttbr1)
#if defined(HAS_VMSA_LOCK)
#if DEBUG || DEVELOPMENT
	mrs	x1, ARM64_REG_VMSA_LOCK_EL1
	and	x1, x1, #(VMSA_LOCK_TTBR1_EL1)
	cbnz	x1, L_set_locked_reg_panic
#endif /* DEBUG || DEVELOPMENT */
#endif /* defined(HAS_VMSA_LOCK) */
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	.section __PPLTEXT,__text,regular,pure_instructions
	.globl EXT(set_mmu_ttb)
#if __ARM_KERNEL_PROTECT__
	/* All EL1-mode ASIDs are odd. */
	orr	x0, x0, #(1 << TTBR_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */
	.globl EXT(ml_get_ppl_cpu_data)
LEXT(ml_get_ppl_cpu_data)
	LOAD_PMAP_CPU_DATA x0, x1, x2
 * set AUX control register
	.globl EXT(set_aux_control)
LEXT(set_aux_control)
	// Synchronize system
#if __ARM_KERNEL_PROTECT__
	.globl EXT(set_vbar_el1)
#if defined(KERNEL_INTEGRITY_KTRR)
	b	EXT(pinst_set_vbar)
#endif /* __ARM_KERNEL_PROTECT__ */
#if defined(HAS_VMSA_LOCK)
	.globl EXT(vmsa_lock)
	mov	x1, #(VMSA_LOCK_SCTLR_M_BIT)
#if __ARM_MIXED_PAGE_SIZE__
	mov	x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_VBAR_EL1)
	mov	x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_TCR_EL1 | VMSA_LOCK_VBAR_EL1)
	msr	ARM64_REG_VMSA_LOCK_EL1, x0
#endif /* defined(HAS_VMSA_LOCK) */
 * set translation control register
#if defined(APPLE_ARM64_ARCH_FAMILY)
#if DEBUG || DEVELOPMENT
	// Assert that T0SZ is always equal to T1SZ
	eor	x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
	and	x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
	cbnz	x1, L_set_tcr_panic
#endif /* DEBUG || DEVELOPMENT */
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
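/*
 * The EOR/AND pair in the assertion above is a branch-free bitfield compare;
 * an equivalent C sketch (illustrative only, using the same mask/shift macros):
 *
 *	uint64_t diff = tcr ^ (tcr >> (TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT));
 *	if (diff & (TCR_TSZ_MASK << TCR_T0SZ_SHIFT))
 *		panic("set_tcr: t0sz, t1sz not equal");
 *
 * Shifting T1SZ down onto T0SZ and XORing leaves nonzero bits in the T0SZ
 * field exactly when the two fields differ.
 */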
#if defined(KERNEL_INTEGRITY_KTRR)
	bl	EXT(pinst_set_tcr)
#if defined(HAS_VMSA_LOCK)
#if DEBUG || DEVELOPMENT
	// assert TCR unlocked
	mrs	x1, ARM64_REG_VMSA_LOCK_EL1
	and	x1, x1, #(VMSA_LOCK_TCR_EL1)
	cbnz	x1, L_set_locked_reg_panic
#endif /* DEBUG || DEVELOPMENT */
#endif /* defined(HAS_VMSA_LOCK) */
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
#if DEBUG || DEVELOPMENT
	adr	x0, L_set_tcr_panic_str
L_set_locked_reg_panic:
	adr	x0, L_set_locked_reg_panic_str
	.asciz "set_tcr: t0sz, t1sz not equal (%llx)\n"
L_set_locked_reg_panic_str:
	.asciz "attempt to set locked register: (%llx)\n"
#endif /* DEBUG || DEVELOPMENT */
 * MMU kernel virtual to physical address translation
	.globl EXT(mmu_kvtop)
	mrs	x2, DAIF				// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ
	at	s1e1r, x0				// Translation Stage 1 EL1
	mrs	x1, PAR_EL1				// Read result
	msr	DAIF, x2				// Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_invalid		// Test Translation not valid
	bfm	x1, x0, #0, #11				// Add page offset
	and	x0, x1, #0x0000ffffffffffff		// Clear non-address bits
	mov	x0, #0					// Return invalid
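/*
 * C-level sketch of the AT/PAR_EL1 pattern used by the translation helpers in
 * this file (illustrative pseudocode; barriers and interrupt masking omitted,
 * and the helper names are not real functions):
 *
 *	AT_S1E1R(va);                              // ask the MMU to translate va
 *	uint64_t par = MRS(PAR_EL1);
 *	if (par & 1)                               // PAR_EL1.F: translation aborted
 *		return 0;
 *	return (par & 0x0000fffffffff000ULL)       // physical frame from PAR
 *	     | (va  & 0xfffULL);                   // plus the page offset (the BFM above)
 *
 * The final AND in the assembly clears PAR_EL1's upper attribute bits so only
 * the physical address is returned.
 */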
 * MMU user virtual to physical address translation
	.globl EXT(mmu_uvtop)
	lsr	x8, x0, #56				// Extract top byte
	cbnz	x8, L_mmu_uvtop_invalid			// Tagged pointers are invalid
	mrs	x2, DAIF				// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ
	at	s1e0r, x0				// Translation Stage 1 EL0
	mrs	x1, PAR_EL1				// Read result
	msr	DAIF, x2				// Restore interrupt state
	tbnz	x1, #0, L_mmu_uvtop_invalid		// Test Translation not valid
	bfm	x1, x0, #0, #11				// Add page offset
	and	x0, x1, #0x0000ffffffffffff		// Clear non-address bits
	mov	x0, #0					// Return invalid
 * MMU kernel virtual to physical address preflight write access
	.globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs	x2, DAIF				// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ
	at	s1e1w, x0				// Translation Stage 1 EL1
	mrs	x1, PAR_EL1				// Read result
	msr	DAIF, x2				// Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_wpreflight_invalid	// Test Translation not valid
	bfm	x1, x0, #0, #11				// Add page offset
	and	x0, x1, #0x0000ffffffffffff		// Clear non-address bits
L_mmu_kvtop_wpreflight_invalid:
	mov	x0, #0					// Return invalid
 * SET_RECOVERY_HANDLER
 * Sets up a page fault recovery handler. This macro clobbers x16 and x17.
 * label - recovery label
 * tpidr - persisted thread pointer
 * old_handler - persisted recovery handler
 * label_in_adr_range - whether \label is within 1 MB of PC
.macro SET_RECOVERY_HANDLER label, tpidr=x16, old_handler=x10, label_in_adr_range=0
	// Note: x16 and x17 are designated for use as temporaries in
	// interruptible PAC routines. DO NOT CHANGE THESE REGISTER ASSIGNMENTS.
.if \label_in_adr_range==1				// Load the recovery handler address
	adrp	x17, \label@page
	add	x17, x17, \label@pageoff
#if defined(HAS_APPLE_PAC)
	add	x16, x16, TH_RECOVER
	movk	x16, #PAC_DISCRIMINATOR_RECOVER, lsl 48
	pacia	x17, x16				// Sign with IAKey + blended discriminator
	mrs	\tpidr, TPIDR_EL1			// Load thread pointer
	ldr	\old_handler, [\tpidr, TH_RECOVER]	// Save previous recovery handler
	str	x17, [\tpidr, TH_RECOVER]		// Set new signed recovery handler
 * CLEAR_RECOVERY_HANDLER
 * Clears page fault handler set by SET_RECOVERY_HANDLER
 * tpidr - thread pointer saved by SET_RECOVERY_HANDLER
 * old_handler - old recovery handler saved by SET_RECOVERY_HANDLER
.macro CLEAR_RECOVERY_HANDLER tpidr=x16, old_handler=x10
	str	\old_handler, [\tpidr, TH_RECOVER]	// Restore the previous recovery handler
	CLEAR_RECOVERY_HANDLER
	mov	x0, #EFAULT				// Return an EFAULT error
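/*
 * Usage sketch (illustrative only): each copyio routine below brackets its
 * faultable loads/stores with these macros, roughly:
 *
 *	old = thread->recover;                 // saved in \old_handler
 *	thread->recover = signed(&label);      // a fault now redirects to label
 *	... touch user (or kernel) memory ...
 *	thread->recover = old;                 // CLEAR_RECOVERY_HANDLER
 *
 * copyio_error is the shared recovery label: it restores the previous handler
 * and returns EFAULT to the caller instead of panicking on the fault.
 */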
 * int _bcopyin(const char *src, char *dst, vm_size_t len)
	SET_RECOVERY_HANDLER copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	/* Fixup the len and test for completion */
	CLEAR_RECOVERY_HANDLER
 * int _copyin_atomic32(const char *src, uint32_t *dst)
	.globl EXT(_copyin_atomic32)
LEXT(_copyin_atomic32)
	SET_RECOVERY_HANDLER copyio_error
	CLEAR_RECOVERY_HANDLER
 * int _copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
	.globl EXT(_copyin_atomic32_wait_if_equals)
LEXT(_copyin_atomic32_wait_if_equals)
	SET_RECOVERY_HANDLER copyio_error
	CLEAR_RECOVERY_HANDLER
 * int _copyin_atomic64(const char *src, uint64_t *dst)
	.globl EXT(_copyin_atomic64)
LEXT(_copyin_atomic64)
	SET_RECOVERY_HANDLER copyio_error
	CLEAR_RECOVERY_HANDLER
 * int _copyout_atomic32(uint32_t value, char *dst)
	.globl EXT(_copyout_atomic32)
LEXT(_copyout_atomic32)
	SET_RECOVERY_HANDLER copyio_error
	CLEAR_RECOVERY_HANDLER
 * int _copyout_atomic64(uint64_t value, char *dst)
	.globl EXT(_copyout_atomic64)
LEXT(_copyout_atomic64)
	SET_RECOVERY_HANDLER copyio_error
	CLEAR_RECOVERY_HANDLER
 * int _bcopyout(const char *src, char *dst, vm_size_t len)
	.globl EXT(_bcopyout)
	SET_RECOVERY_HANDLER copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	/* Fixup the len and test for completion */
	CLEAR_RECOVERY_HANDLER
 * const user_addr_t user_addr,
	.globl EXT(_bcopyinstr)
	SET_RECOVERY_HANDLER Lcopyinstr_error, label_in_adr_range=1
	mov	x4, #0					// x4 - total bytes copied
	ldrb	w5, [x0], #1				// Load a byte from the user source
	strb	w5, [x1], #1				// Store a byte to the kernel dest
	add	x4, x4, #1				// Increment bytes copied
	cbz	x5, Lcopyinstr_done			// If this byte is null, we're done
	cmp	x4, x2					// If we're out of space, return an error
	mov	x5, #ENAMETOOLONG			// Set current byte to error code for later return
	str	x4, [x3]				// Return number of bytes copied
	mov	x0, x5					// Set error code (0 on success, ENAMETOOLONG on failure)
	mov	x0, #EFAULT				// Return EFAULT on error
	CLEAR_RECOVERY_HANDLER
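/*
 * C-level sketch of the copy loop above (illustrative; assumes max > 0):
 *
 *	int err = 0;
 *	vm_size_t copied = 0;
 *	for (;;) {
 *		uint8_t byte = *src++;
 *		*dst++ = byte;
 *		copied++;
 *		if (byte == 0)
 *			break;                        // hit the NUL: success
 *		if (copied == max) {
 *			err = ENAMETOOLONG;           // ran out of space first
 *			break;
 *		}
 *	}
 *	*lencopied = copied;
 *	return err;                           // EFAULT instead if a fault was taken
 */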
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
 * Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
 * either user or kernel memory, or 8 bytes (AArch32) from user only.
 * x0 : address of frame to copy.
 * x1 : kernel address at which to store data.
 * w2 : whether to copy an AArch32 or AArch64 frame.
 * x5 : temp (kernel virtual base)
 * x10 : old recovery function (set by SET_RECOVERY_HANDLER)
 * x12, x13 : backtrace data
 * x16 : thread pointer (set by SET_RECOVERY_HANDLER)
	.globl EXT(copyinframe)
	SET_RECOVERY_HANDLER copyio_error
	cbnz	w2, Lcopyinframe64			// Check frame size
	adrp	x5, EXT(gVirtBase)@page			// For 32-bit frame, make sure we're not trying to copy from kernel
	add	x5, x5, EXT(gVirtBase)@pageoff
	cmp	x5, x0					// See if address is in kernel virtual range
	b.hi	Lcopyinframe32				// If below kernel virtual range, proceed.
	mov	w0, #EFAULT				// Should never have a 32-bit frame in kernel virtual range
	ldr	x12, [x0]				// Copy 8 bytes
	mov	w0, #0					// Success
	mov	x3, VM_MIN_KERNEL_ADDRESS		// Check if kernel address
	orr	x9, x0, TBI_MASK			// Hide tags in address comparison
	cmp	x9, x3					// If in kernel address range, skip tag test
	b.hs	Lcopyinframe_valid
	tst	x0, TBI_MASK				// Detect tagged pointers
	b.eq	Lcopyinframe_valid
	mov	w0, #EFAULT				// Tagged address, fail
	ldp	x12, x13, [x0]				// Copy 16 bytes
	mov	w0, #0					// Success
	CLEAR_RECOVERY_HANDLER
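/*
 * Sketch of the 64-bit address screening in Lcopyinframe64 above
 * (illustrative C):
 *
 *	if ((frame_addr | TBI_MASK) < VM_MIN_KERNEL_ADDRESS &&   // not a kernel address
 *	    (frame_addr & TBI_MASK) != 0)                        // yet carries tag bits
 *		return EFAULT;                                       // reject tagged user pointer
 *
 * ORing TBI_MASK into the address first lets tagged kernel addresses still
 * compare as "kernel range", so only tagged user-space pointers are rejected.
 */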
 * uint32_t arm_debug_read_dscr(void)
	.globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 * Set debug registers to match the current thread state
 * (NULL to disable). Assume 6 breakpoints and 2
 * watchpoints, since that has been the case in all cores thus far.
	.globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
#if defined(APPLE_ARM64_ARCH_FAMILY)
 * Note: still have to ISB before executing wfi!
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
#if defined(APPLETYPHOON)
	// <rdar://problem/15827409>
	HID_SET_BITS ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x9
	cbnz	x0, 1f					// Skip if deep_sleep == true
	// Mask FIQ and IRQ to avoid spurious wakeups
	mrs	x9, ARM64_REG_CYC_OVRD
	and	x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
	mov	x10, #(ARM64_REG_CYC_OVRD_irq_disable | ARM64_REG_CYC_OVRD_fiq_disable)
	msr	ARM64_REG_CYC_OVRD, x9
	cbz	x0, 1f					// Skip if deep_sleep == false
#if __ARM_GLOBAL_SLEEP_BIT__
	mrs	x1, ARM64_REG_ACC_OVRD
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
#if HAS_RETENTION_STATE
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_disPioOnWfiCpu)
	msr	ARM64_REG_ACC_OVRD, x1
	mov	x1, ARM64_REG_CYC_CFG_deepSleep
	msr	ARM64_REG_CYC_CFG, x1
	// Set "OK to power down" (<rdar://problem/12390433>)
	mrs	x9, ARM64_REG_CYC_OVRD
	orr	x9, x9, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
#if HAS_RETENTION_STATE
	orr	x9, x9, #(ARM64_REG_CYC_OVRD_disWfiRetn)
	msr	ARM64_REG_CYC_OVRD, x9
#if defined(APPLEMONSOON) || defined(APPLEVORTEX)
	cbz	x9, Lwfi_inst				// skip if not p-core
	/* <rdar://problem/32512947>: Flush the GUPS prefetcher prior to
	 * wfi. A Skye HW bug can cause the GUPS prefetcher on p-cores
	 * to be left with valid entries that fail to drain if a
	 * subsequent wfi is issued. This can prevent the core from
	 * power-gating. For the idle case that is recoverable, but
	 * for the deep-sleep (S2R) case in which cores MUST power-gate,
	 * it can lead to a hang. This can be prevented by disabling
	 * and re-enabling GUPS, which forces the prefetch queue to
	 * drain. This should be done as close to wfi as possible, i.e.
	 * at the very end of arm64_prepare_for_sleep(). */
#if defined(APPLEVORTEX)
	/* <rdar://problem/32821461>: Cyprus A0/A1 parts have a similar
	 * bug in the HSP prefetcher that can be worked around through
	 * the same method mentioned above for Skye. */
	EXEC_COREALL_REVLO CPU_VERSION_B0, x9, x10
	mrs	x9, ARM64_REG_HID10
	orr	x9, x9, #(ARM64_REG_HID10_DisHwpGups)
	msr	ARM64_REG_HID10, x9
	and	x9, x9, #(~(ARM64_REG_HID10_DisHwpGups))
	msr	ARM64_REG_HID10, x9
 * Force WFI to use clock gating only
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	mrs	x0, ARM64_REG_CYC_OVRD
	orr	x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
	msr	ARM64_REG_CYC_OVRD, x0
#if HAS_RETENTION_STATE
	.globl EXT(arm64_retention_wfi)
LEXT(arm64_retention_wfi)
	cbz	lr, Lwfi_retention			// If lr is 0, we entered retention state and lost all GPRs except sp and pc
	ret						// Otherwise just return to cpu_idle()
	bl	EXT(cpu_idle_exit)			// cpu_idle_exit(from_reset = FALSE)
	b	.					// cpu_idle_exit() should never return
#if defined(APPLETYPHOON)
	.globl EXT(typhoon_prepare_for_wfi)
LEXT(typhoon_prepare_for_wfi)
	// <rdar://problem/15827409>
	HID_SET_BITS ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x0
	.globl EXT(typhoon_return_from_wfi)
LEXT(typhoon_return_from_wfi)
	// <rdar://problem/15827409>
	HID_CLEAR_BITS ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x0
#define HID0_DEFEATURES_1 0x0000a0c000064010ULL
#define HID1_DEFEATURES_1 0x000000004005bf20ULL
#define HID2_DEFEATURES_1 0x0000000000102074ULL
#define HID3_DEFEATURES_1 0x0000000000400003ULL
#define HID4_DEFEATURES_1 0x83ff00e100000268ULL
#define HID7_DEFEATURES_1 0x000000000000000eULL
#define HID0_DEFEATURES_2 0x0000a1c000020010ULL
#define HID1_DEFEATURES_2 0x000000000005d720ULL
#define HID2_DEFEATURES_2 0x0000000000002074ULL
#define HID3_DEFEATURES_2 0x0000000000400001ULL
#define HID4_DEFEATURES_2 0x8390000200000208ULL
#define HID7_DEFEATURES_2 0x0000000000000000ULL
	arg0 = target register
	arg1 = 64-bit constant
	movz	$0, #(($1 >> 48) & 0xffff), lsl #48
	movk	$0, #(($1 >> 32) & 0xffff), lsl #32
	movk	$0, #(($1 >> 16) & 0xffff), lsl #16
	movk	$0, #(($1) & 0xffff)
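/*
 * Example expansion (illustrative): LOAD_UINT64 x1, 0x0000a0c000064010 emits
 *
 *	movz	x1, #0x0000, lsl #48
 *	movk	x1, #0xa0c0, lsl #32
 *	movk	x1, #0x0006, lsl #16
 *	movk	x1, #0x4010
 *
 * building the 64-bit constant 16 bits at a time with no literal-pool load.
 */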
	.globl EXT(cpu_defeatures_set)
LEXT(cpu_defeatures_set)
	b.eq	cpu_defeatures_set_2
	b.ne	cpu_defeatures_set_ret
	LOAD_UINT64 x1, HID0_DEFEATURES_1
	mrs	x0, ARM64_REG_HID0
	msr	ARM64_REG_HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_1
	mrs	x0, ARM64_REG_HID1
	msr	ARM64_REG_HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_1
	mrs	x0, ARM64_REG_HID2
	msr	ARM64_REG_HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_1
	mrs	x0, ARM64_REG_HID3
	msr	ARM64_REG_HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_1
	mrs	x0, ARM64_REG_HID4
	msr	ARM64_REG_HID4, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_1
	mrs	x0, ARM64_REG_HID7
	msr	ARM64_REG_HID7, x0
	b	cpu_defeatures_set_ret
cpu_defeatures_set_2:
	LOAD_UINT64 x1, HID0_DEFEATURES_2
	mrs	x0, ARM64_REG_HID0
	msr	ARM64_REG_HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_2
	mrs	x0, ARM64_REG_HID1
	msr	ARM64_REG_HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_2
	mrs	x0, ARM64_REG_HID2
	msr	ARM64_REG_HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_2
	mrs	x0, ARM64_REG_HID3
	msr	ARM64_REG_HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_2
	mrs	x0, ARM64_REG_HID4
	msr	ARM64_REG_HID4, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_2
	mrs	x0, ARM64_REG_HID7
	msr	ARM64_REG_HID7, x0
	b	cpu_defeatures_set_ret
cpu_defeatures_set_ret:
#else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
 * Force WFI to use clock gating only
 * Note: for non-Apple devices, do nothing.
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
 * void arm64_replace_bootstack(cpu_data_t *cpu_data)
 * This must be called from a kernel thread context running on the boot CPU,
 * after setting up new exception stacks in per-CPU data. That will guarantee
 * that the stack(s) we're trying to replace aren't currently in use. For
 * KTRR-protected devices, this must also be called prior to VM prot finalization
 * and lockdown, as updating SP1 requires a sensitive instruction.
	.globl EXT(arm64_replace_bootstack)
LEXT(arm64_replace_bootstack)
	// Set the exception stack pointer
	ldr	x0, [x0, CPU_EXCEPSTACK_TOP]
	mrs	x4, DAIF				// Load current DAIF; use x4 as pinst may trash x1-x3
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)	// Disable IRQ/FIQ/serror
	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl	EXT(pinst_spsel_1)
	msr	DAIF, x4				// Restore interrupt state
 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
 *                            uintptr_t arg2, uintptr_t arg3)
 * Call the EL3 monitor with 4 arguments in registers
 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
 * registers are preserved, temporary registers are not. Parameters and results are passed in
 * registers, as in a normal C function call.
	.globl EXT(monitor_call)
#ifdef HAS_APPLE_PAC
 * Macro that signs thread state.
 * $0 - Offset in arm_saved_state to store JOPHASH value.
.macro SIGN_THREAD_STATE
	pacga	x1, x1, x0		/* PC hash (gkey + &arm_saved_state) */
 * Mask off the carry flag so we don't need to re-sign when that flag is
 * touched by the system call return path.
	pacga	x1, x2, x1		/* SPSR hash (gkey + pc hash) */
	pacga	x1, x3, x1		/* LR Hash (gkey + spsr hash) */
	pacga	x1, x4, x1		/* X16 hash (gkey + lr hash) */
	pacga	x1, x5, x1		/* X17 hash (gkey + x16 hash) */
#if DEBUG || DEVELOPMENT
	tbz	x1, #DAIF_IRQF_SHIFT, Lintr_enabled_panic
#endif /* DEBUG || DEVELOPMENT */
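/*
 * Conceptually (illustrative pseudocode), the signature is a PACGA chain over
 * the spilled state, keyed by the saved-state pointer itself:
 *
 *	h = pacga(pc,        &ss);
 *	h = pacga(cpsr & ~C, h);      // carry flag masked off, see above
 *	h = pacga(lr,        h);
 *	h = pacga(x16,       h);
 *	h = pacga(x17,       h);
 *	ss->jophash = h;
 *
 * CHECK_SIGNED_STATE below recomputes the same chain and compares it with the
 * stored hash, so tampering with any one field changes the final value.
 */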
 * CHECK_SIGNED_STATE
 * Macro that checks signed thread state.
 * $0 - Offset in arm_saved_state to read the JOPHASH value from.
 * $1 - Label to jump to when check is unsuccessful.
.macro CHECK_SIGNED_STATE
	pacga	x1, x1, x0		/* PC hash (gkey + &arm_saved_state) */
 * Mask off the carry flag so we don't need to re-sign when that flag is
 * touched by the system call return path.
	pacga	x1, x2, x1		/* SPSR hash (gkey + pc hash) */
	pacga	x1, x3, x1		/* LR Hash (gkey + spsr hash) */
	pacga	x1, x4, x1		/* X16 hash (gkey + lr hash) */
	pacga	x1, x5, x1		/* X17 hash (gkey + x16 hash) */
#if DEBUG || DEVELOPMENT
	tbz	x1, #DAIF_IRQF_SHIFT, Lintr_enabled_panic
#endif /* DEBUG || DEVELOPMENT */
 * void ml_sign_thread_state(arm_saved_state_t *ss, uint64_t pc,
 *                           uint32_t cpsr, uint64_t lr, uint64_t x16,
	.globl EXT(ml_sign_thread_state)
LEXT(ml_sign_thread_state)
	SIGN_THREAD_STATE SS64_JOPHASH
 * void ml_sign_kernel_thread_state(arm_kernel_saved_state *ss, uint64_t pc,
 *                                  uint32_t cpsr, uint64_t lr, uint64_t x16,
	.globl EXT(ml_sign_kernel_thread_state)
LEXT(ml_sign_kernel_thread_state)
	SIGN_THREAD_STATE SS64_KERNEL_JOPHASH
 * void ml_check_signed_state(arm_saved_state_t *ss, uint64_t pc,
 *                            uint32_t cpsr, uint64_t lr, uint64_t x16,
	.globl EXT(ml_check_signed_state)
LEXT(ml_check_signed_state)
	CHECK_SIGNED_STATE SS64_JOPHASH, Lcheck_hash_panic
 * ml_check_signed_state normally doesn't set up a stack frame, since it
 * needs to work in the face of attackers that can modify the stack.
 * However we lazily create one in the panic path: at this point we're
 * *only* using the stack frame for unwinding purposes, and without one
 * we'd be missing information about the caller.
	adr	x0, Lcheck_hash_str
	CALL_EXTERN panic_with_thread_kernel_state
 * void ml_check_kernel_signed_state(arm_kernel_saved_state *ss, uint64_t pc,
 *                                   uint32_t cpsr, uint64_t lr, uint64_t x16,
	.globl EXT(ml_check_kernel_signed_state)
LEXT(ml_check_kernel_signed_state)
	CHECK_SIGNED_STATE SS64_KERNEL_JOPHASH, Lcheck_kernel_hash_panic
Lcheck_kernel_hash_panic:
	adr	x0, Lcheck_hash_str
	.asciz "JOP Hash Mismatch Detected (PC, CPSR, or LR corruption)"
#if DEBUG || DEVELOPMENT
Lintr_enabled_panic:
	adr	x0, Lintr_enabled_str
 * Please see the "Signing spilled register state" section of doc/pac.md
 * for an explanation of why this is bad and how it should be fixed.
	.asciz "Signed thread state manipulated with interrupts enabled"
#endif /* DEBUG || DEVELOPMENT */
 * void ml_auth_thread_state_invalid_cpsr(arm_saved_state_t *ss)
 * Panics due to an invalid CPSR value in ss.
	.globl EXT(ml_auth_thread_state_invalid_cpsr)
LEXT(ml_auth_thread_state_invalid_cpsr)
	adr	x0, Linvalid_cpsr_str
	CALL_EXTERN panic_with_thread_kernel_state
	.asciz "Thread state corruption detected (PE mode == 0)"
#endif /* HAS_APPLE_PAC */
	.globl EXT(fill32_dczva)
	.globl EXT(fill32_nt)
	stnp	q0, q0, [x0, #0x20]
	stnp	q0, q0, [x0, #0x40]
	stnp	q0, q0, [x0, #0x60]
/* vim: set sw=4 ts=4: */