/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <sys/errno.h>
#if defined(HAS_APPLE_PAC)
/*
 * void ml_set_kernelkey_enabled(boolean_t enable)
 *
 * Toggle pointer auth kernel domain key diversification. Assembly to prevent compiler reordering.
 */
	.globl EXT(ml_set_kernelkey_enabled)
LEXT(ml_set_kernelkey_enabled)
	mrs	x1, ARM64_REG_APCTL_EL1
	orr	x2, x1, #APCTL_EL1_KernKeyEn
	and	x1, x1, #~APCTL_EL1_KernKeyEn
	cmp	w0, #0				// Select the cleared value when disabling,
	csel	x1, x1, x2, eq			// the set value otherwise
	msr	ARM64_REG_APCTL_EL1, x1

#endif /* defined(HAS_APPLE_PAC) */
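/*
 * Hedged C-level sketch of the toggle above (reader aid only; read_apctl()/
 * write_apctl() are hypothetical stand-ins for the ARM64_REG_APCTL_EL1
 * accesses, which target an implementation-defined system register):
 *
 *	void ml_set_kernelkey_enabled_sketch(boolean_t enable)
 *	{
 *		uint64_t apctl = read_apctl();
 *		if (enable)
 *			apctl |= APCTL_EL1_KernKeyEn;
 *		else
 *			apctl &= ~APCTL_EL1_KernKeyEn;
 *		write_apctl(apctl);		// the real routine follows this with an ISB
 *	}
 */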
/*
 * uint32_t get_fpscr(void):
 *	Returns (FPSR | FPCR).
 */
	mrs	x1, FPSR			// Grab FPSR
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	orr	x0, x4, x5			// Build the full FPSR mask
	and	x1, x1, x0			// Be paranoid, and clear bits we expect to be clear
	mrs	x2, FPCR			// Grab FPCR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	orr	x0, x4, x5			// Build the full FPCR mask
	and	x2, x2, x0			// Be paranoid, and clear bits we expect to be clear
	orr	x0, x1, x2			// OR them to get FPSCR equivalent state
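/*
 * Hedged C-level sketch of get_fpscr() above (illustration only, assuming a
 * compiler that provides the __builtin_arm_rsr64() intrinsic):
 *
 *	uint32_t get_fpscr_sketch(void)
 *	{
 *		uint64_t fpsr = __builtin_arm_rsr64("FPSR") & FPSR_MASK;
 *		uint64_t fpcr = __builtin_arm_rsr64("FPCR") & FPCR_MASK;
 *		return (uint32_t)(fpsr | fpcr);		// FPSCR-equivalent state
 *	}
 */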
/*
 * void set_fpscr(uint32_t value):
 *	Set the FPCR and FPSR registers, based on the given value; a
 *	noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
 *	and FPCR are not responsible for condition codes.
 */
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	orr	x1, x4, x5			// Build the full FPSR mask
	and	x1, x1, x0			// Clear the bits that don't apply to FPSR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	orr	x2, x4, x5			// Build the full FPCR mask
	and	x2, x2, x0			// Clear the bits that don't apply to FPCR
	msr	FPSR, x1			// Write FPSR
	msr	FPCR, x2			// Write FPCR
	dsb	ish				// FPCR requires synchronization
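/*
 * Hedged C-level sketch of set_fpscr() above (illustration only, assuming the
 * __builtin_arm_wsr64()/__builtin_arm_dsb() intrinsics are available):
 *
 *	void set_fpscr_sketch(uint32_t value)
 *	{
 *		__builtin_arm_wsr64("FPSR", value & FPSR_MASK);	// status bits only
 *		__builtin_arm_wsr64("FPCR", value & FPCR_MASK);	// control bits only
 *		__builtin_arm_dsb(11);				// dsb ish
 *	}
 */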
/*
 *	void update_mdscr(unsigned long clear, unsigned long set)
 *		Clears and sets the specified bits in MDSCR_EL1.
 *
 * Setting breakpoints in EL1 is effectively a KTRR bypass. The ability to do so is
 * controlled by MDSCR.KDE. The MSR to set MDSCR must be present to allow
 * self-hosted user mode debug. Any checks before the MRS can be skipped with ROP,
 * so we need to put the checks after the MRS where they can't be skipped. That
 * still leaves a small window if a breakpoint is set on the instruction
 * immediately after the MRS. To handle that, we also check and then set
 * the breakpoint control registers. This guarantees that a given core will
 * never have both KDE set and a breakpoint targeting EL1.
 *
 * If KDE gets set, unset it and then panic.
 */
	.globl EXT(update_mdscr)
#if defined(CONFIG_KERNEL_INTEGRITY)
	/*
	 * Verify KDE didn't get set (including via ROP);
	 * if set, clear it and then panic.
	 */
	b.ne	Lupdate_mdscr_panic

	adrp	x0, Lupdate_mdscr_panic_str@page
	add	x0, x0, Lupdate_mdscr_panic_str@pageoff

Lupdate_mdscr_panic_str:
	.asciz "MDSCR.KDE was set"
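/*
 * Hedged C-level sketch of the read-modify-write that update_mdscr() performs
 * (illustration only; the KDE/breakpoint ordering checks described above are
 * exactly the part that must remain in assembly):
 *
 *	void update_mdscr_sketch(unsigned long clear, unsigned long set)
 *	{
 *		unsigned long mdscr = __builtin_arm_rsr64("MDSCR_EL1");
 *		mdscr = (mdscr & ~clear) | set;
 *		__builtin_arm_wsr64("MDSCR_EL1", mdscr);
 *	}
 */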
/*
 *	Set MMU Translation Table Base Alternate
 */
	.globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
#if defined(KERNEL_INTEGRITY_KTRR)
	bl	EXT(pinst_set_ttbr1)
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	.globl EXT(set_mmu_ttb)
#if __ARM_KERNEL_PROTECT__
	/* All EL1-mode ASIDs are odd. */
	orr	x0, x0, #(1 << TTBR_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */
/*
 *	set AUX control register
 */
	.globl EXT(set_aux_control)
LEXT(set_aux_control)
	// Synchronize system
#if __ARM_KERNEL_PROTECT__
	.globl EXT(set_vbar_el1)
#if defined(KERNEL_INTEGRITY_KTRR)
	b	EXT(pinst_set_vbar)
#endif /* __ARM_KERNEL_PROTECT__ */
/*
 *	set translation control register
 */
#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Assert that T0SZ is always equal to T1SZ
	eor	x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
	and	x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
	cbnz	x1, L_set_tcr_panic
#if defined(KERNEL_INTEGRITY_KTRR)
	bl	EXT(pinst_set_tcr)
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

L_set_tcr_panic:
	adr	x0, L_set_tcr_panic_str

L_set_locked_reg_panic:
	adr	x0, L_set_locked_reg_panic_str

L_set_tcr_panic_str:
	.asciz	"set_tcr: t0sz, t1sz not equal (%llx)\n"

L_set_locked_reg_panic_str:
	.asciz	"attempt to set locked register: (%llx)\n"

#if defined(KERNEL_INTEGRITY_KTRR)
	bl	EXT(pinst_set_tcr)
#endif // defined(APPLE_ARM64_ARCH_FAMILY)
/*
 *	MMU kernel virtual to physical address translation
 */
	.globl EXT(mmu_kvtop)
	mrs	x2, DAIF					// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)		// Disable IRQ and FIQ
	at	s1e1r, x0					// Translation Stage 1 EL1
	mrs	x1, PAR_EL1					// Read result
	msr	DAIF, x2					// Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_invalid			// Test translation not valid
	bfm	x1, x0, #0, #11					// Add page offset
	and	x0, x1, #0x0000ffffffffffff			// Clear non-address bits
	ret
L_mmu_kvtop_invalid:
	mov	x0, #0						// Return invalid
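/*
 * Hedged C-level sketch of the AT/PAR_EL1 pattern shared by the translation
 * helpers in this file (illustration only; the real routines also disable
 * IRQ/FIQ around the AT + PAR_EL1 read, and PAR_EL1 bit 0 is the failure flag):
 *
 *	uint64_t kvtop_sketch(uint64_t va)
 *	{
 *		__asm__ volatile ("at s1e1r, %0" : : "r"(va));
 *		uint64_t par = __builtin_arm_rsr64("PAR_EL1");
 *		if (par & 1)
 *			return 0;				// translation failed
 *		return (par & 0x0000fffffffff000ULL) |		// physical page
 *		       (va  & 0x0000000000000fffULL);		// page offset
 *	}
 */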
/*
 *	MMU user virtual to physical address translation
 */
	.globl EXT(mmu_uvtop)
	lsr	x8, x0, #56					// Extract top byte
	cbnz	x8, L_mmu_uvtop_invalid				// Tagged pointers are invalid
	mrs	x2, DAIF					// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)		// Disable IRQ and FIQ
	at	s1e0r, x0					// Translation Stage 1 EL0
	mrs	x1, PAR_EL1					// Read result
	msr	DAIF, x2					// Restore interrupt state
	tbnz	x1, #0, L_mmu_uvtop_invalid			// Test translation not valid
	bfm	x1, x0, #0, #11					// Add page offset
	and	x0, x1, #0x0000ffffffffffff			// Clear non-address bits
	ret
L_mmu_uvtop_invalid:
	mov	x0, #0						// Return invalid
/*
 *	MMU kernel virtual to physical address preflight write access
 */
	.globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs	x2, DAIF					// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)		// Disable IRQ and FIQ
	at	s1e1w, x0					// Translation Stage 1 EL1 (write access)
	mrs	x1, PAR_EL1					// Read result
	msr	DAIF, x2					// Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_wpreflight_invalid		// Test translation not valid
	bfm	x1, x0, #0, #11					// Add page offset
	and	x0, x1, #0x0000ffffffffffff			// Clear non-address bits
	ret
L_mmu_kvtop_wpreflight_invalid:
	mov	x0, #0						// Return invalid
/*
 * SET_RECOVERY_HANDLER
 *
 *	Sets up a page fault recovery handler
 *
 *	arg0 - persisted thread pointer
 *	arg1 - persisted recovery handler
 *	arg2 - scratch register
 *	arg3 - recovery label
 */
.macro SET_RECOVERY_HANDLER
	mrs	$0, TPIDR_EL1				// Load thread pointer
	adrp	$2, $3@page				// Load the recovery handler address
	add	$2, $2, $3@pageoff
#if defined(HAS_APPLE_PAC)
	add	$1, $0, TH_RECOVER
	movk	$1, #PAC_DISCRIMINATOR_RECOVER, lsl 48
	pacia	$2, $1					// Sign with IAKey + blended discriminator
#endif
	ldr	$1, [$0, TH_RECOVER]			// Save previous recovery handler
	str	$2, [$0, TH_RECOVER]			// Set new signed recovery handler
.endmacro
/*
 * CLEAR_RECOVERY_HANDLER
 *
 *	Clears the page fault handler set by SET_RECOVERY_HANDLER
 *
 *	arg0 - thread pointer saved by SET_RECOVERY_HANDLER
 *	arg1 - old recovery handler saved by SET_RECOVERY_HANDLER
 */
.macro CLEAR_RECOVERY_HANDLER
	str	$1, [$0, TH_RECOVER]			// Restore the previous recovery handler
.endmacro
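/*
 * Hedged C-level sketch of the state the two macros above maintain
 * (conceptual only; 'recover' names the thread field at offset TH_RECOVER,
 * and the fault handler resumes at that address if a copyio access faults):
 *
 *	uintptr_t old = thread->recover;	// SET_RECOVERY_HANDLER: save the old handler
 *	thread->recover = signed_handler;	// ... and install the (PAC-signed) new one
 *	// ... perform the user-memory accesses ...
 *	thread->recover = old;			// CLEAR_RECOVERY_HANDLER: restore on exit
 */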
copyio_error:
	CLEAR_RECOVERY_HANDLER x10, x11
	mov	x0, #EFAULT				// Return an EFAULT error
/*
 * int _bcopyin(const char *src, char *dst, vm_size_t len)
 */
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	/* Fixup the len and test for completion */
	CLEAR_RECOVERY_HANDLER x10, x11
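/*
 * Hedged C-level sketch of the copy strategy above (illustration only):
 *
 *	static void bcopy_sketch(const char *src, char *dst, unsigned long len)
 *	{
 *		while (len >= 16) {			// 16 bytes at a time (ldp/stp)
 *			__builtin_memcpy(dst, src, 16);
 *			src += 16; dst += 16; len -= 16;
 *		}
 *		while (len--)				// short/tail lengths: bytewise copy
 *			*dst++ = *src++;
 *	}
 */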
/*
 * int _copyin_atomic32(const char *src, uint32_t *dst)
 */
	.globl EXT(_copyin_atomic32)
LEXT(_copyin_atomic32)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	CLEAR_RECOVERY_HANDLER x10, x11

/*
 * int _copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
 */
	.globl EXT(_copyin_atomic32_wait_if_equals)
LEXT(_copyin_atomic32_wait_if_equals)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	CLEAR_RECOVERY_HANDLER x10, x11

/*
 * int _copyin_atomic64(const char *src, uint64_t *dst)
 */
	.globl EXT(_copyin_atomic64)
LEXT(_copyin_atomic64)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	CLEAR_RECOVERY_HANDLER x10, x11

/*
 * int _copyout_atomic32(uint32_t value, char *dst)
 */
	.globl EXT(_copyout_atomic32)
LEXT(_copyout_atomic32)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	CLEAR_RECOVERY_HANDLER x10, x11

/*
 * int _copyout_atomic64(uint64_t value, char *dst)
 */
	.globl EXT(_copyout_atomic64)
LEXT(_copyout_atomic64)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	CLEAR_RECOVERY_HANDLER x10, x11
/*
 * int _bcopyout(const char *src, char *dst, vm_size_t len)
 */
	.globl EXT(_bcopyout)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	/* Fixup the len and test for completion */
	CLEAR_RECOVERY_HANDLER x10, x11
/*
 * int _bcopyinstr(
 *	  const user_addr_t user_addr,
 *	  char *kernel_addr,
 *	  vm_size_t max,
 *	  vm_size_t *actual)
 */
	.globl EXT(_bcopyinstr)
	adr	x4, Lcopyinstr_error			// Get address for recover
	mrs	x10, TPIDR_EL1				// Get thread pointer
	ldr	x11, [x10, TH_RECOVER]			// Save previous recover

#if defined(HAS_APPLE_PAC)
	add	x5, x10, TH_RECOVER			// Sign new pointer with IAKey + blended discriminator
	movk	x5, #PAC_DISCRIMINATOR_RECOVER, lsl 48
	pacia	x4, x5
#endif
	str	x4, [x10, TH_RECOVER]			// Store new recover

	mov	x4, #0					// x4 - total bytes copied
	ldrb	w5, [x0], #1				// Load a byte from the user source
	strb	w5, [x1], #1				// Store a byte to the kernel dest
	add	x4, x4, #1				// Increment bytes copied
	cbz	x5, Lcopyinstr_done			// If this byte is null, we're done
	cmp	x4, x2					// If we're out of space, return an error
	mov	x5, #ENAMETOOLONG			// Set current byte to error code for later return
	str	x4, [x3]				// Return number of bytes copied
	mov	x0, x5					// Set error code (0 on success, ENAMETOOLONG on failure)
	mov	x0, #EFAULT				// Return EFAULT on error
	str	x11, [x10, TH_RECOVER]			// Restore old recover
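/*
 * Hedged C-level sketch of the copy loop above (illustration only; the real
 * routine relies on the recovery handler to turn faulting user accesses into
 * an EFAULT return):
 *
 *	static int bcopyinstr_sketch(const char *user_src, char *kernel_dst,
 *	    unsigned long max, unsigned long *actual)
 *	{
 *		unsigned long copied = 0;
 *		int err = ENAMETOOLONG;			// assume overflow until a NUL is seen
 *		while (copied < max) {
 *			char c = *user_src++;		// may fault -> EFAULT path
 *			kernel_dst[copied++] = c;
 *			if (c == '\0') {
 *				err = 0;
 *				break;
 *			}
 *		}
 *		*actual = copied;			// bytes copied, including the NUL
 *		return err;
 *	}
 */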
/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
 *
 *	Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
 *	either user or kernel memory, or 8 bytes (AArch32) from user only.
 *
 *	x0 : address of frame to copy.
 *	x1 : kernel address at which to store data.
 *	w2 : whether to copy an AArch32 or AArch64 frame.
 *	x5 : temp (kernel virtual base)
 *	x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 *	x11 : old recovery function (set by SET_RECOVERY_HANDLER)
 *	x12, x13 : backtrace data
 */
	.globl EXT(copyinframe)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	cbnz	w2, Lcopyinframe64			// Check frame size
	adrp	x5, EXT(gVirtBase)@page			// For 32-bit frame, make sure we're not trying to copy from kernel
	add	x5, x5, EXT(gVirtBase)@pageoff
	ldr	x5, [x5]
	cmp	x5, x0					// See if address is in kernel virtual range
	b.hi	Lcopyinframe32				// If below kernel virtual range, proceed.
	mov	w0, #EFAULT				// Should never have a 32-bit frame in kernel virtual range
	b	Lcopyinframe_done

Lcopyinframe32:
	ldr	x12, [x0]				// Copy 8 bytes
	str	x12, [x1]
	mov	w0, #0					// Success
	b	Lcopyinframe_done

Lcopyinframe64:
	mov	x3, VM_MIN_KERNEL_ADDRESS		// Check if kernel address
	orr	x9, x0, TBI_MASK			// Hide tags in address comparison
	cmp	x9, x3					// If in kernel address range, skip tag test
	b.hs	Lcopyinframe_valid
	tst	x0, TBI_MASK				// Detect tagged pointers
	b.eq	Lcopyinframe_valid
	mov	w0, #EFAULT				// Tagged address, fail
	b	Lcopyinframe_done

Lcopyinframe_valid:
	ldp	x12, x13, [x0]				// Copy 16 bytes
	stp	x12, x13, [x1]
	mov	w0, #0					// Success

Lcopyinframe_done:
	CLEAR_RECOVERY_HANDLER x10, x11
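/*
 * Hedged C-level sketch of the validation above (illustration only):
 *
 *	if (!is64bit) {					// AArch32 frame: user only
 *		if (frame_addr >= gVirtBase)
 *			return EFAULT;
 *		memcpy(kernel_addr, (void *)frame_addr, 8);
 *	} else {					// AArch64 frame
 *		if ((frame_addr | TBI_MASK) < VM_MIN_KERNEL_ADDRESS &&
 *		    (frame_addr & TBI_MASK))
 *			return EFAULT;			// tagged user pointer
 *		memcpy(kernel_addr, (void *)frame_addr, 16);
 *	}
 *	return 0;
 */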
/*
 * uint32_t arm_debug_read_dscr(void)
 */
	.globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 *	Set debug registers to match the current thread state
 *	(NULL to disable). Assume 6 breakpoints and 2
 *	watchpoints, since that has been the case in all cores
 *	thus far.
 */
	.globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
#if defined(APPLE_ARM64_ARCH_FAMILY)
/*
 * Note: still have to ISB before executing wfi!
 */
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)

#if defined(APPLETYPHOON)
	// <rdar://problem/15827409>
	mrs	x0, ARM64_REG_HID2				// Read HID2
	orr	x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch)	// Set HID.DisableMTLBPrefetch
	msr	ARM64_REG_HID2, x0				// Write HID2
#if __ARM_GLOBAL_SLEEP_BIT__
	// Enable deep sleep
	mrs	x1, ARM64_REG_ACC_OVRD
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
	msr	ARM64_REG_ACC_OVRD, x1

#else
	// Enable deep sleep
	mov	x1, ARM64_REG_CYC_CFG_deepSleep
	msr	ARM64_REG_CYC_CFG, x1
#endif

	// Set "OK to power down" (<rdar://problem/12390433>)
	mrs	x0, ARM64_REG_CYC_OVRD
	orr	x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
	msr	ARM64_REG_CYC_OVRD, x0
#if defined(APPLEMONSOON)
	cbz	x0, Lwfi_inst					// Skip if not a p-core
	/* <rdar://problem/32512947>: Flush the GUPS prefetcher prior to
	 * wfi. A Skye HW bug can cause the GUPS prefetcher on p-cores
	 * to be left with valid entries that fail to drain if a
	 * subsequent wfi is issued. This can prevent the core from
	 * power-gating. For the idle case that is recoverable, but
	 * for the deep-sleep (S2R) case in which cores MUST power-gate,
	 * it can lead to a hang. This can be prevented by disabling
	 * and re-enabling GUPS, which forces the prefetch queue to
	 * drain. This should be done as close to wfi as possible, i.e.
	 * at the very end of arm64_prepare_for_sleep(). */
	mrs	x0, ARM64_REG_HID10
	orr	x0, x0, #(ARM64_REG_HID10_DisHwpGups)		// Disable GUPS to force the prefetch queue to drain
	msr	ARM64_REG_HID10, x0
	isb	sy
	and	x0, x0, #(~(ARM64_REG_HID10_DisHwpGups))	// Then re-enable GUPS
	msr	ARM64_REG_HID10, x0
/*
 *	Force WFI to use clock gating only
 */
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	mrs	x0, ARM64_REG_CYC_OVRD
	orr	x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
	msr	ARM64_REG_CYC_OVRD, x0
#if defined(APPLETYPHOON)

	.globl EXT(typhoon_prepare_for_wfi)
LEXT(typhoon_prepare_for_wfi)
	// <rdar://problem/15827409>
	mrs	x0, ARM64_REG_HID2				// Read HID2
	orr	x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch)	// Set HID.DisableMTLBPrefetch
	msr	ARM64_REG_HID2, x0				// Write HID2

	.globl EXT(typhoon_return_from_wfi)
LEXT(typhoon_return_from_wfi)
	// <rdar://problem/15827409>
	mrs	x0, ARM64_REG_HID2				// Read HID2
	mov	x1, #(ARM64_REG_HID2_disMMUmtlbPrefetch)
	bic	x0, x0, x1					// Clear HID.DisableMTLBPrefetch
	msr	ARM64_REG_HID2, x0				// Write HID2
#define HID0_DEFEATURES_1	0x0000a0c000064010ULL
#define HID1_DEFEATURES_1	0x000000004005bf20ULL
#define HID2_DEFEATURES_1	0x0000000000102074ULL
#define HID3_DEFEATURES_1	0x0000000000400003ULL
#define HID4_DEFEATURES_1	0x83ff00e100000268ULL
#define HID7_DEFEATURES_1	0x000000000000000eULL

#define HID0_DEFEATURES_2	0x0000a1c000020010ULL
#define HID1_DEFEATURES_2	0x000000000005d720ULL
#define HID2_DEFEATURES_2	0x0000000000002074ULL
#define HID3_DEFEATURES_2	0x0000000000400001ULL
#define HID4_DEFEATURES_2	0x8390000200000208ULL
#define HID7_DEFEATURES_2	0x0000000000000000ULL
/*
 * arg0 = target register
 * arg1 = 64-bit constant
 */
.macro LOAD_UINT64
	movz	$0, #(($1 >> 48) & 0xffff), lsl #48
	movk	$0, #(($1 >> 32) & 0xffff), lsl #32
	movk	$0, #(($1 >> 16) & 0xffff), lsl #16
	movk	$0, #(($1) & 0xffff)
.endmacro
	.globl EXT(cpu_defeatures_set)
LEXT(cpu_defeatures_set)
	cmp	x0, #2
	b.eq	cpu_defeatures_set_2
	cmp	x0, #1
	b.ne	cpu_defeatures_set_ret
	LOAD_UINT64 x1, HID0_DEFEATURES_1
	mrs	x0, ARM64_REG_HID0
	orr	x0, x0, x1
	msr	ARM64_REG_HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_1
	mrs	x0, ARM64_REG_HID1
	orr	x0, x0, x1
	msr	ARM64_REG_HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_1
	mrs	x0, ARM64_REG_HID2
	orr	x0, x0, x1
	msr	ARM64_REG_HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_1
	mrs	x0, ARM64_REG_HID3
	orr	x0, x0, x1
	msr	ARM64_REG_HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_1
	mrs	x0, ARM64_REG_HID4
	orr	x0, x0, x1
	msr	ARM64_REG_HID4, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_1
	mrs	x0, ARM64_REG_HID7
	orr	x0, x0, x1
	msr	ARM64_REG_HID7, x0
	b	cpu_defeatures_set_ret
cpu_defeatures_set_2:
	LOAD_UINT64 x1, HID0_DEFEATURES_2
	mrs	x0, ARM64_REG_HID0
	orr	x0, x0, x1
	msr	ARM64_REG_HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_2
	mrs	x0, ARM64_REG_HID1
	orr	x0, x0, x1
	msr	ARM64_REG_HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_2
	mrs	x0, ARM64_REG_HID2
	orr	x0, x0, x1
	msr	ARM64_REG_HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_2
	mrs	x0, ARM64_REG_HID3
	orr	x0, x0, x1
	msr	ARM64_REG_HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_2
	mrs	x0, ARM64_REG_HID4
	orr	x0, x0, x1
	msr	ARM64_REG_HID4, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_2
	mrs	x0, ARM64_REG_HID7
	orr	x0, x0, x1
	msr	ARM64_REG_HID7, x0
	b	cpu_defeatures_set_ret
cpu_defeatures_set_ret:
#else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)

/*
 * Force WFI to use clock gating only
 * Note: for non-Apple devices, do nothing.
 */
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)

#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
/*
 * void arm64_replace_bootstack(cpu_data_t *cpu_data)
 *
 * This must be called from a kernel thread context running on the boot CPU,
 * after setting up new exception stacks in per-CPU data. That will guarantee
 * that the stack(s) we're trying to replace aren't currently in use. For
 * KTRR-protected devices, this must also be called prior to VM prot finalization
 * and lockdown, as updating SP1 requires a sensitive instruction.
 */
	.globl EXT(arm64_replace_bootstack)
LEXT(arm64_replace_bootstack)
	// Set the exception stack pointer
	ldr	x0, [x0, CPU_EXCEPSTACK_TOP]
	mrs	x4, DAIF					// Load current DAIF; use x4 as pinst may trash x1-x3
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)	// Disable IRQ/FIQ/SError
	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR)
	bl	EXT(pinst_spsel_1)
	msr	DAIF, x4					// Restore interrupt state
/*
 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
 *                            uintptr_t arg2, uintptr_t arg3)
 *
 * Call the EL3 monitor with 4 arguments in registers.
 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
 * registers are preserved, temporary registers are not. Parameters and results are passed in
 * the usual manner.
 */
	.globl EXT(monitor_call)
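/*
 * Hedged usage sketch (illustration only; MONITOR_CALL_EXAMPLE is a
 * hypothetical call number, not one defined by this file):
 *
 *	// From C, a monitor call looks like an ordinary function call; the
 *	// trap to the EL3 monitor happens inside this routine.
 *	unsigned long result = monitor_call(MONITOR_CALL_EXAMPLE, arg1, arg2, arg3);
 */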
/*
 * void ml_sign_thread_state(arm_saved_state_t *ss, uint64_t pc,
 *                           uint32_t cpsr, uint64_t lr, uint64_t x16,
 *                           uint64_t x17)
 */
	.globl EXT(ml_sign_thread_state)
LEXT(ml_sign_thread_state)
	pacga	x1, x1, x0		/* PC hash (gkey + &arm_saved_state) */
	/*
	 * Mask off the carry flag so we don't need to re-sign when that flag is
	 * touched by the system call return path.
	 */
	bic	x2, x2, PSR_CF
	pacga	x1, x2, x1		/* SPSR hash (gkey + pc hash) */
	pacga	x1, x3, x1		/* LR hash (gkey + spsr hash) */
	pacga	x1, x4, x1		/* X16 hash (gkey + lr hash) */
	pacga	x1, x5, x1		/* X17 hash (gkey + x16 hash) */
	str	x1, [x0, SS64_JOPHASH]
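/*
 * Hedged sketch of the hash chain computed above (conceptual only;
 * pacga_hash() is a stand-in for the PACGA instruction keyed with the
 * kernel's G-key, and the result is stored at offset SS64_JOPHASH in *ss):
 *
 *	uint64_t h = pacga_hash(pc, (uint64_t)ss);	// bind the hash to this saved state
 *	h = pacga_hash(cpsr & ~PSR_CF, h);		// carry flag excluded (see comment above)
 *	h = pacga_hash(lr,  h);
 *	h = pacga_hash(x16, h);
 *	h = pacga_hash(x17, h);
 *	store_jophash(ss, h);				// verified later by ml_check_signed_state()
 */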
/*
 * void ml_check_signed_state(arm_saved_state_t *ss, uint64_t pc,
 *                            uint32_t cpsr, uint64_t lr, uint64_t x16,
 *                            uint64_t x17)
 */
	.globl EXT(ml_check_signed_state)
LEXT(ml_check_signed_state)
	pacga	x1, x1, x0		/* PC hash (gkey + &arm_saved_state) */
	/*
	 * Mask off the carry flag so we don't need to re-sign when that flag is
	 * touched by the system call return path.
	 */
	bic	x2, x2, PSR_CF
	pacga	x1, x2, x1		/* SPSR hash (gkey + pc hash) */
	pacga	x1, x3, x1		/* LR hash (gkey + spsr hash) */
	pacga	x1, x4, x1		/* X16 hash (gkey + lr hash) */
	pacga	x1, x5, x1		/* X17 hash (gkey + x16 hash) */
	ldr	x2, [x0, SS64_JOPHASH]
	cmp	x1, x2
	b.ne	Lcheck_hash_panic
	ret
Lcheck_hash_panic:
	adr	x0, Lcheck_hash_str
	CALL_EXTERN panic_with_thread_kernel_state
Lcheck_hash_str:
	.asciz "JOP Hash Mismatch Detected (PC, CPSR, or LR corruption)"
#endif /* HAS_APPLE_PAC */
	.globl EXT(fill32_dczva)

	.globl EXT(fill32_nt)
	stnp	q0, q0, [x0, #0x20]
	stnp	q0, q0, [x0, #0x40]
	stnp	q0, q0, [x0, #0x60]

/* vim: set sw=4 ts=4: */