/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <sys/errno.h>

/* uint32_t get_fpscr(void):
 *	Returns (FPSR | FPCR).
 */
	mrs	x1, FPSR			// Grab FPSR
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	and	x1, x1, x0			// Be paranoid, and clear bits we expect to be clear
	mrs	x2, FPCR			// Grab FPCR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	and	x2, x2, x0			// Be paranoid, and clear bits we expect to be clear
	orr	x0, x1, x2			// OR them to get FPSCR equivalent state
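
/*
 * Illustrative C sketch (not part of this file): the merge performed by
 * get_fpscr above, assuming FPSR_MASK and FPCR_MASK select the bits each
 * register owns. The helper name is hypothetical.
 *
 *	static inline uint32_t
 *	fpscr_equivalent(uint32_t fpsr, uint32_t fpcr)
 *	{
 *		// Mirror the assembly: mask each register, then OR them together.
 *		return (fpsr & FPSR_MASK) | (fpcr & FPCR_MASK);
 *	}
 */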

/* void set_fpscr(uint32_t value):
 *	Set the FPCR and FPSR registers, based on the given value; a
 *	noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
 *	and FPCR are not responsible for condition codes.
 */
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	and	x1, x1, x0			// Clear the bits that don't apply to FPSR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	and	x2, x2, x0			// Clear the bits that don't apply to FPCR
	msr	FPSR, x1			// Write FPSR
	msr	FPCR, x2			// Write FPCR
	dsb	ish				// FPCR requires synchronization
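
/*
 * Illustrative C sketch (not part of this file): the split performed by
 * set_fpscr above, under the same masking assumption. Names are hypothetical.
 *
 *	static inline void
 *	split_fpscr(uint32_t value, uint32_t *fpsr, uint32_t *fpcr)
 *	{
 *		*fpsr = value & FPSR_MASK;	// Status bits only
 *		*fpcr = value & FPCR_MASK;	// Control bits only
 *	}
 */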

#if __ARM_KERNEL_PROTECT__
/*
 * __ARM_KERNEL_PROTECT__ adds two complications to TLB management:
 *
 * 1. As each pmap has two ASIDs, every TLB operation that targets an ASID must
 *    target both ASIDs for the pmap that owns the target ASID.
 *
 * 2. Any TLB operation targeting the kernel_pmap ASID (ASID 0) must target all
 *    ASIDs (as kernel_pmap mappings may be referenced while using an ASID that
 *    belongs to another pmap). We expect these routines to be called with the
 *    EL0 ASID for the target; not the EL1 ASID.
 */
#endif /* __ARM_KERNEL_PROTECT__ */
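
/*
 * Illustrative C sketch (not part of this file) of the ASID rule above,
 * assuming the EL0 ASID is the even member of each pmap's ASID pair and the
 * matching EL1 ASID differs only in the bit toggled via TLBI_ASID_SHIFT in
 * the routines below. Helper names are hypothetical.
 *
 *	static void
 *	flush_asid_pair(uint64_t el0_asid)
 *	{
 *		if (el0_asid == 0) {
 *			flush_all_asids();		// kernel_pmap: flush everything
 *			return;
 *		}
 *		flush_one_asid(el0_asid);		// EL0 member of the pair
 *		flush_one_asid(el0_asid | 1);		// EL1 member of the pair
 *	}
 */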

/*
 * void flush_mmu_tlb(void)
 */
	.globl EXT(flush_mmu_tlb)

/*
 * void flush_core_tlb(void)
 */
	.globl EXT(flush_core_tlb)

/*
 * void flush_mmu_tlb_allentries(uint64_t, uint64_t)
 */
	.globl EXT(flush_mmu_tlb_allentries)
LEXT(flush_mmu_tlb_allentries)
	/*
	 * The code below is not necessarily correct. From an overview of
	 * the client code, the expected contract for TLB flushes is that
	 * we will expand from an "address, length" pair to "start address,
	 * end address" in the course of a TLB flush. This suggests that
	 * a flush for "X, X+4" is actually only asking for a flush of a
	 * single 16KB page. At the same time, we'd like to be prepared
	 * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
	 * number to a 16KB page boundary. This should deal correctly with
	 * unaligned inputs.
	 *
	 * If our expectations about client behavior are wrong, however, this
	 * will lead to occasional TLB corruption on platforms with 16KB
	 * pages.
	 */
Lflush_mmu_tlb_allentries_loop:
	add	x0, x0, #(ARM_PGBYTES / 4096)	// Units are 4KB pages, as defined by the ISA
	b.lt	Lflush_mmu_tlb_allentries_loop
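
/*
 * Illustrative C sketch (not part of this file) of the rounding contract
 * described above, assuming 16KB kernel pages and start/end expressed as 4KB
 * page numbers, as the TLBI operand requires. Names are hypothetical.
 *
 *	#define PAGES_PER_16K	(16384 / 4096)
 *
 *	static void
 *	flush_range(uint64_t start, uint64_t end)
 *	{
 *		start &= ~(uint64_t)(PAGES_PER_16K - 1);	// Truncate start to a 16KB boundary
 *		end = (end + PAGES_PER_16K - 1) & ~(uint64_t)(PAGES_PER_16K - 1);	// "Add 3 and truncate"
 *		for (uint64_t page = start; page < end; page += PAGES_PER_16K)
 *			tlb_flush_page(page);			// One TLBI per 16KB page
 *	}
 */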

/*
 * void flush_mmu_tlb_entry(uint64_t)
 */
	.globl EXT(flush_mmu_tlb_entry)
LEXT(flush_mmu_tlb_entry)
#if __ARM_KERNEL_PROTECT__
	/*
	 * If we are flushing ASID 0, this is a kernel operation. With this
	 * ASID scheme, this means we should flush all ASIDs.
	 */
	lsr	x2, x0, #TLBI_ASID_SHIFT
	b.eq	Lflush_mmu_tlb_entry_globally
	bic	x0, x0, #(1 << TLBI_ASID_SHIFT)
	orr	x0, x0, #(1 << TLBI_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */

#if __ARM_KERNEL_PROTECT__
Lflush_mmu_tlb_entry_globally:
#endif /* __ARM_KERNEL_PROTECT__ */

/*
 * void flush_mmu_tlb_entries(uint64_t, uint64_t)
 */
	.globl EXT(flush_mmu_tlb_entries)
LEXT(flush_mmu_tlb_entries)
	/*
	 * The code below is not necessarily correct. From an overview of
	 * the client code, the expected contract for TLB flushes is that
	 * we will expand from an "address, length" pair to "start address,
	 * end address" in the course of a TLB flush. This suggests that
	 * a flush for "X, X+4" is actually only asking for a flush of a
	 * single 16KB page. At the same time, we'd like to be prepared
	 * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
	 * number to a 16KB page boundary. This should deal correctly with
	 * unaligned inputs.
	 *
	 * If our expectations about client behavior are wrong, however, this
	 * will lead to occasional TLB corruption on platforms with 16KB
	 * pages.
	 */
#endif /* __ARM_KERNEL_PROTECT__ */
#if __ARM_KERNEL_PROTECT__
	/*
	 * If we are flushing ASID 0, this is a kernel operation. With this
	 * ASID scheme, this means we should flush all ASIDs.
	 */
	lsr	x2, x0, #TLBI_ASID_SHIFT
	b.eq	Lflush_mmu_tlb_entries_globally_loop
	bic	x0, x0, #(1 << TLBI_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */
Lflush_mmu_tlb_entries_loop:
#if __ARM_KERNEL_PROTECT__
	orr	x0, x0, #(1 << TLBI_ASID_SHIFT)
	bic	x0, x0, #(1 << TLBI_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */
	add	x0, x0, #(ARM_PGBYTES / 4096)	// Units are pages
	b.lt	Lflush_mmu_tlb_entries_loop

#if __ARM_KERNEL_PROTECT__
Lflush_mmu_tlb_entries_globally_loop:
	add	x0, x0, #(ARM_PGBYTES / 4096)	// Units are pages
	b.lt	Lflush_mmu_tlb_entries_globally_loop
#endif /* __ARM_KERNEL_PROTECT__ */

/*
 * void flush_mmu_tlb_asid(uint64_t)
 *
 *	Flush TLB entries for requested asid
 */
	.globl EXT(flush_mmu_tlb_asid)
LEXT(flush_mmu_tlb_asid)
#if __ARM_KERNEL_PROTECT__
	/*
	 * If we are flushing ASID 0, this is a kernel operation. With this
	 * ASID scheme, this means we should flush all ASIDs.
	 */
	lsr	x1, x0, #TLBI_ASID_SHIFT
	b.eq	Lflush_mmu_tlb_globally
	bic	x0, x0, #(1 << TLBI_ASID_SHIFT)
	orr	x0, x0, #(1 << TLBI_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */

#if __ARM_KERNEL_PROTECT__
Lflush_mmu_tlb_globally:
#endif /* __ARM_KERNEL_PROTECT__ */

/*
 * void flush_core_tlb_asid(uint64_t)
 *
 *	Flush TLB entries for core for requested asid
 */
	.globl EXT(flush_core_tlb_asid)
LEXT(flush_core_tlb_asid)
#if __ARM_KERNEL_PROTECT__
	/*
	 * If we are flushing ASID 0, this is a kernel operation. With this
	 * ASID scheme, this means we should flush all ASIDs.
	 */
	lsr	x1, x0, #TLBI_ASID_SHIFT
	b.eq	Lflush_core_tlb_asid_globally
	bic	x0, x0, #(1 << TLBI_ASID_SHIFT)
	orr	x0, x0, #(1 << TLBI_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */

#if __ARM_KERNEL_PROTECT__
Lflush_core_tlb_asid_globally:
#endif /* __ARM_KERNEL_PROTECT__ */

/*
 * Set MMU Translation Table Base Alternate
 */
	.globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
#if defined(KERNEL_INTEGRITY_KTRR)
	bl	EXT(pinst_set_ttbr1)
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

/*
 * set AUX control register
 */
	.globl EXT(set_aux_control)
LEXT(set_aux_control)
	// Synchronize system

#if __ARM_KERNEL_PROTECT__
	.globl EXT(set_vbar_el1)
#if defined(KERNEL_INTEGRITY_KTRR)
	b	EXT(pinst_set_vbar)
#endif /* __ARM_KERNEL_PROTECT__ */

/*
 * set translation control register
 */
#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Assert that T0SZ is always equal to T1SZ
	eor	x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
	and	x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
	cbnz	x1, L_set_tcr_panic
#if defined(KERNEL_INTEGRITY_KTRR)
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

	adr	x0, L_set_tcr_panic_str

L_set_locked_reg_panic:
	adr	x0, L_set_locked_reg_panic_str

	.asciz	"set_tcr: t0sz, t1sz not equal (%llx)\n"

L_set_locked_reg_panic_str:
	.asciz	"attempt to set locked register: (%llx)\n"

#if defined(KERNEL_INTEGRITY_KTRR)
#endif // defined(APPLE_ARM64_ARCH_FAMILY)
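
/*
 * Illustrative C sketch (not part of this file) of the T0SZ/T1SZ assertion
 * above: XOR the TCR value with itself shifted so the T1SZ field lines up
 * with T0SZ; any bit left in the T0SZ field after masking means the two
 * fields differ. The function name is hypothetical.
 *
 *	static bool
 *	tcr_tsz_fields_equal(uint64_t tcr)
 *	{
 *		uint64_t diff = tcr ^ (tcr >> (TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT));
 *		return (diff & (TCR_TSZ_MASK << TCR_T0SZ_SHIFT)) == 0;
 *	}
 */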

/*
 * MMU kernel virtual to physical address translation
 */
	.globl EXT(mmu_kvtop)
	mrs	x2, DAIF				// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ and FIQ
	at	s1e1r, x0				// Translation Stage 1 EL1
	mrs	x1, PAR_EL1				// Read result
	msr	DAIF, x2				// Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_invalid		// Test translation not valid
	bfm	x1, x0, #0, #11				// Add page offset
	and	x0, x1, #0x0000ffffffffffff		// Clear non-address bits

	mov	x0, xzr					// Return invalid
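
/*
 * Illustrative C sketch (not part of this file) of how the physical address
 * is assembled from PAR_EL1 above when the translation succeeds (PAR.F, bit
 * 0, clear): splice the low 12 bits of the virtual address into the output
 * address and drop the attribute bits. The function name is hypothetical.
 *
 *	static uint64_t
 *	par_to_pa(uint64_t par, uint64_t va)
 *	{
 *		if (par & 1)
 *			return 0;				// Translation failed: return invalid
 *		uint64_t pa = (par & ~0xFFFULL) | (va & 0xFFF);	// Add page offset
 *		return pa & 0x0000FFFFFFFFFFFFULL;		// Clear non-address bits
 *	}
 */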

/*
 * MMU user virtual to physical address translation
 */
	.globl EXT(mmu_uvtop)
	lsr	x8, x0, #56				// Extract top byte
	cbnz	x8, L_mmu_uvtop_invalid			// Tagged pointers are invalid
	mrs	x2, DAIF				// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ and FIQ
	at	s1e0r, x0				// Translation Stage 1 EL0
	mrs	x1, PAR_EL1				// Read result
	msr	DAIF, x2				// Restore interrupt state
	tbnz	x1, #0, L_mmu_uvtop_invalid		// Test translation not valid
	bfm	x1, x0, #0, #11				// Add page offset
	and	x0, x1, #0x0000ffffffffffff		// Clear non-address bits

	mov	x0, xzr					// Return invalid

/*
 * MMU kernel virtual to physical address preflight write access
 */
	.globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs	x2, DAIF				// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ and FIQ
	at	s1e1w, x0				// Translation Stage 1 EL1, write access
	mrs	x1, PAR_EL1				// Read result
	msr	DAIF, x2				// Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_wpreflight_invalid	// Test translation not valid
	bfm	x1, x0, #0, #11				// Add page offset
	and	x0, x1, #0x0000ffffffffffff		// Clear non-address bits
L_mmu_kvtop_wpreflight_invalid:
	mov	x0, xzr					// Return invalid

/*
 * SET_RECOVERY_HANDLER
 *
 *	Sets up a page fault recovery handler
 *
 *	arg0 - persisted thread pointer
 *	arg1 - persisted recovery handler
 *	arg2 - scratch register
 *	arg3 - recovery label
 */
.macro SET_RECOVERY_HANDLER
	mrs	$0, TPIDR_EL1			// Load thread pointer
	ldr	$1, [$0, TH_RECOVER]		// Save previous recovery handler
	adrp	$2, $3@page			// Load the recovery handler address
	add	$2, $2, $3@pageoff
	str	$2, [$0, TH_RECOVER]		// Set new recovery handler

/*
 * CLEAR_RECOVERY_HANDLER
 *
 *	Clears page fault handler set by SET_RECOVERY_HANDLER
 *
 *	arg0 - thread pointer saved by SET_RECOVERY_HANDLER
 *	arg1 - old recovery handler saved by SET_RECOVERY_HANDLER
 */
.macro CLEAR_RECOVERY_HANDLER
	str	$1, [$0, TH_RECOVER]		// Restore the previous recovery handler
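
/*
 * Illustrative C sketch (not part of this file) of the pattern these macros
 * implement: stash the thread's current recovery handler, point it at a
 * local fault label for the duration of the user access, then restore it.
 * The field and helper names (recover, current_thread) are hypothetical, and
 * the GNU label-address extension stands in for the recovery label.
 *
 *	static int
 *	copy_with_recovery(void *dst, const void *src, size_t len)
 *	{
 *		void *saved = current_thread()->recover;	// SET_RECOVERY_HANDLER
 *		current_thread()->recover = &&fault_handler;
 *		memcpy(dst, src, len);				// may fault on the user address
 *		current_thread()->recover = saved;		// CLEAR_RECOVERY_HANDLER
 *		return 0;
 *	fault_handler:
 *		current_thread()->recover = saved;		// fault path also restores it
 *		return EFAULT;
 *	}
 */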

	CLEAR_RECOVERY_HANDLER x10, x11
	mov	x0, #EFAULT			// Return an EFAULT error

/*
 * int _bcopyin(const char *src, char *dst, vm_size_t len)
 */
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	/* Fixup the len and test for completion */
	CLEAR_RECOVERY_HANDLER x10, x11
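
/*
 * Illustrative C sketch (not part of this file) of the copy strategy above:
 * short requests fall back to a bytewise copy, longer ones move 16 bytes per
 * iteration, and the recovery handler turns a fault on the user address into
 * EFAULT. Names are hypothetical.
 *
 *	int
 *	bcopyin_sketch(const char *src, char *dst, size_t len)
 *	{
 *		// (recovery handler installed here)
 *		while (len >= 16) {
 *			memcpy(dst, src, 16);		// the ldp/stp pair above
 *			src += 16; dst += 16; len -= 16;
 *		}
 *		while (len--)
 *			*dst++ = *src++;		// bytewise tail
 *		// (recovery handler cleared here)
 *		return 0;
 *	}
 */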

/*
 * int _copyin_word(const char *src, uint64_t *dst, vm_size_t len)
 */
	.globl EXT(_copyin_word)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	b	L_copyin_word_store
	CLEAR_RECOVERY_HANDLER x10, x11

/*
 * int _bcopyout(const char *src, char *dst, vm_size_t len)
 */
	.globl EXT(_bcopyout)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	/* Fixup the len and test for completion */
	CLEAR_RECOVERY_HANDLER x10, x11

/*
 * int _bcopyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t max, vm_size_t *actual)
 */
	.globl EXT(_bcopyinstr)
	adr	x4, Lcopyinstr_error		// Get address for recover
	mrs	x10, TPIDR_EL1			// Get thread pointer
	ldr	x11, [x10, TH_RECOVER]		// Save previous recover
	str	x4, [x10, TH_RECOVER]		// Store new recover
	mov	x4, xzr				// x4 - total bytes copied
	ldrb	w5, [x0], #1			// Load a byte from the user source
	strb	w5, [x1], #1			// Store a byte to the kernel dest
	add	x4, x4, #1			// Increment bytes copied
	cbz	x5, Lcopyinstr_done		// If this byte is null, we're done
	cmp	x4, x2				// If we're out of space, return an error
	mov	x5, #ENAMETOOLONG		// Set current byte to error code for later return

	str	x4, [x3]			// Return number of bytes copied
	mov	x0, x5				// Set error code (0 on success, ENAMETOOLONG on failure)

	mov	x0, #EFAULT			// Return EFAULT on error
	str	x11, [x10, TH_RECOVER]		// Restore old recover
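
/*
 * Illustrative C sketch (not part of this file) of the loop above: copy
 * bytes until a NUL terminator, the size limit, or a fault. The count
 * written back includes the terminator, mirroring the assembly. Names are
 * hypothetical.
 *
 *	int
 *	bcopyinstr_sketch(const char *user_src, char *kernel_dst, size_t max, size_t *actual)
 *	{
 *		size_t copied = 0;
 *		int err = 0;
 *		while (copied < max) {
 *			char c = *user_src++;		// A fault here yields EFAULT via the recovery handler
 *			*kernel_dst++ = c;
 *			copied++;
 *			if (c == '\0')
 *				goto done;
 *		}
 *		err = ENAMETOOLONG;			// No NUL within max bytes
 *	done:
 *		*actual = copied;
 *		return err;
 *	}
 */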

/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
 *
 *	Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
 *	either user or kernel memory, or 8 bytes (AArch32) from user only.
 *
 *	x0 : address of frame to copy.
 *	x1 : kernel address at which to store data.
 *	w2 : whether to copy an AArch32 or AArch64 frame.
 *	x5 : temp (kernel virtual base)
 *	x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 *	x11 : old recovery function (set by SET_RECOVERY_HANDLER)
 *	x12, x13 : backtrace data
 */
	.globl EXT(copyinframe)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	cbnz	w2, Lcopyinframe64		// Check frame size
	adrp	x5, EXT(gVirtBase)@page		// For 32-bit frame, make sure we're not trying to copy from kernel
	add	x5, x5, EXT(gVirtBase)@pageoff
	cmp	x5, x0				// See if address is in kernel virtual range
	b.hi	Lcopyinframe32			// If below kernel virtual range, proceed.
	mov	w0, #EFAULT			// Should never have a 32-bit frame in kernel virtual range

	ldr	x12, [x0]			// Copy 8 bytes
	mov	w0, #0				// Success

	mov	x3, VM_MIN_KERNEL_ADDRESS	// Check if kernel address
	orr	x9, x0, TBI_MASK		// Hide tags in address comparison
	cmp	x9, x3				// If in kernel address range, skip tag test
	b.hs	Lcopyinframe_valid
	tst	x0, TBI_MASK			// Detect tagged pointers
	b.eq	Lcopyinframe_valid
	mov	w0, #EFAULT			// Tagged address, fail

	ldp	x12, x13, [x0]			// Copy 16 bytes
	mov	w0, #0				// Success

	CLEAR_RECOVERY_HANDLER x10, x11
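
/*
 * Illustrative C sketch (not part of this file) of the policy above: 64-bit
 * frames may come from user or kernel addresses, but tagged user pointers
 * are rejected; 32-bit frames must lie below the kernel's virtual base. The
 * constants are the ones used above; the function name and the plain memcpy
 * standing in for the faulting copy are illustrative.
 *
 *	int
 *	copyinframe_sketch(uintptr_t frame, char *out, bool is64bit)
 *	{
 *		if (!is64bit) {
 *			if (frame >= gVirtBase)
 *				return EFAULT;			// 32-bit frame in kernel range
 *			memcpy(out, (const void *)frame, 8);	// AArch32: fp + lr
 *			return 0;
 *		}
 *		if ((frame | TBI_MASK) < VM_MIN_KERNEL_ADDRESS && (frame & TBI_MASK))
 *			return EFAULT;				// Tagged user pointer
 *		memcpy(out, (const void *)frame, 16);		// AArch64: fp + lr
 *		return 0;
 *	}
 */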

/*
 * int _emulate_swp(user_addr_t addr, uint32_t newval, uint32_t *oldval)
 *
 *	Securely emulates the swp instruction removed from ARMv8.
 *	Returns true on success.
 *	Returns false if the user address is not user accessible.
 *
 *	x0 : address to swap
 *	x1 : new value to store
 *	x2 : address to save old value
 *	x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 *	x11 : old recovery handler (set by SET_RECOVERY_HANDLER)
 *	x12 : interrupt state
 */
	.globl EXT(_emulate_swp)
	SET_RECOVERY_HANDLER x10, x11, x3, swp_error
	ldxr	w3, [x0]			// Load data at target address
	stxr	w4, w1, [x0]			// Store new value to target address
	cbnz	w4, Lswp_try			// Retry if store failed
	str	w3, [x2]			// Save old value
	mov	x13, #1				// Set successful return value

	mov	x0, x13				// Set return value
	CLEAR_RECOVERY_HANDLER x10, x11
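
/*
 * Illustrative C sketch (not part of this file) of the emulation above: an
 * atomic exchange against the user address, with the recovery handler
 * turning an inaccessible address into a "false" return. The standard
 * __atomic builtin stands in for the ldxr/stxr retry loop purely for
 * illustration; the function name is hypothetical.
 *
 *	int
 *	emulate_swp_sketch(uint32_t *addr, uint32_t newval, uint32_t *oldval)
 *	{
 *		// (recovery handler installed here; a fault returns 0)
 *		*oldval = __atomic_exchange_n(addr, newval, __ATOMIC_RELAXED);
 *		// (recovery handler cleared here)
 *		return 1;				// true: swap performed
 *	}
 */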

/*
 * int _emulate_swpb(user_addr_t addr, uint32_t newval, uint32_t *oldval)
 *
 *	Securely emulates the swpb instruction removed from ARMv8.
 *	Returns true on success.
 *	Returns false if the user address is not user accessible.
 *
 *	x0 : address to swap
 *	x1 : new value to store
 *	x2 : address to save old value
 *	x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 *	x11 : old recovery handler (set by SET_RECOVERY_HANDLER)
 *	x12 : interrupt state
 */
	.globl EXT(_emulate_swpb)
	SET_RECOVERY_HANDLER x10, x11, x3, swp_error
	ldxrb	w3, [x0]			// Load data at target address
	stxrb	w4, w1, [x0]			// Store new value to target address
	cbnz	w4, Lswpb_try			// Retry if store failed
	str	w3, [x2]			// Save old value
	mov	x13, #1				// Set successful return value

	mov	x0, x13				// Set return value
	CLEAR_RECOVERY_HANDLER x10, x11

	mov	x0, xzr				// Return false
	CLEAR_RECOVERY_HANDLER x10, x11

/*
 * uint32_t arm_debug_read_dscr(void)
 */
	.globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 *	Set debug registers to match the current thread state
 *	(NULL to disable). Assume 6 breakpoints and 2
 *	watchpoints, since that has been the case in all cores
 *	we have seen so far.
 */
	.globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)

#if defined(APPLE_ARM64_ARCH_FAMILY)
/*
 * Note: still have to ISB before executing wfi!
 */
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
	// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
	mrs	x0, ARM64_REG_HID2				// Read HID2
	orr	x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch)	// Set HID.DisableMTLBPrefetch
	msr	ARM64_REG_HID2, x0				// Write HID2

#if __ARM_GLOBAL_SLEEP_BIT__
	mrs	x1, ARM64_REG_ACC_OVRD
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
	msr	ARM64_REG_ACC_OVRD, x1

	mov	x1, ARM64_REG_CYC_CFG_deepSleep
	msr	ARM64_REG_CYC_CFG, x1

	// Set "OK to power down" (<rdar://problem/12390433>)
	mrs	x0, ARM64_REG_CYC_OVRD
	orr	x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
	msr	ARM64_REG_CYC_OVRD, x0

/*
 * Force WFI to use clock gating only
 */
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	mrs	x0, ARM64_REG_CYC_OVRD
	orr	x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
	msr	ARM64_REG_CYC_OVRD, x0

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
	.globl EXT(cyclone_typhoon_prepare_for_wfi)
LEXT(cyclone_typhoon_prepare_for_wfi)
	// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
	mrs	x0, ARM64_REG_HID2				// Read HID2
	orr	x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch)	// Set HID.DisableMTLBPrefetch
	msr	ARM64_REG_HID2, x0				// Write HID2

	.globl EXT(cyclone_typhoon_return_from_wfi)
LEXT(cyclone_typhoon_return_from_wfi)
	// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
	mrs	x0, ARM64_REG_HID2				// Read HID2
	mov	x1, #(ARM64_REG_HID2_disMMUmtlbPrefetch)
	bic	x0, x0, x1					// Clear HID.DisableMTLBPrefetch
	msr	ARM64_REG_HID2, x0				// Write HID2

#define HID0_DEFEATURES_1	0x0000a0c000064010ULL
#define HID1_DEFEATURES_1	0x000000004005bf20ULL
#define HID2_DEFEATURES_1	0x0000000000102074ULL
#define HID3_DEFEATURES_1	0x0000000000400003ULL
#define HID4_DEFEATURES_1	0x83ff00e100000268ULL
#define HID7_DEFEATURES_1	0x000000000000000eULL

#define HID0_DEFEATURES_2	0x0000a1c000020010ULL
#define HID1_DEFEATURES_2	0x000000000005d720ULL
#define HID2_DEFEATURES_2	0x0000000000002074ULL
#define HID3_DEFEATURES_2	0x0000000000400001ULL
#define HID4_DEFEATURES_2	0x8390000200000208ULL
#define HID7_DEFEATURES_2	0x0000000000000000ULL

/*
 *	arg0 = target register
 *	arg1 = 64-bit constant
 */
	movz	$0, #(($1 >> 48) & 0xffff), lsl #48
	movk	$0, #(($1 >> 32) & 0xffff), lsl #32
	movk	$0, #(($1 >> 16) & 0xffff), lsl #16
	movk	$0, #(($1) & 0xffff)
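
/*
 * Illustrative C sketch (not part of this file) of what the movz/movk
 * sequence above computes: the 64-bit constant is rebuilt from four 16-bit
 * immediates, highest halfword first. The function name is hypothetical.
 *
 *	static uint64_t
 *	load_uint64_sketch(uint64_t c)
 *	{
 *		uint64_t r;
 *		r  = ((c >> 48) & 0xffff) << 48;	// movz ..., lsl #48
 *		r |= ((c >> 32) & 0xffff) << 32;	// movk ..., lsl #32
 *		r |= ((c >> 16) & 0xffff) << 16;	// movk ..., lsl #16
 *		r |=  (c        & 0xffff);		// movk ...
 *		return r;
 *	}
 */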

	.globl EXT(cpu_defeatures_set)
LEXT(cpu_defeatures_set)
	b.eq	cpu_defeatures_set_2
	b.ne	cpu_defeatures_set_ret
	LOAD_UINT64	x1, HID0_DEFEATURES_1
	mrs	x0, ARM64_REG_HID0
	msr	ARM64_REG_HID0, x0
	LOAD_UINT64	x1, HID1_DEFEATURES_1
	mrs	x0, ARM64_REG_HID1
	msr	ARM64_REG_HID1, x0
	LOAD_UINT64	x1, HID2_DEFEATURES_1
	mrs	x0, ARM64_REG_HID2
	msr	ARM64_REG_HID2, x0
	LOAD_UINT64	x1, HID3_DEFEATURES_1
	mrs	x0, ARM64_REG_HID3
	msr	ARM64_REG_HID3, x0
	LOAD_UINT64	x1, HID4_DEFEATURES_1
	mrs	x0, ARM64_REG_HID4
	msr	ARM64_REG_HID4, x0
	LOAD_UINT64	x1, HID7_DEFEATURES_1
	mrs	x0, ARM64_REG_HID7
	msr	ARM64_REG_HID7, x0
	b	cpu_defeatures_set_ret
cpu_defeatures_set_2:
	LOAD_UINT64	x1, HID0_DEFEATURES_2
	mrs	x0, ARM64_REG_HID0
	msr	ARM64_REG_HID0, x0
	LOAD_UINT64	x1, HID1_DEFEATURES_2
	mrs	x0, ARM64_REG_HID1
	msr	ARM64_REG_HID1, x0
	LOAD_UINT64	x1, HID2_DEFEATURES_2
	mrs	x0, ARM64_REG_HID2
	msr	ARM64_REG_HID2, x0
	LOAD_UINT64	x1, HID3_DEFEATURES_2
	mrs	x0, ARM64_REG_HID3
	msr	ARM64_REG_HID3, x0
	LOAD_UINT64	x1, HID4_DEFEATURES_2
	mrs	x0, ARM64_REG_HID4
	msr	ARM64_REG_HID4, x0
	LOAD_UINT64	x1, HID7_DEFEATURES_2
	mrs	x0, ARM64_REG_HID7
	msr	ARM64_REG_HID7, x0
	b	cpu_defeatures_set_ret
cpu_defeatures_set_ret:

/*
 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
 *				uintptr_t arg2, uintptr_t arg3)
 *
 * Call the EL3 monitor with 4 arguments in registers.
 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
 * registers are preserved, temporary registers are not. Parameters and results are passed in
 * registers.
 */
	.globl EXT(monitor_call)
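
/*
 * Illustrative C sketch (not part of this file) of a call through this
 * interface; the call-number constant is hypothetical.
 *
 *	uintptr_t a1 = 0, a2 = 0, a3 = 0;
 *	unsigned long result = monitor_call(MONITOR_EXAMPLE_CALL,	// hypothetical call number
 *					    a1, a2, a3);		// forwarded per the C ABI
 */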

/* vim: set sw=4 ts=4: */