/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <sys/errno.h>
/*
 * uint32_t get_fpscr(void):
 *	Returns (FPSR | FPCR).
 */
	mrs	x1, FPSR			// Grab FPSR
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	orr	x0, x4, x5			// Assemble the full FPSR mask
	and	x1, x1, x0			// Be paranoid, and clear bits we expect to be clear
	mrs	x2, FPCR			// Grab FPCR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	orr	x0, x4, x5			// Assemble the full FPCR mask
	and	x2, x2, x0			// Be paranoid, and clear bits we expect to be clear
	orr	x0, x1, x2			// OR them to get FPSCR-equivalent state
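/*
 * Illustrative only: a rough C equivalent of the read-and-mask sequence above,
 * assuming FPSR_MASK and FPCR_MASK come from the headers included at the top
 * of this file (the sketch function name itself is hypothetical):
 *
 *	static uint32_t get_fpscr_sketch(void)
 *	{
 *		uint64_t fpsr, fpcr;
 *		__asm__ volatile ("mrs %0, FPSR" : "=r"(fpsr));	// status (flag) half
 *		__asm__ volatile ("mrs %0, FPCR" : "=r"(fpcr));	// control half
 *		return (uint32_t)((fpsr & FPSR_MASK) | (fpcr & FPCR_MASK));
 *	}
 */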
/*
 * void set_fpscr(uint32_t value):
 *	Set the FPCR and FPSR registers, based on the given value; a
 *	noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
 *	and FPCR are not responsible for condition codes.
 */
	mov	x1, x0				// Copy the value for the FPSR half
	mov	x2, x0				// Copy the value for the FPCR half
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	orr	x0, x4, x5			// Assemble the full FPSR mask
	and	x1, x1, x0			// Clear the bits that don't apply to FPSR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	orr	x0, x4, x5			// Assemble the full FPCR mask
	and	x2, x2, x0			// Clear the bits that don't apply to FPCR
	msr	FPSR, x1			// Write FPSR
	msr	FPCR, x2			// Write FPCR
	dsb	ish				// FPCR requires synchronization
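/*
 * The write direction, again as a hedged C sketch (illustrative name; the
 * masks are assumed to come from the included headers):
 *
 *	static void set_fpscr_sketch(uint32_t value)
 *	{
 *		uint64_t fpsr = value & FPSR_MASK;		// status/flag bits
 *		uint64_t fpcr = value & FPCR_MASK;		// control bits
 *		__asm__ volatile ("msr FPSR, %0" :: "r"(fpsr));
 *		__asm__ volatile ("msr FPCR, %0" :: "r"(fpcr));
 *		__asm__ volatile ("dsb ish");			// FPCR requires synchronization
 *	}
 */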
#if (__ARM_VFP__ >= 3)

/*
 * void flush_mmu_tlb(void)
 */
	.globl EXT(flush_mmu_tlb)

/*
 * void flush_core_tlb(void)
 */
	.globl EXT(flush_core_tlb)

/*
 * void flush_mmu_tlb_allentries(uint64_t, uint64_t)
 */
	.globl EXT(flush_mmu_tlb_allentries)
LEXT(flush_mmu_tlb_allentries)
	/*
	 * The code below is not necessarily correct. From an overview of
	 * the client code, the expected contract for TLB flushes is that
	 * we will expand from an "address, length" pair to "start address,
	 * end address" in the course of a TLB flush. This suggests that
	 * a flush for "X, X+4" is actually only asking for a flush of a
	 * single 16KB page. At the same time, we'd like to be prepared
	 * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
	 * number to a 16KB page boundary. This should deal correctly with
	 * unaligned inputs.
	 *
	 * If our expectations about client behavior are wrong, however, this
	 * will lead to occasional TLB corruption on platforms with 16KB
	 * pages.
	 */
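	/*
	 * Worked example of the rounding (illustrative, assuming 16KB pages,
	 * i.e. ARM_PGBYTES == 16384, so the loop below steps by 4 units of
	 * 4KB page numbers):
	 *
	 *	expected input (X, X+4), X 16KB-aligned:  end = (X+4+3) & ~3 = X+4  ->  one 16KB page flushed
	 *	bad input      (X, X+3):                  end = (X+3+3) & ~3 = X+4  ->  still one 16KB page
	 */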
	add	x0, x0, #(ARM_PGBYTES / 4096)	// Units are 4KB pages, as defined by the ISA
/*
 * void flush_mmu_tlb_entry(uint64_t)
 */
	.globl EXT(flush_mmu_tlb_entry)
LEXT(flush_mmu_tlb_entry)

/*
 * void flush_mmu_tlb_entries(uint64_t, uint64_t)
 */
	.globl EXT(flush_mmu_tlb_entries)
LEXT(flush_mmu_tlb_entries)
	/*
	 * The code below is not necessarily correct. From an overview of
	 * the client code, the expected contract for TLB flushes is that
	 * we will expand from an "address, length" pair to "start address,
	 * end address" in the course of a TLB flush. This suggests that
	 * a flush for "X, X+4" is actually only asking for a flush of a
	 * single 16KB page. At the same time, we'd like to be prepared
	 * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
	 * number to a 16KB page boundary. This should deal correctly with
	 * unaligned inputs.
	 *
	 * If our expectations about client behavior are wrong, however, this
	 * will lead to occasional TLB corruption on platforms with 16KB
	 * pages.
	 */
	add	x0, x0, #(ARM_PGBYTES / 4096)	// Units are 4KB pages, as defined by the ISA
/*
 * void flush_mmu_tlb_asid(uint64_t)
 *
 *	Flush TLB entries for the requested ASID
 */
	.globl EXT(flush_mmu_tlb_asid)
LEXT(flush_mmu_tlb_asid)
/*
 * void flush_core_tlb_asid(uint64_t)
 *
 *	Flush this core's TLB entries for the requested ASID
 */
	.globl EXT(flush_core_tlb_asid)
LEXT(flush_core_tlb_asid)
/*
 * Set MMU Translation Table Base Alternate
 */
	.globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
#if defined(KERNEL_INTEGRITY_KTRR)
	bl	EXT(pinst_set_ttbr1)
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
/*
 * Set AUX control register
 */
	.globl EXT(set_aux_control)
LEXT(set_aux_control)
	// Synchronize system
#if (DEVELOPMENT || DEBUG)
/*
 * Set MMU control register
 */
	.globl EXT(set_mmu_control)
LEXT(set_mmu_control)
/*
 * Set translation control register
 */
#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Assert that T0SZ is always equal to T1SZ
	eor	x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
	and	x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
	cbnz	x1, L_set_tcr_panic
#if defined(KERNEL_INTEGRITY_KTRR)
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
L_set_tcr_panic:
	adr	x0, L_set_tcr_panic_str
L_set_locked_reg_panic:
	adr	x0, L_set_locked_reg_panic_str
L_set_tcr_panic_str:
	.asciz	"set_tcr: t0sz, t1sz not equal (%llx)\n"
L_set_locked_reg_panic_str:
	.asciz	"attempt to set locked register: (%llx)\n"
#if defined(KERNEL_INTEGRITY_KTRR)
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
#endif // defined(APPLE_ARM64_ARCH_FAMILY)
/*
 * MMU kernel virtual to physical address translation
 */
	.globl EXT(mmu_kvtop)
	mrs	x2, DAIF			// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ and FIQ
	at	s1e1r, x0			// Translation Stage 1 EL1
	mrs	x1, PAR_EL1			// Read result
	msr	DAIF, x2			// Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_invalid	// Test translation-not-valid bit (PAR_EL1.F)
	bfm	x1, x0, #0, #11			// Add page offset
	and	x0, x1, #0x0000ffffffffffff	// Clear non-address bits
	ret
L_mmu_kvtop_invalid:
	mov	x0, xzr				// Return invalid
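/*
 * Hedged C-level sketch of the PAR_EL1 decode above (illustrative only; the
 * bit positions follow the architectural PAR_EL1 layout, and the interrupt
 * masking done by the assembly is omitted here):
 *
 *	static uint64_t kvtop_sketch(uint64_t va)
 *	{
 *		uint64_t par;
 *		__asm__ volatile ("at s1e1r, %0" :: "r"(va));
 *		__asm__ volatile ("mrs %0, PAR_EL1" : "=r"(par));
 *		if (par & 1)				// PAR_EL1.F: translation failed
 *			return 0;
 *		return (par & 0x0000fffffffff000ULL) | (va & 0x0fffULL);	// PA | page offset
 *	}
 */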
/*
 * MMU user virtual to physical address translation
 */
	.globl EXT(mmu_uvtop)
	lsr	x8, x0, #56			// Extract top byte
	cbnz	x8, L_mmu_uvtop_invalid		// Tagged pointers are invalid
	mrs	x2, DAIF			// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ and FIQ
	at	s1e0r, x0			// Translation Stage 1 EL0
	mrs	x1, PAR_EL1			// Read result
	msr	DAIF, x2			// Restore interrupt state
	tbnz	x1, #0, L_mmu_uvtop_invalid	// Test translation-not-valid bit (PAR_EL1.F)
	bfm	x1, x0, #0, #11			// Add page offset
	and	x0, x1, #0x0000ffffffffffff	// Clear non-address bits
	ret
L_mmu_uvtop_invalid:
	mov	x0, xzr				// Return invalid
/*
 * MMU kernel virtual to physical address preflight write access
 */
	.globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs	x2, DAIF			// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ and FIQ
	at	s1e1w, x0			// Translation Stage 1 EL1 (write)
	mrs	x1, PAR_EL1			// Read result
	msr	DAIF, x2			// Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_wpreflight_invalid	// Test translation-not-valid bit (PAR_EL1.F)
	bfm	x1, x0, #0, #11			// Add page offset
	and	x0, x1, #0x0000ffffffffffff	// Clear non-address bits
	ret
L_mmu_kvtop_wpreflight_invalid:
	mov	x0, xzr				// Return invalid
/*
 * SET_RECOVERY_HANDLER
 *
 *	Sets up a page fault recovery handler
 *
 *	arg0 - persisted thread pointer
 *	arg1 - persisted recovery handler
 *	arg2 - scratch register
 *	arg3 - recovery label
 */
.macro SET_RECOVERY_HANDLER
	mrs	$0, TPIDR_EL1			// Load thread pointer
	ldr	$1, [$0, TH_RECOVER]		// Save previous recovery handler
	adrp	$2, $3@page			// Load the recovery handler address
	add	$2, $2, $3@pageoff
	str	$2, [$0, TH_RECOVER]		// Set new recovery handler
.endmacro
/*
 * CLEAR_RECOVERY_HANDLER
 *
 *	Clears the page fault handler set by SET_RECOVERY_HANDLER
 *
 *	arg0 - thread pointer saved by SET_RECOVERY_HANDLER
 *	arg1 - old recovery handler saved by SET_RECOVERY_HANDLER
 */
.macro CLEAR_RECOVERY_HANDLER
	str	$1, [$0, TH_RECOVER]		// Restore the previous recovery handler
.endmacro
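/*
 * Conceptually, the pair of macros brackets a guarded user access like this
 * (hedged C sketch; the field name and handler type are assumptions, standing
 * in for the TH_RECOVER slot in the thread structure):
 *
 *	uintptr_t previous = thread->recover;		// SET_RECOVERY_HANDLER: save old handler
 *	thread->recover = (uintptr_t)fault_label;	// faults now branch to fault_label
 *	//	... access user memory that may fault ...
 *	thread->recover = previous;			// CLEAR_RECOVERY_HANDLER: restore
 */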
copyio_error:
	CLEAR_RECOVERY_HANDLER x10, x11
	mov	x0, #EFAULT			// Return an EFAULT error
/*
 * int _bcopyin(const char *src, char *dst, vm_size_t len)
 */
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	/* Fixup the len and test for completion */
	CLEAR_RECOVERY_HANDLER x10, x11
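/*
 * Shape of the copy, as a hedged C sketch (illustrative name; the real routine
 * additionally runs under the recovery handler set above, so a faulting user
 * address returns EFAULT rather than crashing):
 *
 *	#include <string.h>
 *	static void bcopyin_sketch(const char *src, char *dst, size_t len)
 *	{
 *		while (len >= 16) {			// mirrors the ldp/stp pair
 *			memcpy(dst, src, 16);
 *			src += 16; dst += 16; len -= 16;
 *		}
 *		while (len-- > 0)			// bytewise tail / short copies
 *			*dst++ = *src++;
 *	}
 */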
/*
 * int _copyin_word(const char *src, uint64_t *dst, vm_size_t len)
 */
	.globl EXT(_copyin_word)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	b	L_copyin_word_store
	CLEAR_RECOVERY_HANDLER x10, x11
/*
 * int _bcopyout(const char *src, char *dst, vm_size_t len)
 */
	.globl EXT(_bcopyout)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	/* Fixup the len and test for completion */
	CLEAR_RECOVERY_HANDLER x10, x11
/*
 * int _bcopyinstr(
 *	const user_addr_t user_addr,
 *	char *kernel_addr,
 *	vm_size_t max,
 *	vm_size_t *actual)
 */
	.globl EXT(_bcopyinstr)
	adr	x4, Lcopyinstr_error		// Get address for recover
	mrs	x10, TPIDR_EL1			// Get thread pointer
	ldr	x11, [x10, TH_RECOVER]		// Save previous recover
	str	x4, [x10, TH_RECOVER]		// Store new recover
	mov	x4, xzr				// x4 - total bytes copied
	ldrb	w5, [x0], #1			// Load a byte from the user source
	strb	w5, [x1], #1			// Store a byte to the kernel dest
	add	x4, x4, #1			// Increment bytes copied
	cbz	x5, Lcopyinstr_done		// If this byte is null, we're done
	cmp	x4, x2				// If we're out of space, return an error
	mov	x5, #ENAMETOOLONG		// Set current byte to error code for later return
Lcopyinstr_done:
	str	x4, [x3]			// Return number of bytes copied
	mov	x0, x5				// Set error code (0 on success, ENAMETOOLONG on failure)
Lcopyinstr_error:
	mov	x0, #EFAULT			// Return EFAULT on error
	str	x11, [x10, TH_RECOVER]		// Restore old recover
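/*
 * Hedged C sketch of the loop above (illustrative; EFAULT is produced by the
 * recovery handler path, which this sketch does not model):
 *
 *	#include <errno.h>
 *	static int bcopyinstr_sketch(const char *user, char *kern, size_t max, size_t *actual)
 *	{
 *		size_t copied = 0;
 *		int err = ENAMETOOLONG;			// assume overflow until a NUL is seen
 *		while (copied < max) {
 *			char c = *user++;		// may fault in the real routine
 *			kern[copied++] = c;
 *			if (c == '\0') { err = 0; break; }
 *		}
 *		*actual = copied;			// bytes copied, including the NUL if any
 *		return err;
 *	}
 */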
/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
 *
 *	Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
 *	either user or kernel memory, or 8 bytes (AArch32) from user only.
 *
 *	x0 : address of frame to copy
 *	x1 : kernel address at which to store data
 *	w2 : whether to copy an AArch32 or AArch64 frame
 *	x5 : temp (kernel virtual base)
 *	x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 *	x11 : old recovery function (set by SET_RECOVERY_HANDLER)
 *	x12, x13 : backtrace data
 */
	.globl EXT(copyinframe)
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	cbnz	w2, Lcopyinframe64		// Check frame size
	adrp	x5, EXT(gVirtBase)@page		// For 32-bit frame, make sure we're not trying to copy from kernel
	add	x5, x5, EXT(gVirtBase)@pageoff
	cmp	x5, x0				// See if address is in kernel virtual range
	b.hi	Lcopyinframe32			// If below kernel virtual range, proceed
	mov	w0, #EFAULT			// Should never have a 32-bit frame in kernel virtual range
Lcopyinframe32:
	ldr	x12, [x0]			// Copy 8 bytes
	mov	w0, #0				// Success
Lcopyinframe64:
	mov	x3, VM_MIN_KERNEL_ADDRESS	// Check if kernel address
	orr	x9, x0, TBI_MASK		// Hide tags in address comparison
	cmp	x9, x3				// If in kernel address range, skip tag test
	b.hs	Lcopyinframe_valid
	tst	x0, TBI_MASK			// Detect tagged pointers
	b.eq	Lcopyinframe_valid
	mov	w0, #EFAULT			// Tagged address, fail
Lcopyinframe_valid:
	ldp	x12, x13, [x0]			// Copy 16 bytes
	mov	w0, #0				// Success
	CLEAR_RECOVERY_HANDLER x10, x11
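/*
 * For the 64-bit path, the address screening above amounts to the following
 * hedged C sketch (the constants are the ones used by the assembly; the
 * function name is illustrative):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *	static bool frame_addr_ok(uint64_t addr)
 *	{
 *		if ((addr | TBI_MASK) >= VM_MIN_KERNEL_ADDRESS)
 *			return true;			// kernel range: tag bits are irrelevant
 *		return (addr & TBI_MASK) == 0;		// user range: reject tagged pointers
 *	}
 */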
/*
 * int _emulate_swp(user_addr_t addr, uint32_t newval, uint32_t *oldval)
 *
 *	Securely emulates the swp instruction removed from ARMv8.
 *	Returns true on success.
 *	Returns false if the user address is not user accessible.
 *
 *	x0 : address to swap
 *	x1 : new value to store
 *	x2 : address to save old value
 *	x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 *	x11 : old recovery handler (set by SET_RECOVERY_HANDLER)
 *	x12 : interrupt state
 */
	.globl EXT(_emulate_swp)
	SET_RECOVERY_HANDLER x10, x11, x3, swp_error
Lswp_try:
	ldxr	w3, [x0]			// Load data at target address
	stxr	w4, w1, [x0]			// Store new value to target address
	cbnz	w4, Lswp_try			// Retry if store failed
	str	w3, [x2]			// Save old value
	mov	x13, #1				// Set successful return value
	mov	x0, x13				// Set return value
	CLEAR_RECOVERY_HANDLER x10, x11
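/*
 * At the C level the load-exclusive/store-exclusive retry loop is equivalent
 * to an atomic exchange (hedged sketch; the real routine also runs under the
 * recovery handler, so a faulting user address returns 0 instead of trapping):
 *
 *	#include <stdint.h>
 *	static int emulate_swp_sketch(uint32_t *addr, uint32_t newval, uint32_t *oldval)
 *	{
 *		*oldval = __atomic_exchange_n(addr, newval, __ATOMIC_RELAXED);
 *		return 1;				// success (0 on fault in the real routine)
 *	}
 */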
/*
 * int _emulate_swpb(user_addr_t addr, uint32_t newval, uint32_t *oldval)
 *
 *	Securely emulates the swpb instruction removed from ARMv8.
 *	Returns true on success.
 *	Returns false if the user address is not user accessible.
 *
 *	x0 : address to swap
 *	x1 : new value to store
 *	x2 : address to save old value
 *	x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 *	x11 : old recovery handler (set by SET_RECOVERY_HANDLER)
 *	x12 : interrupt state
 */
	.globl EXT(_emulate_swpb)
	SET_RECOVERY_HANDLER x10, x11, x3, swp_error
Lswpb_try:
	ldxrb	w3, [x0]			// Load data at target address
	stxrb	w4, w1, [x0]			// Store new value to target address
	cbnz	w4, Lswpb_try			// Retry if store failed
	str	w3, [x2]			// Save old value
	mov	x13, #1				// Set successful return value
	mov	x0, x13				// Set return value
	CLEAR_RECOVERY_HANDLER x10, x11
swp_error:
	mov	x0, xzr				// Return false
	CLEAR_RECOVERY_HANDLER x10, x11
/*
 * uint32_t arm_debug_read_dscr(void)
 */
	.globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 *	Set debug registers to match the current thread state
 *	(NULL to disable). Assume 6 breakpoints and 2
 *	watchpoints, since that has been the case in all cores
 *	to date.
 */
	.globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
#if defined(APPLE_ARM64_ARCH_FAMILY)
/*
 * Note: still have to ISB before executing wfi!
 */
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
	// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
	mrs	x0, ARM64_REG_HID2		// Read HID2
	orr	x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch)	// Set HID.DisableMTLBPrefetch
	msr	ARM64_REG_HID2, x0		// Write HID2
#if __ARM_GLOBAL_SLEEP_BIT__
	mrs	x1, ARM64_REG_ACC_OVRD
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
	msr	ARM64_REG_ACC_OVRD, x1
#else
	mov	x1, ARM64_REG_CYC_CFG_deepSleep
	msr	ARM64_REG_CYC_CFG, x1
#endif /* __ARM_GLOBAL_SLEEP_BIT__ */

	// Set "OK to power down" (<rdar://problem/12390433>)
	mrs	x0, ARM64_REG_CYC_OVRD
	orr	x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
	msr	ARM64_REG_CYC_OVRD, x0
/*
 * Force WFI to use clock gating only
 */
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	mrs	x0, ARM64_REG_CYC_OVRD
	orr	x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
	msr	ARM64_REG_CYC_OVRD, x0
#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
	.globl EXT(cyclone_typhoon_prepare_for_wfi)
LEXT(cyclone_typhoon_prepare_for_wfi)
	// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
	mrs	x0, ARM64_REG_HID2		// Read HID2
	orr	x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch)	// Set HID.DisableMTLBPrefetch
	msr	ARM64_REG_HID2, x0		// Write HID2
	.globl EXT(cyclone_typhoon_return_from_wfi)
LEXT(cyclone_typhoon_return_from_wfi)
	// <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
	mrs	x0, ARM64_REG_HID2		// Read HID2
	mov	x1, #(ARM64_REG_HID2_disMMUmtlbPrefetch)
	bic	x0, x0, x1			// Clear HID.DisableMTLBPrefetch
	msr	ARM64_REG_HID2, x0		// Write HID2
#define HID0_DEFEATURES_1	0x0000a0c000064010ULL
#define HID1_DEFEATURES_1	0x000000004005bf20ULL
#define HID2_DEFEATURES_1	0x0000000000102074ULL
#define HID3_DEFEATURES_1	0x0000000000400003ULL
#define HID4_DEFEATURES_1	0x83ff00e100000268ULL
#define HID7_DEFEATURES_1	0x000000000000000eULL

#define HID0_DEFEATURES_2	0x0000a1c000020010ULL
#define HID1_DEFEATURES_2	0x000000000005d720ULL
#define HID2_DEFEATURES_2	0x0000000000002074ULL
#define HID3_DEFEATURES_2	0x0000000000400001ULL
#define HID4_DEFEATURES_2	0x8390000200000208ULL
#define HID7_DEFEATURES_2	0x0000000000000000ULL
/*
 * LOAD_UINT64
 *
 *	arg0 = target register
 *	arg1 = 64-bit constant
 */
.macro LOAD_UINT64
	movz	$0, #(($1 >> 48) & 0xffff), lsl #48
	movk	$0, #(($1 >> 32) & 0xffff), lsl #32
	movk	$0, #(($1 >> 16) & 0xffff), lsl #16
	movk	$0, #(($1) & 0xffff)
.endmacro
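/*
 * Worked example: with $1 = HID4_DEFEATURES_2 (0x8390000200000208), the macro
 * materializes the constant 16 bits at a time, most-significant halfword first:
 *
 *	($1 >> 48) & 0xffff = 0x8390	(movz, lsl #48)
 *	($1 >> 32) & 0xffff = 0x0002	(movk, lsl #32)
 *	($1 >> 16) & 0xffff = 0x0000	(movk, lsl #16)
 *	($1)       & 0xffff = 0x0208	(movk)
 */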
	.globl EXT(cpu_defeatures_set)
LEXT(cpu_defeatures_set)
	b.eq	cpu_defeatures_set_2
	b.ne	cpu_defeatures_set_ret
	LOAD_UINT64 x1, HID0_DEFEATURES_1
	mrs	x0, ARM64_REG_HID0
	msr	ARM64_REG_HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_1
	mrs	x0, ARM64_REG_HID1
	msr	ARM64_REG_HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_1
	mrs	x0, ARM64_REG_HID2
	msr	ARM64_REG_HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_1
	mrs	x0, ARM64_REG_HID3
	msr	ARM64_REG_HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_1
	mrs	x0, ARM64_REG_HID4
	msr	ARM64_REG_HID4, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_1
	mrs	x0, ARM64_REG_HID7
	msr	ARM64_REG_HID7, x0
	b	cpu_defeatures_set_ret
cpu_defeatures_set_2:
	LOAD_UINT64 x1, HID0_DEFEATURES_2
	mrs	x0, ARM64_REG_HID0
	msr	ARM64_REG_HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_2
	mrs	x0, ARM64_REG_HID1
	msr	ARM64_REG_HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_2
	mrs	x0, ARM64_REG_HID2
	msr	ARM64_REG_HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_2
	mrs	x0, ARM64_REG_HID3
	msr	ARM64_REG_HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_2
	mrs	x0, ARM64_REG_HID4
	msr	ARM64_REG_HID4, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_2
	mrs	x0, ARM64_REG_HID7
	msr	ARM64_REG_HID7, x0
	b	cpu_defeatures_set_ret
cpu_defeatures_set_ret:
/*
 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
 *				uintptr_t arg2, uintptr_t arg3)
 *
 *	Call the EL3 monitor with 4 arguments in registers.
 *	The monitor interface maintains the same ABI as the C function call
 *	standard: callee-saved registers are preserved, temporary registers
 *	are not, and parameters and results are passed in the usual manner.
 */
	.globl EXT(monitor_call)

/* vim: set sw=4 ts=4: */