*/
#include <machine/asm.h>
+#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <config_dtrace.h>
#include "assym.s"
#include <arm64/exception_asm.h>
+#include "dwarf_unwind.h"
#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
/* Return to the PPL. */
mov x15, #0
mov w10, #PPL_STATE_EXCEPTION
-#if __APRR_SUPPORTED__
- b Ldisable_aif_and_enter_ppl
-#else
#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
1:
.endmacro
-#if __APRR_SUPPORTED__
-/*
- * EL1_SP0_VECTOR_PPL_CHECK
- *
- * Check to see if the exception was taken by the kernel or the PPL. Falls
- * through if kernel, hands off to the given label if PPL. Expects to run on
- * SP1.
- * arg0 - Label to go to if this was a PPL exception.
- */
-.macro EL1_SP0_VECTOR_PPL_CHECK
- sub sp, sp, ARM_CONTEXT_SIZE
- stp x0, x1, [sp, SS64_X0]
- mrs x0, APRR_EL1
- MOV64 x1, APRR_EL1_DEFAULT
- cmp x0, x1
- b.ne $0
- ldp x0, x1, [sp, SS64_X0]
- add sp, sp, ARM_CONTEXT_SIZE
-.endmacro
-
-#define STAY_ON_SP1 0
-#define SWITCH_TO_SP0 1
-
-#define INVOKE_PREFLIGHT 0
-#define NO_INVOKE_PREFLIGHT 1
-
-/*
- * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
- *
- * Verify whether an exception came from the PPL or from the kernel. If it came
- * from the PPL, save off the PPL state and transition out of the PPL.
- * arg0 - Label to go to if this was a kernel exception
- * arg1 - Label to go to (after leaving the PPL) if this was a PPL exception
- * arg2 - Indicates if this should switch back to SP0
- * x0 - xPRR_EL1_BR1 read by EL1_SP0_VECTOR_PPL_CHECK
- */
-.macro EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
- /* Spill some more registers. */
- stp x2, x3, [sp, SS64_X2]
-
- /*
- * Check if the PPL is locked down; if not, we can treat this as a
- * kernel execption.
- */
- adrp x1, EXT(pmap_ppl_locked_down)@page
- ldr w1, [x1, #EXT(pmap_ppl_locked_down)@pageoff]
- cbz x1, 2f
-
- /* Ensure that APRR_EL1 is actually in PPL mode. */
- MOV64 x1, APRR_EL1_PPL
- cmp x0, x1
- b.ne .
-
- /*
- * Check if the CPU is in the PPL; if not we can treat this as a
- * kernel exception.
- */
- GET_PMAP_CPU_DATA x3, x1, x2
- ldr w1, [x3, PMAP_CPU_DATA_PPL_STATE]
- cmp x1, #PPL_STATE_KERNEL
- b.eq 2f
-
- /* Ensure that the CPU is in the expected PPL state. */
- cmp x1, #PPL_STATE_DISPATCH
- b.ne .
-
- /* Mark the CPU as dealing with an exception. */
- mov x1, #PPL_STATE_EXCEPTION
- str w1, [x3, PMAP_CPU_DATA_PPL_STATE]
-
- /* Load the bounds of the PPL trampoline. */
- adrp x0, EXT(ppl_no_exception_start)@page
- add x0, x0, EXT(ppl_no_exception_start)@pageoff
- adrp x1, EXT(ppl_no_exception_end)@page
- add x1, x1, EXT(ppl_no_exception_end)@pageoff
-
- /*
- * Ensure that the exception did not occur in the trampoline. If it
- * did, we are either being attacked or our state machine is
- * horrifically broken.
- */
- mrs x2, ELR_EL1
- cmp x2, x0
- b.lo 1f
- cmp x2, x1
- b.hi 1f
-
- /* We might be under attack; spin. */
- b .
-
-1:
- /* Get the PPL save area. */
- mov x1, x3
- ldr x0, [x3, PMAP_CPU_DATA_SAVE_AREA]
-
- /* Save our x0, x1 state. */
- ldp x2, x3, [sp, SS64_X0]
- stp x2, x3, [x0, SS64_X0]
-
- /* Restore SP1 to its original state. */
- mov x3, sp
- add sp, sp, ARM_CONTEXT_SIZE
-
- .if $2 == SWITCH_TO_SP0
- /* Switch back to SP0. */
- msr SPSel, #0
- mov x2, sp
- .else
- /* Load the SP0 value. */
- mrs x2, SP_EL0
- .endif
-
- /* Save off the stack pointer. */
- str x2, [x0, SS64_SP]
-
- INIT_SAVED_STATE_FLAVORS x0, w1, w2
-
- /* Save the context that was interrupted. */
- ldp x2, x3, [x3, SS64_X2]
- stp fp, lr, [x0, SS64_FP]
- SPILL_REGISTERS KERNEL_MODE
-
- /*
- * Stash the function we wish to be invoked to deal with the exception;
- * usually this is some preflight function for the fleh_* handler.
- */
- adrp x25, $1@page
- add x25, x25, $1@pageoff
-
- /*
- * Indicate that this is a PPL exception, and that we should return to
- * the PPL.
- */
- mov x26, #1
-
- /* Transition back to kernel mode. */
- mov x15, #PPL_EXIT_EXCEPTION
- b ppl_return_to_kernel_mode
-2:
- /* Restore SP1 state. */
- ldp x2, x3, [sp, SS64_X2]
- ldp x0, x1, [sp, SS64_X0]
- add sp, sp, ARM_CONTEXT_SIZE
-
- /* Go to the specified label (usually the original exception vector). */
- b $0
-.endmacro
-#endif /* __APRR_SUPPORTED__ */
#endif /* XNU_MONITOR */
.macro COMPARE_BRANCH_FUSION
#if defined(APPLE_ARM64_ARCH_FAMILY)
- mrs $1, ARM64_REG_HID1
+ mrs $1, HID1
.if $0 == CBF_DISABLE
orr $1, $1, ARM64_REG_HID1_disCmpBrFusion
.else
mov $2, ARM64_REG_HID1_disCmpBrFusion
bic $1, $1, $2
.endif
- msr ARM64_REG_HID1, $1
+ msr HID1, $1
.if $0 == CBF_DISABLE
isb sy
.endif
#endif /* APPLE_ARM64_ARCH_FAMILY */
.endmacro
+/*
+ * CHECK_KERNEL_STACK
+ *
+ * Verifies that the kernel stack is aligned and mapped within an expected
+ * stack address range. Note: happens before saving registers (in case we can't
+ * save to kernel stack).
+ *
+ * Expects:
+ * {x0, x1} - saved
+ * x1 - Exception syndrome
+ * sp - Saved state
+ *
+ * Note: an unused macro argument seems to be required for the \@ syntax to work.
+ *
+ */
+.macro CHECK_KERNEL_STACK unused
+ stp x2, x3, [sp, #-16]! // Save {x2-x3}
+ and x1, x1, #ESR_EC_MASK // Mask the exception class
+ mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
+ cmp x1, x2 // If we have a stack alignment exception
+ b.eq Lcorrupt_stack_\@ // ...the stack is definitely corrupted
+ mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
+ cmp x1, x2 // If we have a data abort, we need to
+ b.ne Lvalid_stack_\@ // ...validate the stack pointer
+ mrs x0, SP_EL0 // Get SP_EL0
+ mrs x1, TPIDR_EL1 // Get thread pointer
+Ltest_kstack_\@:
+ ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
+ sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
+ cmp x0, x2 // if (SP_EL0 >= kstack top)
+ b.ge Ltest_istack_\@ // jump to istack test
+ cmp x0, x3 // if (SP_EL0 > kstack bottom)
+ b.gt Lvalid_stack_\@ // stack pointer valid
+Ltest_istack_\@:
+ ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
+ ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
+ sub x3, x2, INTSTACK_SIZE_NUM // Find bottom of istack
+ cmp x0, x2 // if (SP_EL0 >= istack top)
+ b.ge Lcorrupt_stack_\@ // corrupt stack pointer
+ cmp x0, x3 // if (SP_EL0 > istack bottom)
+ b.gt Lvalid_stack_\@ // stack pointer valid
+Lcorrupt_stack_\@:
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+ sub sp, sp, ARM_CONTEXT_SIZE // Allocate exception frame
+ stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the exception frame
+ stp x2, x3, [sp, SS64_X2] // Save x2, x3 to the exception frame
+ mrs x0, SP_EL0 // Get SP_EL0
+ str x0, [sp, SS64_SP] // Save sp to the exception frame
+ INIT_SAVED_STATE_FLAVORS sp, w0, w1
+ mov x0, sp // Copy exception frame pointer to x0
+ adrp x1, fleh_invalid_stack@page // Load address for fleh
+ add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
+ b fleh_dispatch64
+Lvalid_stack_\@:
+ ldp x2, x3, [sp], #16 // Restore {x2-x3}
+.endmacro
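+
+/*
+ * A rough C-style sketch of the check above (illustrative only; kstack_top
+ * and istack_top stand for the values loaded via TH_KSTACKPTR and
+ * CPU_INTSTACK_TOP):
+ *
+ *	valid = (sp > kstack_top - KERNEL_STACK_SIZE && sp < kstack_top) ||
+ *	        (sp > istack_top - INTSTACK_SIZE_NUM && sp < istack_top);
+ *
+ * A misaligned SP (ESR_EC_SP_ALIGN) is treated as corrupt unconditionally, and
+ * the range check itself only runs for EL1 data aborts; anything that fails it
+ * lands in Lcorrupt_stack_\@, which builds an exception frame by hand and
+ * dispatches to fleh_invalid_stack.
+ */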
+
+
#if __ARM_KERNEL_PROTECT__
- .text
+ .section __DATA_CONST,__const
.align 3
.globl EXT(exc_vectors_table)
LEXT(exc_vectors_table)
* END OF EXCEPTION VECTORS PAGE *
*********************************/
+
+
.macro EL1_SP0_VECTOR
msr SPSel, #0 // Switch to SP0
sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
str x0, [sp, SS64_SP] // Save stack pointer to exception frame
- stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
INIT_SAVED_STATE_FLAVORS sp, w0, w1
mov x0, sp // Copy saved state pointer to x0
.endmacro
el1_sp0_synchronous_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
- /*
- * We do not have enough space for new instructions in this vector, so
- * jump to outside code to check if this exception was taken in the PPL.
- */
- b el1_sp0_synchronous_vector_ppl_check
-Lel1_sp0_synchronous_vector_kernel:
-#endif
- sub sp, sp, ARM_CONTEXT_SIZE // Make space on the exception stack
- stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the stack
+ stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
mrs x1, ESR_EL1 // Get the exception syndrome
/* If the stack pointer is corrupt, it will manifest either as a data abort
* (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
* these quickly by testing bit 5 of the exception class.
*/
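+	/* Both of those classes (0x25, 0x26) have bit 5 of the EC field set, so the
+	 * tbz below is only a cheap pre-filter; CHECK_KERNEL_STACK re-checks the
+	 * exact exception class before declaring the stack corrupt. */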
tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
- mrs x0, SP_EL0 // Get SP_EL0
- stp fp, lr, [sp, SS64_FP] // Save fp, lr to the stack
- str x0, [sp, SS64_SP] // Save sp to the stack
- bl check_kernel_stack
- ldp fp, lr, [sp, SS64_FP] // Restore fp, lr
+ CHECK_KERNEL_STACK
Lkernel_stack_valid:
- ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
- add sp, sp, ARM_CONTEXT_SIZE // Restore SP1
+ ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
EL1_SP0_VECTOR
adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
add x1, x1, EXT(fleh_synchronous)@pageoff
b fleh_dispatch64
el1_sp0_irq_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_irq_vector_not_in_kernel_mode
-Lel1_sp0_irq_vector_kernel:
-#endif
EL1_SP0_VECTOR
- mrs x1, TPIDR_EL1
- ldr x1, [x1, ACT_CPUDATAP]
- ldr x1, [x1, CPU_ISTACKPTR]
- mov sp, x1
+ SWITCH_TO_INT_STACK
adrp x1, EXT(fleh_irq)@page // Load address for fleh
add x1, x1, EXT(fleh_irq)@pageoff
b fleh_dispatch64
el1_sp0_fiq_vector_long:
// ARM64_TODO write optimized decrementer
-#if XNU_MONITOR && __APRR_SUPPORTED__
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_fiq_vector_not_in_kernel_mode
-Lel1_sp0_fiq_vector_kernel:
-#endif
EL1_SP0_VECTOR
- mrs x1, TPIDR_EL1
- ldr x1, [x1, ACT_CPUDATAP]
- ldr x1, [x1, CPU_ISTACKPTR]
- mov sp, x1
+ SWITCH_TO_INT_STACK
adrp x1, EXT(fleh_fiq)@page // Load address for fleh
add x1, x1, EXT(fleh_fiq)@pageoff
b fleh_dispatch64
el1_sp0_serror_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_serror_vector_not_in_kernel_mode
-Lel1_sp0_serror_vector_kernel:
-#endif
EL1_SP0_VECTOR
adrp x1, EXT(fleh_serror)@page // Load address for fleh
add x1, x1, EXT(fleh_serror)@pageoff
add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
str x0, [sp, SS64_SP] // Save stack pointer to exception frame
INIT_SAVED_STATE_FLAVORS sp, w0, w1
- stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
mov x0, sp // Copy saved state pointer to x0
.endmacro
add x1, x1, fleh_serror_sp1@pageoff
b fleh_dispatch64
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
-/**
- * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1.
- * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start.
- */
-#define SCTLR_EL1_INITIAL (SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED)
-#define SCTLR_EL1_EXPECTED ((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED)
-#endif
.macro EL0_64_VECTOR
- mov x18, #0 // Zero x18 to avoid leaking data to user SS
stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
- // enable JOP for kernel
- adrp x0, EXT(const_boot_args)@page
- add x0, x0, EXT(const_boot_args)@pageoff
- ldr x0, [x0, BA_BOOT_FLAGS]
- and x0, x0, BA_BOOT_FLAGS_DISABLE_JOP
- cbnz x0, 1f
- // if disable jop is set, don't touch SCTLR (it's already off)
- // if (!boot_args->kernel_jop_disable) {
- mrs x0, SCTLR_EL1
- tbnz x0, SCTLR_PACIA_ENABLED_SHIFT, 1f
- // turn on jop for kernel if it isn't already on
- // if (!jop_running) {
- MOV64 x1, SCTLR_JOP_KEYS_ENABLED
- orr x0, x0, x1
- msr SCTLR_EL1, x0
- isb sy
- MOV64 x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED
- cmp x0, x1
- bne .
- // }
- // }
-1:
-#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
+#if __ARM_KERNEL_PROTECT__
+ mov x18, #0 // Zero x18 to avoid leaking data to user SS
+#endif
mrs x0, TPIDR_EL1 // Load the thread register
mrs x1, SP_EL0 // Load the user stack pointer
add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
msr SPSel, #0 // Switch to SP0
stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB
- stp fp, lr, [sp, SS64_FP] // Save fp and lr to the user PCB
- mov fp, #0 // Clear the fp and lr for the
- mov lr, #0 // debugger stack frame
+ mrs x1, TPIDR_EL1 // Load the thread register
+
+
mov x0, sp // Copy the user PCB pointer to x0
+ // x1 contains thread register
.endmacro
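+/*
+ * Note: EL0_64_VECTOR deliberately leaves the thread pointer in x1 (see the
+ * comment above); the SWITCH_TO_KERN_STACK / SWITCH_TO_INT_STACK macros used
+ * below presumably consume it to locate the appropriate stack.
+ */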
el0_synchronous_vector_64_long:
- EL0_64_VECTOR
- mrs x1, TPIDR_EL1 // Load the thread register
- ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
- mov sp, x1 // Set the stack pointer to the kernel stack
+ EL0_64_VECTOR sync
+ SWITCH_TO_KERN_STACK
adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
add x1, x1, EXT(fleh_synchronous)@pageoff
b fleh_dispatch64
el0_irq_vector_64_long:
- EL0_64_VECTOR
- mrs x1, TPIDR_EL1
- ldr x1, [x1, ACT_CPUDATAP]
- ldr x1, [x1, CPU_ISTACKPTR]
- mov sp, x1 // Set the stack pointer to the kernel stack
+ EL0_64_VECTOR irq
+ SWITCH_TO_INT_STACK
adrp x1, EXT(fleh_irq)@page // load address for fleh
add x1, x1, EXT(fleh_irq)@pageoff
b fleh_dispatch64
el0_fiq_vector_64_long:
- EL0_64_VECTOR
- mrs x1, TPIDR_EL1
- ldr x1, [x1, ACT_CPUDATAP]
- ldr x1, [x1, CPU_ISTACKPTR]
- mov sp, x1 // Set the stack pointer to the kernel stack
+ EL0_64_VECTOR fiq
+ SWITCH_TO_INT_STACK
adrp x1, EXT(fleh_fiq)@page // load address for fleh
add x1, x1, EXT(fleh_fiq)@pageoff
b fleh_dispatch64
el0_serror_vector_64_long:
- EL0_64_VECTOR
- mrs x1, TPIDR_EL1 // Load the thread register
- ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
- mov sp, x1 // Set the stack pointer to the kernel stack
+ EL0_64_VECTOR serror
+ SWITCH_TO_KERN_STACK
adrp x1, EXT(fleh_serror)@page // load address for fleh
add x1, x1, EXT(fleh_serror)@pageoff
b fleh_dispatch64
-#if XNU_MONITOR && __APRR_SUPPORTED__
-el1_sp0_synchronous_vector_ppl_check:
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_synchronous_vector_not_in_kernel_mode
-
- /* Jump back to the primary exception vector if we fell through. */
- b Lel1_sp0_synchronous_vector_kernel
-#endif
/*
* check_exception_stack
mov x18, #0
b Lel1_sp1_synchronous_valid_stack
-/*
- * check_kernel_stack
- *
- * Verifies that the kernel stack is aligned and mapped within an expected
- * stack address range. Note: happens before saving registers (in case we can't
- * save to kernel stack).
- *
- * Expects:
- * {x0, x1, sp} - saved
- * x0 - SP_EL0
- * x1 - Exception syndrome
- * sp - Saved state
- */
- .text
- .align 2
-check_kernel_stack:
- stp x2, x3, [sp, SS64_X2] // Save {x2-x3}
- and x1, x1, #ESR_EC_MASK // Mask the exception class
- mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
- cmp x1, x2 // If we have a stack alignment exception
- b.eq Lcorrupt_stack // ...the stack is definitely corrupted
- mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
- cmp x1, x2 // If we have a data abort, we need to
- b.ne Lvalid_stack // ...validate the stack pointer
- mrs x1, TPIDR_EL1 // Get thread pointer
-Ltest_kstack:
- ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
- sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
- cmp x0, x2 // if (SP_EL0 >= kstack top)
- b.ge Ltest_istack // jump to istack test
- cmp x0, x3 // if (SP_EL0 > kstack bottom)
- b.gt Lvalid_stack // stack pointer valid
-Ltest_istack:
- ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
- ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
- sub x3, x2, INTSTACK_SIZE_NUM // Find bottom of istack
- cmp x0, x2 // if (SP_EL0 >= istack top)
- b.ge Lcorrupt_stack // corrupt stack pointer
- cmp x0, x3 // if (SP_EL0 > istack bottom)
- b.gt Lvalid_stack // stack pointer valid
-Lcorrupt_stack:
- INIT_SAVED_STATE_FLAVORS sp, w0, w1
- mov x0, sp // Copy exception frame pointer to x0
- adrp x1, fleh_invalid_stack@page // Load address for fleh
- add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
- ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
- b fleh_dispatch64
-Lvalid_stack:
- ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
- ret
#if defined(KERNEL_INTEGRITY_KTRR)
.text
/* 64-bit first level exception handler dispatcher.
* Completes register context saving and branches to FLEH.
* Expects:
- * {x0, x1, fp, lr, sp} - saved
+ * {x0, x1, sp} - saved
* x0 - arm_context_t
* x1 - address of FLEH
* fp - previous stack frame if EL1
cmp x23, #(PSR64_MODE_EL0)
bne 1f
+ SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS
+2:
mov x2, #0
mov x3, #0
mov x4, #0
#endif
mov x27, #0
mov x28, #0
- /* fp/lr already cleared by EL0_64_VECTOR */
+ mov fp, #0
+ mov lr, #0
1:
mov x21, x0 // Copy arm_context_t pointer to x21
mov x26, #0
#endif
-#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
+#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
b.ne 1f // kernel mode, so skip precise time update
PUSH_FRAME
POP_FRAME
mov x0, x21 // Reload arm_context_t pointer
1:
-#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
+#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
/* Dispatch to FLEH */
.align 2
.global EXT(fleh_synchronous)
LEXT(fleh_synchronous)
+
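+/* UNWIND_PROLOGUE / UNWIND_DIRECTIVES come from dwarf_unwind.h and, as the
+ * names suggest, emit unwind annotations for this handler; the details live
+ * in that header. */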
+UNWIND_PROLOGUE
+UNWIND_DIRECTIVES
+
mrs x1, ESR_EL1 // Load exception syndrome
mrs x2, FAR_EL1 // Load fault address
CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif
+ mov x28, xzr // Don't need to check PFZ if there are ASTs
b exception_return_dispatch
Lfleh_sync_load_lr:
ldr lr, [x0, SS64_LR]
b Lvalid_link_register
-
+UNWIND_EPILOGUE
+
/* Shared prologue code for fleh_irq and fleh_fiq.
 * Does any interrupt bookkeeping we may want to do
* before invoking the handler proper.
CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif
+ mov x28, #1 // Set a bit to check PFZ if there are ASTs
b exception_return_dispatch
.text
CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif
+ mov x28, #1 // Set a bit to check PFZ if there are ASTs
b exception_return_dispatch
.text
CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif
+	mov x28, xzr // Don't need to check PFZ if there are ASTs
b exception_return_dispatch
/*
exception_return_dispatch:
ldr w0, [x21, SS64_CPSR]
tst w0, PSR64_MODE_EL_MASK
- b.ne return_to_kernel // return to kernel if M[3:2] > 0
+ b.ne EXT(return_to_kernel) // return to kernel if M[3:2] > 0
b return_to_user
.text
.align 2
-return_to_kernel:
+ .global EXT(return_to_kernel)
+LEXT(return_to_kernel)
tbnz w0, #DAIF_IRQF_SHIFT, exception_return // Skip AST check if IRQ disabled
mrs x3, TPIDR_EL1 // Load thread pointer
ldr w1, [x3, ACT_PREEMPT_CNT] // Load preemption count
#if CONFIG_DTRACE
bl EXT(dtrace_thread_bootstrap)
#endif
- b EXT(thread_exception_return)
+ b EXT(arm64_thread_exception_return)
.text
- .globl EXT(thread_exception_return)
-LEXT(thread_exception_return)
+ .globl EXT(arm64_thread_exception_return)
+LEXT(arm64_thread_exception_return)
mrs x0, TPIDR_EL1
add x21, x0, ACT_CONTEXT
ldr x21, [x21]
+ mov x28, xzr
//
- // Fall Through to return_to_user from thread_exception_return.
+ // Fall Through to return_to_user from arm64_thread_exception_return.
// Note that if we move return_to_user or insert a new routine
- // below thread_exception_return, the latter will need to change.
+ // below arm64_thread_exception_return, the latter will need to change.
//
.text
+/* x21 is always the machine context pointer when we get here
+ * x28 is a bit indicating whether or not we should check if pc is in pfz */
return_to_user:
check_user_asts:
- mrs x3, TPIDR_EL1 // Load thread pointer
+ mrs x3, TPIDR_EL1 // Load thread pointer
movn w2, #0
str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
#if MACH_ASSERT
ldr w0, [x3, TH_RWLOCK_CNT]
- cbz w0, 1f // Detect unbalance RW lock/unlock
- b rwlock_count_notzero
-1:
+ cbnz w0, rwlock_count_notzero // Detect unbalanced RW lock/unlock
+
ldr w0, [x3, ACT_PREEMPT_CNT]
- cbz w0, 1f
- b preempt_count_notzero
-1:
+ cbnz w0, preempt_count_notzero // Detect unbalanced enable/disable preemption
#endif
-
+ ldr w0, [x3, TH_TMP_ALLOC_CNT]
+ cbnz w0, tmp_alloc_count_nozero // Detect KHEAP_TEMP leaks
+
msr DAIFSet, #DAIFSC_ALL // Disable exceptions
ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
ldr x0, [x4, CPU_PENDING_AST] // Get ASTs
- cbnz x0, user_take_ast // If pending ASTs, go service them
-
-#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
+ cbz x0, no_asts // If no asts, skip ahead
+
+ cbz x28, user_take_ast // If we don't need to check PFZ, just handle asts
+
+ /* At this point, we have ASTs and we need to check whether we are running in the
+ * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
+ * the PFZ since we don't want to handle getting a signal or getting suspended
+ * while holding a spinlock in userspace.
+ *
+ * If userspace was in the PFZ, we know (via coordination with the PFZ code
+ * in commpage_asm.s) that it will not be using x15 and it is therefore safe
+ * to use it to indicate to userspace to come back to take a delayed
+ * preemption, at which point the ASTs will be handled. */
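+	/* In rough C terms (a sketch of the flow below, not an authoritative
+	 * restatement):
+	 *
+	 *	if (check_pfz && commpage_is_in_pfz64(saved_state->pc)) {
+	 *		saved_state->x15 = 1;	// userspace takes a delayed preemption
+	 *		// leave the ASTs pending and continue as if there were none
+	 *	} else {
+	 *		user_take_ast();
+	 *	}
+	 */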
+ mov x28, xzr // Clear the "check PFZ" bit so that we don't do this again
+ mov x19, x0 // Save x0 since it will be clobbered by commpage_is_in_pfz64
+
+ ldr x0, [x21, SS64_PC] // Load pc from machine state
+ bl EXT(commpage_is_in_pfz64) // pc in pfz?
+ cbz x0, restore_and_check_ast // No, deal with other asts
+
+ mov x0, #1
+ str x0, [x21, SS64_X15] // Mark x15 for userspace to take delayed preemption
+ mov x0, x19 // restore x0 to asts
+ b no_asts // pretend we have no asts
+
+restore_and_check_ast:
+ mov x0, x19 // restore x0
+ b user_take_ast // Service pending asts
+no_asts:
+
+
+#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
mov x19, x3 // Preserve thread pointer across function call
PUSH_FRAME
bl EXT(timer_state_event_kernel_to_user)
POP_FRAME
mov x3, x19
-#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
+#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
/* Watchtower
ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context
ldr x0, [x3, ACT_DEBUGDATA]
- orr x1, x1, x0 // Thread debug state and live debug state both NULL?
- cbnz x1, user_set_debug_state_and_return // If one or the other non-null, go set debug state
+ cmp x0, x1
+	beq L_skip_user_set_debug_state // If the active CPU debug state already matches the thread debug state, skip re-applying it
+
+#if defined(APPLELIGHTNING)
+/* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */
+
+ ARM64_IS_PCORE x12 // if we're not a pCORE, also do nothing
+ cbz x12, 1f
+
+ mrs x12, HID1 // if any debug session ever existed, set forceNexL3ClkOn
+ orr x12, x12, ARM64_REG_HID1_forceNexL3ClkOn
+ msr HID1, x12
+1:
+
+#endif
+
+ PUSH_FRAME
+ bl EXT(arm_debug_set) // Establish thread debug state in live regs
+ POP_FRAME
+ mrs x3, TPIDR_EL1 // Reload thread pointer
+L_skip_user_set_debug_state:
+
+
b exception_return_unint_tpidr_x3
//
exception_return_unint_tpidr_x3:
mov sp, x21 // Reload the pcb pointer
- /* ARM64_TODO Reserve x18 until we decide what to do with it */
- str xzr, [sp, SS64_X18]
+exception_return_unint_tpidr_x3_dont_trash_x18:
+
#if __ARM_KERNEL_PROTECT__
/*
msr ELR_EL1, x1 // Load the return address into ELR
msr SPSR_EL1, x2 // Load the return CPSR into SPSR
msr FPSR, x3
- msr FPCR, x4 // Synchronized by ERET
-
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
- /* if eret to userspace, disable JOP */
- tbnz w2, PSR64_MODE_EL_SHIFT, Lskip_disable_jop
- adrp x4, EXT(const_boot_args)@page
- add x4, x4, EXT(const_boot_args)@pageoff
- ldr x4, [x4, BA_BOOT_FLAGS]
- and x1, x4, BA_BOOT_FLAGS_DISABLE_JOP
- cbnz x1, Lskip_disable_jop // if global JOP disabled, don't touch SCTLR (kernel JOP is already off)
- and x1, x4, BA_BOOT_FLAGS_DISABLE_USER_JOP
- cbnz x1, Ldisable_jop // if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on)
- mrs x2, TPIDR_EL1
- ldr w2, [x2, TH_DISABLE_USER_JOP]
- cbz w2, Lskip_disable_jop // if thread has JOP enabled, leave it on (kernel running with JOP on)
-Ldisable_jop:
- MOV64 x1, SCTLR_JOP_KEYS_ENABLED
- mrs x4, SCTLR_EL1
- bic x4, x4, x1
- msr SCTLR_EL1, x4
- MOV64 x1, SCTLR_EL1_EXPECTED
- cmp x4, x1
- bne .
-Lskip_disable_jop:
-#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)*/
+	mrs x5, FPCR
+	CMSR FPCR, x5, x4, 1 // Write FPCR only if the saved value (x4) differs from the live value (x5)
+1:
+
/* Restore arm_neon_saved_state64 */
ldp q0, q1, [x0, NS64_Q0]
POP_FRAME
b check_user_asts // Now try again
-user_set_debug_state_and_return:
-
-#if defined(APPLELIGHTNING)
-/* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */
-
- ARM64_IS_PCORE x12 // if we're not a pCORE, also do nothing
- cbz x12, 1f
-
- mrs x12, ARM64_REG_HID1 // if any debug session ever existed, set forceNexL3ClkOn
- orr x12, x12, ARM64_REG_HID1_forceNexL3ClkOn
- msr ARM64_REG_HID1, x12
-1:
-
-#endif
-
- ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
- isb // Synchronize context
- PUSH_FRAME
- bl EXT(arm_debug_set) // Establish thread debug state in live regs
- POP_FRAME
- isb
- b exception_return_unint // Continue, reloading the thread pointer
-
.text
.align 2
preempt_underflow:
str x0, [sp, #-16]! // We'll print thread pointer
ldr w0, [x0, TH_RWLOCK_CNT]
str w0, [sp, #8]
- adr x0, L_rwlock_count_notzero_str // Format string
+ adr x0, L_rwlock_count_notzero_str // Format string
CALL_EXTERN panic // Game over
L_rwlock_count_notzero_str:
str x0, [sp, #-16]! // We'll print thread pointer
ldr w0, [x0, ACT_PREEMPT_CNT]
str w0, [sp, #8]
- adr x0, L_preempt_count_notzero_str // Format string
+ adr x0, L_preempt_count_notzero_str // Format string
CALL_EXTERN panic // Game over
L_preempt_count_notzero_str:
.asciz "preemption count not 0 on thread %p (%u)"
#endif /* MACH_ASSERT */
-.align 2
+ .text
+ .align 2
+tmp_alloc_count_nozero:
+ mrs x0, TPIDR_EL1
+ CALL_EXTERN kheap_temp_leak_panic
#if __ARM_KERNEL_PROTECT__
/*
#endif /* __ARM_KERNEL_PROTECT__ */
#if XNU_MONITOR
-#if __APRR_SUPPORTED__
- .text
- .align 2
-el1_sp0_synchronous_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_synchronous_vector_kernel, fleh_synchronous_from_ppl, STAY_ON_SP1
-
- .text
- .align 2
-el1_sp0_fiq_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_fiq_vector_kernel, fleh_fiq_from_ppl, SWITCH_TO_SP0
-
- .text
- .align 2
-el1_sp0_irq_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_irq_vector_kernel, fleh_irq_from_ppl, SWITCH_TO_SP0
-
- .text
- .align 2
-el1_sp0_serror_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_serror_vector_kernel, fleh_serror_from_ppl, SWITCH_TO_SP0
-#endif /* __APRR_SUPPORTED__ */
/*
* Functions to preflight the fleh handlers when the PPL has taken an exception;
b fleh_invalid_stack
fleh_fiq_from_ppl:
- mrs x1, TPIDR_EL1
- ldr x1, [x1, ACT_CPUDATAP]
- ldr x1, [x1, CPU_ISTACKPTR]
- mov sp, x1
+ SWITCH_TO_INT_STACK
b EXT(fleh_fiq)
fleh_irq_from_ppl:
- mrs x1, TPIDR_EL1
- ldr x1, [x1, ACT_CPUDATAP]
- ldr x1, [x1, CPU_ISTACKPTR]
- mov sp, x1
+ SWITCH_TO_INT_STACK
b EXT(fleh_irq)
fleh_serror_from_ppl:
mov sp, x6
b EXT(fleh_serror)
-/*
- * REENABLE_DAIF
- *
- * Restores the DAIF bits to their original state (well, the AIF bits at least).
- * arg0 - DAIF bits (read from the DAIF interface) to restore
- */
-.macro REENABLE_DAIF
- /* AIF enable. */
- tst $0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF)
- b.eq 3f
-
- /* IF enable. */
- tst $0, #(DAIF_IRQF | DAIF_FIQF)
- b.eq 2f
-
- /* A enable. */
- tst $0, #(DAIF_ASYNCF)
- b.eq 1f
-
- /* Enable nothing. */
- b 4f
-
- /* A enable. */
-1:
- msr DAIFClr, #(DAIFSC_ASYNCF)
- b 4f
-
- /* IF enable. */
-2:
- msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
- b 4f
-
- /* AIF enable. */
-3:
- msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)
-
- /* Done! */
-4:
-.endmacro
-
-
-#if XNU_MONITOR && __APRR_SUPPORTED__
-/*
- * aprr_ppl_enter
- *
- * Invokes the PPL
- * x15 - The index of the requested PPL function.
- */
- .text
- .align 2
- .globl EXT(aprr_ppl_enter)
-LEXT(aprr_ppl_enter)
- /* Push a frame. */
- ARM64_STACK_PROLOG
- stp x20, x21, [sp, #-0x20]!
- stp x29, x30, [sp, #0x10]
- add x29, sp, #0x10
-
- /* Increase the preemption count. */
- mrs x10, TPIDR_EL1
- ldr w12, [x10, ACT_PREEMPT_CNT]
- add w12, w12, #1
- str w12, [x10, ACT_PREEMPT_CNT]
-
- /* Is the PPL currently locked down? */
- adrp x13, EXT(pmap_ppl_locked_down)@page
- add x13, x13, EXT(pmap_ppl_locked_down)@pageoff
- ldr w14, [x13]
- cmp w14, wzr
-
- /* If not, just perform the call in the current context. */
- b.eq EXT(ppl_bootstrap_dispatch)
-
- mov w10, #PPL_STATE_KERNEL
- b Ldisable_aif_and_enter_ppl
-
- /* We align this to land the next few instructions on their own page. */
- .section __PPLTRAMP,__text,regular,pure_instructions
- .align 14
- .space (16*1024)-(4*8) // 8 insns
-
- /*
- * This label is used by exception handlers that are trying to return
- * to the PPL.
- */
-Ldisable_aif_and_enter_ppl:
- /* We must trampoline to the PPL context; disable AIF. */
- mrs x20, DAIF
- msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
-
- .globl EXT(ppl_no_exception_start)
-LEXT(ppl_no_exception_start)
- /* Switch APRR_EL1 to PPL mode. */
- MOV64 x14, APRR_EL1_PPL
- msr APRR_EL1, x14
- /* This ISB should be the last instruction on a page. */
- // TODO: can we static assert this?
- isb
-#endif /* XNU_MONITOR && __APRR_SUPPORTED__ */
// x15: ppl call number
.globl EXT(ppl_trampoline_start)
LEXT(ppl_trampoline_start)
-#if __APRR_SUPPORTED__
- /* Squash AIF AGAIN, because someone may have attacked us. */
- msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
-#endif /* __APRR_SUPPORTED__ */
-#if __APRR_SUPPORTED__
- /* Verify the state of APRR_EL1. */
- MOV64 x14, APRR_EL1_PPL
- mrs x21, APRR_EL1
-#else /* __APRR_SUPPORTED__ */
#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
cmp x14, x21
b.ne Lppl_fail_dispatch
	cmp x15, PMAP_COUNT // Check the requested PPL call number
	b.hs Lppl_fail_dispatch // Fail if it is out of range
- /* Get the PPL CPU data structure. */
GET_PMAP_CPU_DATA x12, x13, x14
/* Mark this CPU as being in the PPL. */
/* Find the save area, and return to the saved PPL context. */
ldr x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
mov sp, x0
-#if __APRR_SUPPORTED__
- b Lexception_return_restore_registers
-#else
b EXT(return_to_ppl)
-#endif /* __APRR_SUPPORTED__ */
Lppl_mark_cpu_as_dispatching:
cmp w10, #PPL_STATE_KERNEL
mov w13, #PPL_STATE_DISPATCH
str w13, [x12, PMAP_CPU_DATA_PPL_STATE]
- /* Get the handler for the request */
- adrp x9, EXT(ppl_handler_table)@page
- add x9, x9, EXT(ppl_handler_table)@pageoff
- ldr x10, [x9, x15, lsl #3]
-
/* Switch to the regular PPL stack. */
// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
ldr x9, [x12, PMAP_CPU_DATA_PPL_STACK]
// SP0 is now PPL stack
mov sp, x9
-
/* Save the old stack pointer off in case we need it. */
str x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
+ /* Get the handler for the request */
+ adrp x9, EXT(ppl_handler_table)@page
+ add x9, x9, EXT(ppl_handler_table)@pageoff
+ add x9, x9, x15, lsl #3
+ ldr x10, [x9]
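+	/* Note: x9 (the address of the table slot) is kept alongside x10 because,
+	 * when HAS_APPLE_PAC is set, ppl_dispatch authenticates the handler with
+	 * "blraa x10, x9", i.e. the slot address acts as the PAC discriminator. */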
+
/* Branch to the code that will invoke the PPL request. */
b EXT(ppl_dispatch)
/* Return to the kernel. */
b ppl_return_to_kernel_mode
-#if __APRR_SUPPORTED__
- /* We align this to land the next few instructions on their own page. */
- .align 14
- .space (16*1024)-(4*5) // 5 insns
-
-ppl_return_to_kernel_mode:
- /* Switch APRR_EL1 back to the kernel mode. */
- // must be 5 instructions
- MOV64 x14, APRR_EL1_DEFAULT
- msr APRR_EL1, x14
-
- .globl EXT(ppl_trampoline_end)
-LEXT(ppl_trampoline_end)
-
- /* This should be the first instruction on a page. */
- isb
-
- .globl EXT(ppl_no_exception_end)
-LEXT(ppl_no_exception_end)
- b ppl_exit
-#endif /* __APRR_SUPPORTED__ */
.text
/* Invoke the PPL method. */
#ifdef HAS_APPLE_PAC
- blraaz x10
+ blraa x10, x9
#else
blr x10
#endif
/* Get the requested PPL routine. */
adrp x9, EXT(ppl_handler_table)@page
add x9, x9, EXT(ppl_handler_table)@pageoff
- ldr x10, [x9, x15, lsl #3]
+ add x9, x9, x15, lsl #3
+ ldr x10, [x9]
/* Invoke the requested PPL routine. */
#ifdef HAS_APPLE_PAC
- blraaz x10
+ blraa x10, x9
#else
blr x10
#endif
.align 2
.globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
-#if 0
- // TODO: why would we ever want to turn interrupts back on after going down panic path?
- /* Grab the current AIF state, and disable AIF. */
mrs x10, DAIF
-#endif
msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
- // we want interrupts to stay masked after exiting PPL when calling into panic to halt system
- // x10 is used in ppl_return_to_kernel_mode restore desired DAIF state after GEXIT
- mrs x10, DAIF
+ adrp x12, EXT(pmap_ppl_locked_down)@page
+ ldr w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
+ cbz w12, Lnot_in_ppl_dispatch
+
+ LOAD_PMAP_CPU_DATA x11, x12, x13
+
+ ldr w12, [x11, PMAP_CPU_DATA_PPL_STATE]
+ cmp w12, #PPL_STATE_DISPATCH
+ b.ne Lnot_in_ppl_dispatch
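+	/* If the PPL is not locked down, or this CPU is not in PPL dispatch, there
+	 * is no PPL state to unwind: Lnot_in_ppl_dispatch just restores the saved
+	 * interrupt state and returns. */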
/* Indicate (for the PPL->kernel transition) that we are panicking. */
mov x15, #PPL_EXIT_PANIC_CALL
- /* Get the PPL per-CPU data. */
- GET_PMAP_CPU_DATA x11, x12, x13
-
/* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
ldr x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
mov sp, x12
- /*
- * Mark this CPU as being in the PPL. Halt and catch fire if our state
- * machine appears to be broken.
- */
- ldr w12, [x11, PMAP_CPU_DATA_PPL_STATE]
- cmp w12, #PPL_STATE_DISPATCH
- b.ne .
+	mrs x10, DAIF // Capture the (masked) DAIF state so interrupts stay masked after the PPL exit
mov w13, #PPL_STATE_PANIC
str w13, [x11, PMAP_CPU_DATA_PPL_STATE]
/* Now we are ready to exit the PPL. */
b ppl_return_to_kernel_mode
+Lnot_in_ppl_dispatch:
+ REENABLE_DAIF x10
+ ret
.data
Lppl_bad_call_panic_str: