#include <arm/pmap.h>
#endif
+#if XNU_MONITOR
+/*
+ * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+ *
+ * Checks if an exception was taken from the PPL, and if so, trampolines back
+ * into the PPL.
+ * x26 - 0 if the exception was taken while in the kernel, 1 if the
+ * exception was taken while in the PPL.
+ */
+.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+ cmp x26, xzr
+ b.eq 1f
+
+ /* Return to the PPL. */
+ mov x15, #0
+ mov w10, #PPL_STATE_EXCEPTION
+#if __APRR_SUPPORTED__
+ b Ldisable_aif_and_enter_ppl
+#else
+#error "XPRR configuration error"
+#endif /* __APRR_SUPPORTED__ */
+1:
+.endmacro
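+
+/*
+ * On entry to Ldisable_aif_and_enter_ppl, x15 carries the PPL call number
+ * (zero for an exception return) and w10 the PPL state being asserted;
+ * ppl_trampoline_start uses w10 to tell a fresh call (PPL_STATE_KERNEL)
+ * apart from an exception return (PPL_STATE_EXCEPTION).
+ */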
+
+#if __APRR_SUPPORTED__
+/*
+ * EL1_SP0_VECTOR_PPL_CHECK
+ *
+ * Check to see if the exception was taken by the kernel or the PPL. Falls
+ * through if kernel, hands off to the given label if PPL. Expects to run on
+ * SP1.
+ * arg0 - Label to go to if this was a PPL exception.
+ */
+.macro EL1_SP0_VECTOR_PPL_CHECK
+ sub sp, sp, ARM_CONTEXT_SIZE
+ stp x0, x1, [sp, SS64_X0]
+ mrs x0, APRR_EL1
+ MOV64 x1, APRR_EL1_DEFAULT
+ cmp x0, x1
+ b.ne $0
+ ldp x0, x1, [sp, SS64_X0]
+ add sp, sp, ARM_CONTEXT_SIZE
+.endmacro
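+
+/*
+ * The check above must be side-effect free on the fall-through (kernel) path,
+ * so its two scratch registers are spilled to the SP1 exception stack and
+ * restored before falling through; on the PPL path they stay spilled, and
+ * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE reloads them from the same frame.
+ */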
+
+#define STAY_ON_SP1 0
+#define SWITCH_TO_SP0 1
+
+#define INVOKE_PREFLIGHT 0
+#define NO_INVOKE_PREFLIGHT 1
+
+/*
+ * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
+ *
+ * Verify whether an exception came from the PPL or from the kernel. If it came
+ * from the PPL, save off the PPL state and transition out of the PPL.
+ * arg0 - Label to go to if this was a kernel exception
+ * arg1 - Label to go to (after leaving the PPL) if this was a PPL exception
+ * arg2 - Indicates if this should switch back to SP0
+ * x0 - xPRR_EL1_BR1 read by EL1_SP0_VECTOR_PPL_CHECK
+ */
+.macro EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
+ /* Spill some more registers. */
+ stp x2, x3, [sp, SS64_X2]
+
+ /*
+ * Check if the PPL is locked down; if not, we can treat this as a
+ * kernel exception.
+ */
+ adrp x1, EXT(pmap_ppl_locked_down)@page
+ ldr w1, [x1, #EXT(pmap_ppl_locked_down)@pageoff]
+ cbz x1, 2f
+
+ /* Ensure that APRR_EL1 is actually in PPL mode. */
+ MOV64 x1, APRR_EL1_PPL
+ cmp x0, x1
+ b.ne .
+
+ /*
+ * Check if the CPU is in the PPL; if not, we can treat this as a
+ * kernel exception.
+ */
+ GET_PMAP_CPU_DATA x3, x1, x2
+ ldr w1, [x3, PMAP_CPU_DATA_PPL_STATE]
+ cmp x1, #PPL_STATE_KERNEL
+ b.eq 2f
+
+ /* Ensure that the CPU is in the expected PPL state. */
+ cmp x1, #PPL_STATE_DISPATCH
+ b.ne .
+
+ /* Mark the CPU as dealing with an exception. */
+ mov x1, #PPL_STATE_EXCEPTION
+ str w1, [x3, PMAP_CPU_DATA_PPL_STATE]
+
+ /* Load the bounds of the PPL trampoline. */
+ adrp x0, EXT(ppl_no_exception_start)@page
+ add x0, x0, EXT(ppl_no_exception_start)@pageoff
+ adrp x1, EXT(ppl_no_exception_end)@page
+ add x1, x1, EXT(ppl_no_exception_end)@pageoff
+
+ /*
+ * Ensure that the exception did not occur in the trampoline. If it
+ * did, we are either being attacked or our state machine is
+ * horrifically broken.
+ */
+ mrs x2, ELR_EL1
+ cmp x2, x0
+ b.lo 1f
+ cmp x2, x1
+ b.hi 1f
+
+ /* We might be under attack; spin. */
+ b .
+
+1:
+ /* Get the PPL save area. */
+ mov x1, x3
+ ldr x0, [x3, PMAP_CPU_DATA_SAVE_AREA]
+
+ /* Save our x0, x1 state. */
+ ldp x2, x3, [sp, SS64_X0]
+ stp x2, x3, [x0, SS64_X0]
+
+ /* Restore SP1 to its original state. */
+ mov x3, sp
+ add sp, sp, ARM_CONTEXT_SIZE
+
+ .if $2 == SWITCH_TO_SP0
+ /* Switch back to SP0. */
+ msr SPSel, #0
+ mov x2, sp
+ .else
+ /* Load the SP0 value. */
+ mrs x2, SP_EL0
+ .endif
+
+ /* Save off the stack pointer. */
+ str x2, [x0, SS64_SP]
+
+ INIT_SAVED_STATE_FLAVORS x0, w1, w2
+
+ /* Save the context that was interrupted. */
+ ldp x2, x3, [x3, SS64_X2]
+ stp fp, lr, [x0, SS64_FP]
+ SPILL_REGISTERS KERNEL_MODE
+
+ /*
+ * Stash the function we wish to be invoked to deal with the exception;
+ * usually this is some preflight function for the fleh_* handler.
+ */
+ adrp x25, $1@page
+ add x25, x25, $1@pageoff
+
+ /*
+ * Indicate that this is a PPL exception, and that we should return to
+ * the PPL.
+ */
+ mov x26, #1
+
+ /* Transition back to kernel mode. */
+ mov x15, #PPL_EXIT_EXCEPTION
+ b ppl_return_to_kernel_mode
+2:
+ /* Restore SP1 state. */
+ ldp x2, x3, [sp, SS64_X2]
+ ldp x0, x1, [sp, SS64_X0]
+ add sp, sp, ARM_CONTEXT_SIZE
+
+ /* Go to the specified label (usually the original exception vector). */
+ b $0
+.endmacro
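+
+/*
+ * Exit state of the PPL branch above: the interrupted context has been saved
+ * to the per-CPU PPL save area, x25 holds the fleh_*_from_ppl preflight to
+ * invoke, x26 = 1 flags the exception as PPL-originated for
+ * CHECK_EXCEPTION_RETURN_DISPATCH_PPL, and x15 = PPL_EXIT_EXCEPTION selects
+ * the exception path in ppl_exit.
+ */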
+#endif /* __APRR_SUPPORTED__ */
+
+#endif /* XNU_MONITOR */
#define CBF_DISABLE 0
#define CBF_ENABLE 1
.endmacro
el1_sp0_synchronous_vector_long:
+#if XNU_MONITOR && __APRR_SUPPORTED__
+ /*
+ * We do not have enough space for new instructions in this vector, so
+ * jump to outside code to check if this exception was taken in the PPL.
+ */
+ b el1_sp0_synchronous_vector_ppl_check
+Lel1_sp0_synchronous_vector_kernel:
+#endif
sub sp, sp, ARM_CONTEXT_SIZE // Make space on the exception stack
stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the stack
mrs x1, ESR_EL1 // Get the exception syndrome
b fleh_dispatch64
el1_sp0_irq_vector_long:
+#if XNU_MONITOR && __APRR_SUPPORTED__
+ EL1_SP0_VECTOR_PPL_CHECK el1_sp0_irq_vector_not_in_kernel_mode
+Lel1_sp0_irq_vector_kernel:
+#endif
EL1_SP0_VECTOR
mrs x1, TPIDR_EL1
ldr x1, [x1, ACT_CPUDATAP]
el1_sp0_fiq_vector_long:
// ARM64_TODO write optimized decrementer
+#if XNU_MONITOR && __APRR_SUPPORTED__
+ EL1_SP0_VECTOR_PPL_CHECK el1_sp0_fiq_vector_not_in_kernel_mode
+Lel1_sp0_fiq_vector_kernel:
+#endif
EL1_SP0_VECTOR
mrs x1, TPIDR_EL1
ldr x1, [x1, ACT_CPUDATAP]
b fleh_dispatch64
el1_sp0_serror_vector_long:
+#if XNU_MONITOR && __APRR_SUPPORTED__
+ EL1_SP0_VECTOR_PPL_CHECK el1_sp0_serror_vector_not_in_kernel_mode
+Lel1_sp0_serror_vector_kernel:
+#endif
EL1_SP0_VECTOR
adrp x1, EXT(fleh_serror)@page // Load address for fleh
add x1, x1, EXT(fleh_serror)@pageoff
b fleh_dispatch64
+#if XNU_MONITOR && __APRR_SUPPORTED__
+el1_sp0_synchronous_vector_ppl_check:
+ EL1_SP0_VECTOR_PPL_CHECK el1_sp0_synchronous_vector_not_in_kernel_mode
+
+ /* Jump back to the primary exception vector if we fell through. */
+ b Lel1_sp0_synchronous_vector_kernel
+#endif
/*
* check_exception_stack
add sp, sp, ARM_CONTEXT_SIZE // Clean up stack
b.ne Lel1_sp1_synchronous_vector_continue
msr ELR_EL1, lr // Return to caller
- eret
-#endif /* defined(KERNEL_INTEGRITY_KTRR)*/
+ ERET_CONTEXT_SYNCHRONIZING
+#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
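+/*
+ * ERET_CONTEXT_SYNCHRONIZING (defined elsewhere in xnu) is an eret followed
+ * by a speculation barrier (sb, or isb/nop where sb is unavailable), so that
+ * nothing past the exception return executes along a mis-speculated
+ * straight-line path.
+ */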
/* 64-bit first level exception handler dispatcher.
* Completes register context saving and branches to FLEH.
mov x23, #0
mov x24, #0
mov x25, #0
+#if !XNU_MONITOR
mov x26, #0
+#endif
mov x27, #0
mov x28, #0
/* fp/lr already cleared by EL0_64_VECTOR */
mov x21, x0 // Copy arm_context_t pointer to x21
mov x22, x1 // Copy handler routine to x22
+#if XNU_MONITOR
+ /* Zero x26 to indicate that this should not return to the PPL. */
+ mov x26, #0
+#endif
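+ /* (x26 was excluded from the register-clearing block above because it now
+  * carries this flag; it is zeroed here instead, and only
+  * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE sets it to 1.) */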
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
bl EXT(sleh_synchronous)
POP_FRAME
+#if XNU_MONITOR
+ CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+#endif
b exception_return_dispatch
POP_FRAME
END_INTERRUPT_HANDLER
+#if XNU_MONITOR
+ CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+#endif
b exception_return_dispatch
POP_FRAME
END_INTERRUPT_HANDLER
+#if XNU_MONITOR
+ CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+#endif
b exception_return_dispatch
bl EXT(sleh_serror)
POP_FRAME
+#if XNU_MONITOR
+ CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+#endif
b exception_return_dispatch
mov sp, x21 // Reload the pcb pointer
/* ARM64_TODO Reserve x18 until we decide what to do with it */
- ldr x0, [x3, TH_CTH_DATA] // Load cthread data pointer
- str x0, [sp, SS64_X18] // and use it to trash x18
+ str xzr, [sp, SS64_X18]
#if __ARM_KERNEL_PROTECT__
/*
Lexception_return_restore_registers:
mov x0, sp // x0 = &pcb
// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
- AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24
+ AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24, el0_state_allowed=1
/* Restore special register state */
ldr w3, [sp, NS64_FPSR]
and x1, x4, BA_BOOT_FLAGS_DISABLE_USER_JOP
cbnz x1, Ldisable_jop // if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on)
mrs x2, TPIDR_EL1
- ldr x2, [x2, TH_DISABLE_USER_JOP]
- cbz x2, Lskip_disable_jop // if thread has JOP enabled, leave it on (kernel running with JOP on)
+ ldr w2, [x2, TH_DISABLE_USER_JOP]
+ cbz w2, Lskip_disable_jop // if thread has JOP enabled, leave it on (kernel running with JOP on)
Ldisable_jop:
MOV64 x1, SCTLR_JOP_KEYS_ENABLED
mrs x4, SCTLR_EL1
Lskip_ttbr1_switch:
#endif /* __ARM_KERNEL_PROTECT__ */
- eret
+ ERET_CONTEXT_SYNCHRONIZING
user_take_ast:
PUSH_FRAME
user_set_debug_state_and_return:
+#if defined(APPLELIGHTNING)
+/* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */
+
+ ARM64_IS_PCORE x12 // if we're not a pCORE, also do nothing
+ cbz x12, 1f
+
+ mrs x12, ARM64_REG_HID1 // if any debug session ever existed, set forceNexL3ClkOn
+ orr x12, x12, ARM64_REG_HID1_forceNexL3ClkOn
+ msr ARM64_REG_HID1, x12
+1:
+
+#endif
ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
isb // Synchronize context
LEXT(ExceptionVectorsEnd)
#endif /* __ARM_KERNEL_PROTECT__ */
+#if XNU_MONITOR
+#if __APRR_SUPPORTED__
+ .text
+ .align 2
+el1_sp0_synchronous_vector_not_in_kernel_mode:
+ EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_synchronous_vector_kernel, fleh_synchronous_from_ppl, STAY_ON_SP1
+
+ .text
+ .align 2
+el1_sp0_fiq_vector_not_in_kernel_mode:
+ EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_fiq_vector_kernel, fleh_fiq_from_ppl, SWITCH_TO_SP0
+
+ .text
+ .align 2
+el1_sp0_irq_vector_not_in_kernel_mode:
+ EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_irq_vector_kernel, fleh_irq_from_ppl, SWITCH_TO_SP0
+
+ .text
+ .align 2
+el1_sp0_serror_vector_not_in_kernel_mode:
+ EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_serror_vector_kernel, fleh_serror_from_ppl, SWITCH_TO_SP0
+#endif /* __APRR_SUPPORTED__ */
+
+/*
+ * Functions to preflight the fleh handlers when the PPL has taken an exception;
+ * mostly concerned with setting up state for the normal fleh code.
+ */
+fleh_synchronous_from_ppl:
+ /* Save x0. */
+ mov x15, x0
+
+ /* Grab the ESR. */
+ mrs x1, ESR_EL1 // Get the exception syndrome
+
+ /* If the stack pointer is corrupt, it will manifest either as a data abort
+ * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
+ * these quickly by testing bit 5 of the exception class.
+ */
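+ /* (0x25 and 0x26 both have bit 5 of the EC set, so a clear bit 5 rules out
+  * both at once; the exact comparisons below distinguish them when it is
+  * set.) */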
+ tbz x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
+ mrs x0, SP_EL0 // Get SP_EL0
+
+ /* Perform high level checks for stack corruption. */
+ and x1, x1, #ESR_EC_MASK // Mask the exception class
+ mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
+ cmp x1, x2 // If we have a stack alignment exception
+ b.eq Lcorrupt_ppl_stack // ...the stack is definitely corrupted
+ mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
+ cmp x1, x2 // If we have a data abort, we need to
+ b.ne Lvalid_ppl_stack // ...validate the stack pointer
+
+Ltest_pstack:
+ /* Bounds check the PPL stack. */
+ adrp x10, EXT(pmap_stacks_start)@page
+ ldr x10, [x10, #EXT(pmap_stacks_start)@pageoff]
+ adrp x11, EXT(pmap_stacks_end)@page
+ ldr x11, [x11, #EXT(pmap_stacks_end)@pageoff]
+ cmp x0, x10
+ b.lo Lcorrupt_ppl_stack
+ cmp x0, x11
+ b.hi Lcorrupt_ppl_stack
+
+Lvalid_ppl_stack:
+ /* Restore x0. */
+ mov x0, x15
+
+ /* Switch back to the kernel stack. */
+ msr SPSel, #0
+ GET_PMAP_CPU_DATA x5, x6, x7
+ ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
+ mov sp, x6
+
+ /* Hand off to the synch handler. */
+ b EXT(fleh_synchronous)
+
+Lcorrupt_ppl_stack:
+ /* Restore x0. */
+ mov x0, x15
+
+ /* Hand off to the invalid stack handler. */
+ b fleh_invalid_stack
+
+fleh_fiq_from_ppl:
+ mrs x1, TPIDR_EL1
+ ldr x1, [x1, ACT_CPUDATAP]
+ ldr x1, [x1, CPU_ISTACKPTR]
+ mov sp, x1
+ b EXT(fleh_fiq)
+
+fleh_irq_from_ppl:
+ mrs x1, TPIDR_EL1
+ ldr x1, [x1, ACT_CPUDATAP]
+ ldr x1, [x1, CPU_ISTACKPTR]
+ mov sp, x1
+ b EXT(fleh_irq)
+
+fleh_serror_from_ppl:
+ GET_PMAP_CPU_DATA x5, x6, x7
+ ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
+ mov sp, x6
+ b EXT(fleh_serror)
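+
+/*
+ * Note that the IRQ/FIQ preflights above move onto the CPU interrupt stack,
+ * while the synchronous and SError preflights resume on the kernel stack
+ * saved in PMAP_CPU_DATA_KERN_SAVED_SP when the PPL was entered.
+ */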
+
+/*
+ * REENABLE_DAIF
+ *
+ * Restores the DAIF bits to their original state (well, the AIF bits at least).
+ * arg0 - DAIF bits (read from the DAIF interface) to restore
+ */
+.macro REENABLE_DAIF
+ /* AIF enable. */
+ tst $0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF)
+ b.eq 3f
+
+ /* IF enable. */
+ tst $0, #(DAIF_IRQF | DAIF_FIQF)
+ b.eq 2f
+
+ /* A enable. */
+ tst $0, #(DAIF_ASYNCF)
+ b.eq 1f
+
+ /* Enable nothing. */
+ b 4f
+
+ /* A enable. */
+1:
+ msr DAIFClr, #(DAIFSC_ASYNCF)
+ b 4f
+
+ /* IF enable. */
+2:
+ msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
+ b 4f
+
+ /* AIF enable. */
+3:
+ msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)
+
+ /* Done! */
+4:
+.endmacro
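+
+/*
+ * Typical pairing, as a sketch of how the macro is used below:
+ *
+ *	mrs	x20, DAIF			// stash the caller's DAIF bits
+ *	msr	DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+ *	...					// run with AIF masked
+ *	REENABLE_DAIF	x20			// unmask only what was unmasked before
+ */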
+
+
+#if XNU_MONITOR && __APRR_SUPPORTED__
+/*
+ * aprr_ppl_enter
+ *
+ * Invokes the PPL.
+ * x15 - The index of the requested PPL function.
+ */
+ .text
+ .align 2
+ .globl EXT(aprr_ppl_enter)
+LEXT(aprr_ppl_enter)
+ /* Push a frame. */
+ ARM64_STACK_PROLOG
+ stp x20, x21, [sp, #-0x20]!
+ stp x29, x30, [sp, #0x10]
+ add x29, sp, #0x10
+
+ /* Increase the preemption count. */
+ mrs x10, TPIDR_EL1
+ ldr w12, [x10, ACT_PREEMPT_CNT]
+ add w12, w12, #1
+ str w12, [x10, ACT_PREEMPT_CNT]
+
+ /* Is the PPL currently locked down? */
+ adrp x13, EXT(pmap_ppl_locked_down)@page
+ add x13, x13, EXT(pmap_ppl_locked_down)@pageoff
+ ldr w14, [x13]
+ cmp w14, wzr
+
+ /* If not, just perform the call in the current context. */
+ b.eq EXT(ppl_bootstrap_dispatch)
+
+ mov w10, #PPL_STATE_KERNEL
+ b Ldisable_aif_and_enter_ppl
+
+ /* We align this to land the next few instructions on their own page. */
+ .section __PPLTRAMP,__text,regular,pure_instructions
+ .align 14
+ .space (16*1024)-(4*8) // 8 insns
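+
+/*
+ * The pad leaves exactly eight instruction slots at the end of this page:
+ * mrs, msr DAIFSet, the four movz/movk instructions of the MOV64 expansion,
+ * msr APRR_EL1, and the trailing isb, so the APRR switch takes effect just as
+ * execution falls through onto the PPL text that follows.
+ */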
+
+ /*
+ * This label is used by exception handlers that are trying to return
+ * to the PPL.
+ */
+Ldisable_aif_and_enter_ppl:
+ /* We must trampoline to the PPL context; disable AIF. */
+ mrs x20, DAIF
+ msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+
+ .globl EXT(ppl_no_exception_start)
+LEXT(ppl_no_exception_start)
+ /* Switch APRR_EL1 to PPL mode. */
+ MOV64 x14, APRR_EL1_PPL
+ msr APRR_EL1, x14
+
+ /* This ISB should be the last instruction on a page. */
+ // TODO: can we static assert this?
+ isb
+#endif /* XNU_MONITOR && __APRR_SUPPORTED__ */
+
+
+ // x15: ppl call number
+ // w10: ppl_state
+ // x20: gxf_enter caller's DAIF
+ .globl EXT(ppl_trampoline_start)
+LEXT(ppl_trampoline_start)
+
+#if __APRR_SUPPORTED__
+ /* Squash AIF AGAIN, because someone may have attacked us. */
+ msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+#endif /* __APRR_SUPPORTED__ */
+
+#if __APRR_SUPPORTED__
+ /* Verify the state of APRR_EL1. */
+ MOV64 x14, APRR_EL1_PPL
+ mrs x21, APRR_EL1
+#else /* __APRR_SUPPORTED__ */
+#error "XPRR configuration error"
+#endif /* __APRR_SUPPORTED__ */
+ cmp x14, x21
+ b.ne Lppl_fail_dispatch
+
+ /* Verify the request ID. */
+ cmp x15, PMAP_COUNT
+ b.hs Lppl_fail_dispatch
+
+ /* Get the PPL CPU data structure. */
+ GET_PMAP_CPU_DATA x12, x13, x14
+
+ /* Mark this CPU as being in the PPL. */
+ ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+ cmp w9, #PPL_STATE_KERNEL
+ b.eq Lppl_mark_cpu_as_dispatching
+
+ /* Check to see if we are trying to trap from within the PPL. */
+ cmp w9, #PPL_STATE_DISPATCH
+ b.eq Lppl_fail_dispatch_ppl
+
+
+ /* Ensure that we are returning from an exception. */
+ cmp w9, #PPL_STATE_EXCEPTION
+ b.ne Lppl_fail_dispatch
+
+ // w10 was set by CHECK_EXCEPTION_RETURN_DISPATCH_PPL before the trampoline
+ // was entered
+ cmp w10, #PPL_STATE_EXCEPTION
+ b.ne Lppl_fail_dispatch
+
+ /* This is an exception return; set the CPU to the dispatching state. */
+ mov w9, #PPL_STATE_DISPATCH
+ str w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+ /* Find the save area, and return to the saved PPL context. */
+ ldr x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
+ mov sp, x0
+#if __APRR_SUPPORTED__
+ b Lexception_return_restore_registers
+#else
+ b EXT(return_to_ppl)
+#endif /* __APRR_SUPPORTED__ */
+
+Lppl_mark_cpu_as_dispatching:
+ cmp w10, #PPL_STATE_KERNEL
+ b.ne Lppl_fail_dispatch
+
+ /* Mark the CPU as dispatching. */
+ mov w13, #PPL_STATE_DISPATCH
+ str w13, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+ /* Get the handler for the request */
+ adrp x9, EXT(ppl_handler_table)@page
+ add x9, x9, EXT(ppl_handler_table)@pageoff
+ ldr x10, [x9, x15, lsl #3]
+
+ /* Switch to the regular PPL stack. */
+ // TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
+ ldr x9, [x12, PMAP_CPU_DATA_PPL_STACK]
+
+ // SP0 is thread stack here
+ mov x21, sp
+ // SP0 is now PPL stack
+ mov sp, x9
+
+
+ /* Save the old stack pointer off in case we need it. */
+ str x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
+
+ /* Branch to the code that will invoke the PPL request. */
+ b EXT(ppl_dispatch)
+
+Lppl_fail_dispatch_ppl:
+ /* Switch back to the kernel stack. */
+ ldr x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
+ mov sp, x10
+
+Lppl_fail_dispatch:
+ /* Indicate that we failed. */
+ mov x15, #PPL_EXIT_BAD_CALL
+
+ /* Move the DAIF bits into the expected register. */
+ mov x10, x20
+
+ /* Return to kernel mode. */
+ b ppl_return_to_kernel_mode
+
+Lppl_dispatch_exit:
+ /* Indicate that we are cleanly exiting the PPL. */
+ mov x15, #PPL_EXIT_DISPATCH
+
+ /* Switch back to the original (kernel thread) stack. */
+ mov sp, x21
+
+ /* Move the saved DAIF bits. */
+ mov x10, x20
+
+ /* Clear the old stack pointer. */
+ str xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
+
+ /*
+ * Mark the CPU as no longer being in the PPL. We spin if our state
+ * machine is broken.
+ */
+ ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+ cmp w9, #PPL_STATE_DISPATCH
+ b.ne .
+ mov w9, #PPL_STATE_KERNEL
+ str w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+ /* Return to the kernel. */
+ b ppl_return_to_kernel_mode
+
+#if __APRR_SUPPORTED__
+ /* We align this to land the next few instructions on their own page. */
+ .align 14
+ .space (16*1024)-(4*5) // 5 insns
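+
+/*
+ * As above, the pad accounts for the five instructions that follow (the
+ * four-instruction MOV64 expansion plus msr APRR_EL1), leaving the isb after
+ * ppl_trampoline_end as the first instruction of the next page.
+ */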
+
+ppl_return_to_kernel_mode:
+ /* Switch APRR_EL1 back to the kernel mode. */
+ // must be 5 instructions
+ MOV64 x14, APRR_EL1_DEFAULT
+ msr APRR_EL1, x14
+
+ .globl EXT(ppl_trampoline_end)
+LEXT(ppl_trampoline_end)
+
+ /* This should be the first instruction on a page. */
+ isb
+
+ .globl EXT(ppl_no_exception_end)
+LEXT(ppl_no_exception_end)
+ b ppl_exit
+#endif /* __APRR_SUPPORTED__ */
+
+
+ .text
+ppl_exit:
+ /*
+ * If we are dealing with an exception, hand off to the first level
+ * exception handler.
+ */
+ cmp x15, #PPL_EXIT_EXCEPTION
+ b.eq Ljump_to_fleh_handler
+
+ /* Restore the original AIF state. */
+ REENABLE_DAIF x10
+
+ /* If this was a panic call from the PPL, reinvoke panic. */
+ cmp x15, #PPL_EXIT_PANIC_CALL
+ b.eq Ljump_to_panic_trap_to_debugger
+
+ /* Load the preemption count. */
+ mrs x10, TPIDR_EL1
+ ldr w12, [x10, ACT_PREEMPT_CNT]
+
+ /* Detect underflow */
+ cbnz w12, Lno_preempt_underflow
+ b preempt_underflow
+Lno_preempt_underflow:
+
+ /* Lower the preemption count. */
+ sub w12, w12, #1
+ str w12, [x10, ACT_PREEMPT_CNT]
+
+ /* Skip ASTs if the preemption count is not zero. */
+ cbnz x12, Lppl_skip_ast_taken
+
+ /* Skip the AST check if interrupts are disabled. */
+ mrs x1, DAIF
+ tst x1, #DAIF_IRQF
+ b.ne Lppl_skip_ast_taken
+
+ /* Disable interrupts. */
+ msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)
+
+ /* If there is no urgent AST, skip the AST. */
+ ldr x12, [x10, ACT_CPUDATAP]
+ ldr x14, [x12, CPU_PENDING_AST]
+ tst x14, AST_URGENT
+ b.eq Lppl_defer_ast_taken
+
+ /* Stash our return value and return reason. */
+ mov x20, x0
+ mov x21, x15
+
+ /* Handle the AST. */
+ bl EXT(ast_taken_kernel)
+
+ /* Restore the return value and the return reason. */
+ mov x15, x21
+ mov x0, x20
+
+Lppl_defer_ast_taken:
+ /* Reenable interrupts. */
+ msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
+
+Lppl_skip_ast_taken:
+ /* Pop the stack frame. */
+ ldp x29, x30, [sp, #0x10]
+ ldp x20, x21, [sp], #0x20
+
+ /* Check to see if this was a bad request. */
+ cmp x15, #PPL_EXIT_BAD_CALL
+ b.eq Lppl_bad_call
+
+ /* Return. */
+ ARM64_STACK_EPILOG
+
+ .align 2
+Ljump_to_fleh_handler:
+ br x25
+
+ .align 2
+Ljump_to_panic_trap_to_debugger:
+ b EXT(panic_trap_to_debugger)
+
+Lppl_bad_call:
+ /* Panic. */
+ adrp x0, Lppl_bad_call_panic_str@page
+ add x0, x0, Lppl_bad_call_panic_str@pageoff
+ b EXT(panic)
+
+ .text
+ .align 2
+ .globl EXT(ppl_dispatch)
+LEXT(ppl_dispatch)
+ /*
+ * Save a couple of important registers (implementation detail; x12 has
+ * the PPL per-CPU data address; x13 is not actually interesting).
+ */
+ stp x12, x13, [sp, #-0x10]!
+
+ /* Restore the original AIF state. */
+ REENABLE_DAIF x20
+
+ /*
+ * Note that if the method is NULL, we'll blow up with a prefetch abort,
+ * but the exception vectors will deal with this properly.
+ */
+
+ /* Invoke the PPL method. */
+#ifdef HAS_APPLE_PAC
+ blraaz x10
+#else
+ blr x10
+#endif
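+ /* (With pointer auth, blraaz authenticates x10 against the IA key with a
+  * zero discriminator before branching, so a corrupted handler pointer fails
+  * authentication rather than being executed.) */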
+
+ /* Disable AIF. */
+ msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+
+ /* Restore those important registers. */
+ ldp x12, x13, [sp], #0x10
+
+ /* Mark this as a regular return, and hand off to the return path. */
+ b Lppl_dispatch_exit
+
+ .text
+ .align 2
+ .globl EXT(ppl_bootstrap_dispatch)
+LEXT(ppl_bootstrap_dispatch)
+ /* Verify the PPL request. */
+ cmp x15, PMAP_COUNT
+ b.hs Lppl_fail_bootstrap_dispatch
+
+ /* Get the requested PPL routine. */
+ adrp x9, EXT(ppl_handler_table)@page
+ add x9, x9, EXT(ppl_handler_table)@pageoff
+ ldr x10, [x9, x15, lsl #3]
+
+ /* Invoke the requested PPL routine. */
+#ifdef HAS_APPLE_PAC
+ blraaz x10
+#else
+ blr x10
+#endif
+ /* Stash off the return value */
+ mov x20, x0
+ /* Drop the preemption count */
+ bl EXT(_enable_preemption)
+ mov x0, x20
+
+ /* Pop the stack frame. */
+ ldp x29, x30, [sp, #0x10]
+ ldp x20, x21, [sp], #0x20
+#if __has_feature(ptrauth_returns)
+ retab
+#else
+ ret
+#endif
+
+Lppl_fail_bootstrap_dispatch:
+ /* Pop our stack frame and panic. */
+ ldp x29, x30, [sp, #0x10]
+ ldp x20, x21, [sp], #0x20
+#if __has_feature(ptrauth_returns)
+ autibsp
+#endif
+ adrp x0, Lppl_bad_call_panic_str@page
+ add x0, x0, Lppl_bad_call_panic_str@pageoff
+ b EXT(panic)
+
+ .text
+ .align 2
+ .globl EXT(ml_panic_trap_to_debugger)
+LEXT(ml_panic_trap_to_debugger)
+#if 0
+ // TODO: why would we ever want to turn interrupts back on after going down panic path?
+ /* Grab the current AIF state, and disable AIF. */
+ mrs x10, DAIF
+#endif
+ msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+
+ // We want interrupts to stay masked after exiting the PPL when calling
+ // into panic to halt the system.
+ // x10 is used in ppl_return_to_kernel_mode to restore the desired DAIF
+ // state after GEXIT.
+ mrs x10, DAIF
+
+ /* Indicate (for the PPL->kernel transition) that we are panicking. */
+ mov x15, #PPL_EXIT_PANIC_CALL
+
+ /* Get the PPL per-CPU data. */
+ GET_PMAP_CPU_DATA x11, x12, x13
+
+ /* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
+ ldr x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
+ mov sp, x12
+
+ /*
+ * Mark this CPU as being in the PPL. Halt and catch fire if our state
+ * machine appears to be broken.
+ */
+ ldr w12, [x11, PMAP_CPU_DATA_PPL_STATE]
+ cmp w12, #PPL_STATE_DISPATCH
+ b.ne .
+ mov w13, #PPL_STATE_PANIC
+ str w13, [x11, PMAP_CPU_DATA_PPL_STATE]
+
+ /* Now we are ready to exit the PPL. */
+ b ppl_return_to_kernel_mode
+
+ .data
+Lppl_bad_call_panic_str:
+ .asciz "ppl_dispatch: failed due to bad arguments/state"
+#else /* XNU_MONITOR */
.text
.align 2
.globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
ret
+#endif /* XNU_MONITOR */
/* ARM64_TODO Is globals_asm.h needed? */
//#include "globals_asm.h"