+#if XNU_MONITOR
+#if __APRR_SUPPORTED__
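+/*
+ * Vectors used when an exception is taken while the PPL, rather than the
+ * kernel, is executing. Each one funnels through
+ * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE into the matching fleh_*_from_ppl
+ * preflight routine below; the synchronous case stays on SP1, the others
+ * switch to SP0.
+ */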
+ .text
+ .align 2
+el1_sp0_synchronous_vector_not_in_kernel_mode:
+ EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_synchronous_vector_kernel, fleh_synchronous_from_ppl, STAY_ON_SP1
+
+ .text
+ .align 2
+el1_sp0_fiq_vector_not_in_kernel_mode:
+ EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_fiq_vector_kernel, fleh_fiq_from_ppl, SWITCH_TO_SP0
+
+ .text
+ .align 2
+el1_sp0_irq_vector_not_in_kernel_mode:
+ EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_irq_vector_kernel, fleh_irq_from_ppl, SWITCH_TO_SP0
+
+ .text
+ .align 2
+el1_sp0_serror_vector_not_in_kernel_mode:
+ EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_serror_vector_kernel, fleh_serror_from_ppl, SWITCH_TO_SP0
+#endif /* __APRR_SUPPORTED__ */
+
+/*
+ * Functions to preflight the fleh handlers when the PPL has taken an exception;
+ * mostly concerned with setting up state for the normal fleh code.
+ */
+fleh_synchronous_from_ppl:
+ /* Save x0. */
+ mov x15, x0
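+ /* (x0 must reach the fleh handler unmodified, so x15 is used as scratch here.) */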
+
+ /* Grab the ESR. */
+ mrs x1, ESR_EL1 // Get the exception syndrome
+
+ /* If the stack pointer is corrupt, it will manifest either as a data abort
+ * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
+ * these quickly by testing bit 5 of the exception class.
+ */
+ tbz x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
+ mrs x0, SP_EL0 // Get SP_EL0
+
+ /* Perform high level checks for stack corruption. */
+ and x1, x1, #ESR_EC_MASK // Mask the exception class
+ mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
+ cmp x1, x2 // If we have a stack alignment exception
+ b.eq Lcorrupt_ppl_stack // ...the stack is definitely corrupted
+ mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
+ cmp x1, x2 // If we have a data abort, we need to
+ b.ne Lvalid_ppl_stack // ...validate the stack pointer
+
+Ltest_pstack:
+ /* Bounds check the PPL stack. */
+ adrp x10, EXT(pmap_stacks_start)@page
+ ldr x10, [x10, #EXT(pmap_stacks_start)@pageoff]
+ adrp x11, EXT(pmap_stacks_end)@page
+ ldr x11, [x11, #EXT(pmap_stacks_end)@pageoff]
+ cmp x0, x10
+ b.lo Lcorrupt_ppl_stack
+ cmp x0, x11
+ b.hi Lcorrupt_ppl_stack
+
+Lvalid_ppl_stack:
+ /* Restore x0. */
+ mov x0, x15
+
+ /* Switch back to the kernel stack. */
+ msr SPSel, #0
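+ /* SPSel #0 selects SP_EL0; the kernel SP saved at PPL entry is reloaded below. */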
+ GET_PMAP_CPU_DATA x5, x6, x7
+ ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
+ mov sp, x6
+
+ /* Hand off to the synch handler. */
+ b EXT(fleh_synchronous)
+
+Lcorrupt_ppl_stack:
+ /* Restore x0. */
+ mov x0, x15
+
+ /* Hand off to the invalid stack handler. */
+ b fleh_invalid_stack
+
+fleh_fiq_from_ppl:
+ SWITCH_TO_INT_STACK
+ b EXT(fleh_fiq)
+
+fleh_irq_from_ppl:
+ SWITCH_TO_INT_STACK
+ b EXT(fleh_irq)
+
+fleh_serror_from_ppl:
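+ /* Switch back to the kernel stack saved at PPL entry before handing off. */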
+ GET_PMAP_CPU_DATA x5, x6, x7
+ ldr x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
+ mov sp, x6
+ b EXT(fleh_serror)
+
+
+#if XNU_MONITOR && __APRR_SUPPORTED__
+/*
+ * aprr_ppl_enter
+ *
+ * Invokes the PPL.
+ * x15 - The index of the requested PPL function.
+ */
+ .text
+ .align 2
+ .globl EXT(aprr_ppl_enter)
+LEXT(aprr_ppl_enter)
+ /* Push a frame. */
+ ARM64_STACK_PROLOG
+ stp x20, x21, [sp, #-0x20]!
+ stp x29, x30, [sp, #0x10]
+ add x29, sp, #0x10
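+ /* x20/x21 are preserved here; they later hold the saved DAIF state, the
+  * old stack pointer, and the stashed return value/reason. */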
+
+ /* Increase the preemption count. */
+ mrs x10, TPIDR_EL1
+ ldr w12, [x10, ACT_PREEMPT_CNT]
+ add w12, w12, #1
+ str w12, [x10, ACT_PREEMPT_CNT]
+
+ /* Is the PPL currently locked down? */
+ adrp x13, EXT(pmap_ppl_locked_down)@page
+ add x13, x13, EXT(pmap_ppl_locked_down)@pageoff
+ ldr w14, [x13]
+ cmp w14, wzr
+
+ /* If not, just perform the call in the current context. */
+ b.eq EXT(ppl_bootstrap_dispatch)
+
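+ /* Record that we are entering from kernel (not exception) context; this
+  * is validated at Lppl_mark_cpu_as_dispatching. */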
+ mov w10, #PPL_STATE_KERNEL
+ b Ldisable_aif_and_enter_ppl
+
+ /* We align this to land the next few instructions on their own page. */
+ .section __PPLTRAMP,__text,regular,pure_instructions
+ .align 14
+ .space (16*1024)-(4*8) // 8 insns
+
+ /*
+ * This label is used by exception handlers that are trying to return
+ * to the PPL.
+ */
+Ldisable_aif_and_enter_ppl:
+ /* We must trampoline to the PPL context; disable AIF. */
+ mrs x20, DAIF
+ msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+
+ .globl EXT(ppl_no_exception_start)
+LEXT(ppl_no_exception_start)
+ /* Switch APRR_EL1 to PPL mode. */
+ MOV64 x14, APRR_EL1_PPL
+ msr APRR_EL1, x14
+
+ /* This ISB should be the last instruction on a page. */
+ // TODO: can we static assert this?
+ isb
+#endif /* XNU_MONITOR && __APRR_SUPPORTED__ */
+
+
+ // x15: ppl call number
+ // w10: ppl_state
+ // x20: gxf_enter caller's DAIF
+ .globl EXT(ppl_trampoline_start)
+LEXT(ppl_trampoline_start)
+
+#if __APRR_SUPPORTED__
+ /* Squash AIF AGAIN, because someone may have attacked us. */
+ msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+#endif /* __APRR_SUPPORTED__ */
+
+#if __APRR_SUPPORTED__
+ /* Verify the state of APRR_EL1. */
+ MOV64 x14, APRR_EL1_PPL
+ mrs x21, APRR_EL1
+#else /* __APRR_SUPPORTED__ */
+#error "XPRR configuration error"
+#endif /* __APRR_SUPPORTED__ */
+ cmp x14, x21
+ b.ne Lppl_fail_dispatch
+
+ /* Verify the request ID. */
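+ /* (x15 indexes ppl_handler_table; anything >= PMAP_COUNT is rejected.) */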
+ cmp x15, PMAP_COUNT
+ b.hs Lppl_fail_dispatch
+
+ GET_PMAP_CPU_DATA x12, x13, x14
+
+ /* Mark this CPU as being in the PPL. */
+ ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+ cmp w9, #PPL_STATE_KERNEL
+ b.eq Lppl_mark_cpu_as_dispatching
+
+ /* Check to see if we are trying to trap from within the PPL. */
+ cmp w9, #PPL_STATE_DISPATCH
+ b.eq Lppl_fail_dispatch_ppl
+
+
+ /* Ensure that we are returning from an exception. */
+ cmp w9, #PPL_STATE_EXCEPTION
+ b.ne Lppl_fail_dispatch
+
+ // w10 (the prior PPL state) is set by CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+ cmp w10, #PPL_STATE_EXCEPTION
+ b.ne Lppl_fail_dispatch
+
+ /* This is an exception return; set the CPU to the dispatching state. */
+ mov w9, #PPL_STATE_DISPATCH
+ str w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+ /* Find the save area, and return to the saved PPL context. */
+ ldr x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
+ mov sp, x0
+#if __APRR_SUPPORTED__
+ b Lexception_return_restore_registers
+#else
+ b EXT(return_to_ppl)
+#endif /* __APRR_SUPPORTED__ */
+
+Lppl_mark_cpu_as_dispatching:
+ cmp w10, #PPL_STATE_KERNEL
+ b.ne Lppl_fail_dispatch
+
+ /* Mark the CPU as dispatching. */
+ mov w13, #PPL_STATE_DISPATCH
+ str w13, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+ /* Switch to the regular PPL stack. */
+ // TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
+ ldr x9, [x12, PMAP_CPU_DATA_PPL_STACK]
+
+ // SP0 is thread stack here
+ mov x21, sp
+ // SP0 is now PPL stack
+ mov sp, x9
+
+ /* Save the old stack pointer off in case we need it. */
+ str x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
+
+ /* Get the handler for the request */
+ adrp x9, EXT(ppl_handler_table)@page
+ add x9, x9, EXT(ppl_handler_table)@pageoff
+ add x9, x9, x15, lsl #3
+ ldr x10, [x9]
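+ /* x10 now holds the handler pointer; x9 (the table slot address) is kept
+  * as the pointer-auth discriminator for ppl_dispatch's blraa when pointer
+  * authentication is enabled. */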
+
+ /* Branch to the code that will invoke the PPL request. */
+ b EXT(ppl_dispatch)
+
+Lppl_fail_dispatch_ppl:
+ /* Switch back to the kernel stack. */
+ ldr x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
+ mov sp, x10
+
+Lppl_fail_dispatch:
+ /* Indicate that we failed. */
+ mov x15, #PPL_EXIT_BAD_CALL
+
+ /* Move the DAIF bits into the expected register. */
+ mov x10, x20
+
+ /* Return to kernel mode. */
+ b ppl_return_to_kernel_mode
+
+Lppl_dispatch_exit:
+ /* Indicate that we are cleanly exiting the PPL. */
+ mov x15, #PPL_EXIT_DISPATCH
+
+ /* Switch back to the original (kernel thread) stack. */
+ mov sp, x21
+
+ /* Move the saved DAIF bits. */
+ mov x10, x20
+
+ /* Clear the old stack pointer. */
+ str xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
+
+ /*
+ * Mark the CPU as no longer being in the PPL. We spin if our state
+ * machine is broken.
+ */
+ ldr w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+ cmp w9, #PPL_STATE_DISPATCH
+ b.ne .
+ mov w9, #PPL_STATE_KERNEL
+ str w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+ /* Return to the kernel. */
+ b ppl_return_to_kernel_mode
+
+#if __APRR_SUPPORTED__
+ /* We align this to land the next few instructions on their own page. */
+ .align 14
+ .space (16*1024)-(4*5) // 5 insns
+
+ppl_return_to_kernel_mode:
+ /* Switch APRR_EL1 back to the kernel mode. */
+ // must be 5 instructions
+ MOV64 x14, APRR_EL1_DEFAULT
+ msr APRR_EL1, x14
+
+ .globl EXT(ppl_trampoline_end)
+LEXT(ppl_trampoline_end)
+
+ /* This should be the first instruction on a page. */
+ isb
+
+ .globl EXT(ppl_no_exception_end)
+LEXT(ppl_no_exception_end)
+ b ppl_exit
+#endif /* __APRR_SUPPORTED__ */
+
+
+ .text
+ppl_exit:
+ /*
+ * If we are dealing with an exception, hand off to the first level
+ * exception handler.
+ */
+ cmp x15, #PPL_EXIT_EXCEPTION
+ b.eq Ljump_to_fleh_handler
+
+ /* Restore the original AIF state. */
+ REENABLE_DAIF x10
+
+ /* If this was a panic call from the PPL, reinvoke panic. */
+ cmp x15, #PPL_EXIT_PANIC_CALL
+ b.eq Ljump_to_panic_trap_to_debugger
+
+ /* Load the preemption count. */
+ mrs x10, TPIDR_EL1
+ ldr w12, [x10, ACT_PREEMPT_CNT]
+
+ /* Detect underflow */
+ cbnz w12, Lno_preempt_underflow
+ b preempt_underflow
+Lno_preempt_underflow:
+
+ /* Lower the preemption count. */
+ sub w12, w12, #1
+ str w12, [x10, ACT_PREEMPT_CNT]
+
+ /* Skip ASTs if the preemption count is not zero. */
+ cbnz x12, Lppl_skip_ast_taken
+
+ /* Skip the AST check if interrupts are disabled. */
+ mrs x1, DAIF
+ tst x1, #DAIF_IRQF
+ b.ne Lppl_skip_ast_taken
+
+ /* Disable interrupts. */
+ msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)
+
+ /* If there is no urgent AST, skip the AST. */
+ ldr x12, [x10, ACT_CPUDATAP]
+ ldr x14, [x12, CPU_PENDING_AST]
+ tst x14, AST_URGENT
+ b.eq Lppl_defer_ast_taken
+
+ /* Stash our return value and return reason. */
+ mov x20, x0
+ mov x21, x15
+
+ /* Handle the AST. */
+ bl EXT(ast_taken_kernel)
+
+ /* Restore the return value and the return reason. */
+ mov x15, x21
+ mov x0, x20
+
+Lppl_defer_ast_taken:
+ /* Reenable interrupts. */
+ msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
+
+Lppl_skip_ast_taken:
+ /* Pop the stack frame. */
+ ldp x29, x30, [sp, #0x10]
+ ldp x20, x21, [sp], #0x20
+
+ /* Check to see if this was a bad request. */
+ cmp x15, #PPL_EXIT_BAD_CALL
+ b.eq Lppl_bad_call
+
+ /* Return. */
+ ARM64_STACK_EPILOG
+
+ .align 2
+Ljump_to_fleh_handler:
+ br x25
+
+ .align 2
+Ljump_to_panic_trap_to_debugger:
+ b EXT(panic_trap_to_debugger)
+
+Lppl_bad_call:
+ /* Panic. */
+ adrp x0, Lppl_bad_call_panic_str@page
+ add x0, x0, Lppl_bad_call_panic_str@pageoff
+ b EXT(panic)
+
+ .text
+ .align 2
+ .globl EXT(ppl_dispatch)
+LEXT(ppl_dispatch)
+ /*
+ * Save a couple of important registers (implementation detail; x12 has
+ * the PPL per-CPU data address; x13 is not actually interesting).
+ */
+ stp x12, x13, [sp, #-0x10]!
+
+ /* Restore the original AIF state. */
+ REENABLE_DAIF x20
+
+ /*
+ * Note that if the method is NULL, we'll blow up with a prefetch abort,
+ * but the exception vectors will deal with this properly.
+ */
+
+ /* Invoke the PPL method. */
+#ifdef HAS_APPLE_PAC
+ blraa x10, x9
+#else
+ blr x10
+#endif
+
+ /* Disable AIF. */
+ msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+
+ /* Restore those important registers. */
+ ldp x12, x13, [sp], #0x10
+
+ /* Mark this as a regular return, and hand off to the return path. */
+ b Lppl_dispatch_exit
+
+ .text
+ .align 2
+ .globl EXT(ppl_bootstrap_dispatch)
+LEXT(ppl_bootstrap_dispatch)
+ /* Verify the PPL request. */
+ cmp x15, PMAP_COUNT
+ b.hs Lppl_fail_bootstrap_dispatch
+
+ /* Get the requested PPL routine. */
+ adrp x9, EXT(ppl_handler_table)@page
+ add x9, x9, EXT(ppl_handler_table)@pageoff
+ add x9, x9, x15, lsl #3
+ ldr x10, [x9]
+
+ /* Invoke the requested PPL routine. */
+#ifdef HAS_APPLE_PAC
+ blraa x10, x9
+#else
+ blr x10
+#endif
+ /* Stash off the return value */
+ mov x20, x0
+ /* Drop the preemption count */
+ bl EXT(_enable_preemption)
+ mov x0, x20
+
+ /* Pop the stack frame. */
+ ldp x29, x30, [sp, #0x10]
+ ldp x20, x21, [sp], #0x20
+#if __has_feature(ptrauth_returns)
+ retab
+#else
+ ret
+#endif
+
+Lppl_fail_bootstrap_dispatch:
+ /* Pop our stack frame and panic. */
+ ldp x29, x30, [sp, #0x10]
+ ldp x20, x21, [sp], #0x20
+#if __has_feature(ptrauth_returns)
+ autibsp
+#endif
+ adrp x0, Lppl_bad_call_panic_str@page
+ add x0, x0, Lppl_bad_call_panic_str@pageoff
+ b EXT(panic)
+
+ .text
+ .align 2
+ .globl EXT(ml_panic_trap_to_debugger)
+LEXT(ml_panic_trap_to_debugger)
+ mrs x10, DAIF
+ msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+
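+ /* If the PPL has not been locked down yet, there is no PPL context to
+  * unwind; just restore DAIF and return. */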
+ adrp x12, EXT(pmap_ppl_locked_down)@page
+ ldr w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
+ cbz w12, Lnot_in_ppl_dispatch
+
+ LOAD_PMAP_CPU_DATA x11, x12, x13
+
+ ldr w12, [x11, PMAP_CPU_DATA_PPL_STATE]
+ cmp w12, #PPL_STATE_DISPATCH
+ b.ne Lnot_in_ppl_dispatch
+
+ /* Indicate (for the PPL->kernel transition) that we are panicking. */
+ mov x15, #PPL_EXIT_PANIC_CALL
+
+ /* Restore the old stack pointer, since we can't push onto the PPL stack after we exit the PPL. */
+ ldr x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
+ mov sp, x12
+
+ mrs x10, DAIF
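+ /* Mark this CPU's PPL state as panicked before leaving PPL mode. */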
+ mov w13, #PPL_STATE_PANIC
+ str w13, [x11, PMAP_CPU_DATA_PPL_STATE]
+
+ /* Now we are ready to exit the PPL. */
+ b ppl_return_to_kernel_mode
+Lnot_in_ppl_dispatch:
+ REENABLE_DAIF x10
+ ret
+
+ .data
+Lppl_bad_call_panic_str:
+ .asciz "ppl_dispatch: failed due to bad arguments/state"
+#else /* XNU_MONITOR */