X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e..refs/heads/master:/osfmk/arm64/locore.s

diff --git a/osfmk/arm64/locore.s b/osfmk/arm64/locore.s
index 5edaf67f1..6f9bf122b 100644
--- a/osfmk/arm64/locore.s
+++ b/osfmk/arm64/locore.s
@@ -27,6 +27,7 @@
  */
 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -35,25 +36,48 @@
 #include 
 #include "assym.s"
 #include 
+#include "dwarf_unwind.h"
 #if __ARM_KERNEL_PROTECT__
 #include 
 #endif
 
+#if XNU_MONITOR
+/*
+ * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+ *
+ * Checks if an exception was taken from the PPL, and if so, trampolines back
+ * into the PPL.
+ *   x26 - 0 if the exception was taken while in the kernel, 1 if the
+ *         exception was taken while in the PPL.
+ */
+.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+	cmp		x26, xzr
+	b.eq		1f
+
+	/* Return to the PPL. */
+	mov		x15, #0
+	mov		w10, #PPL_STATE_EXCEPTION
+#error "XPRR configuration error"
+1:
+.endmacro
+
+
+#endif /* XNU_MONITOR */
 
 #define	CBF_DISABLE 0
 #define	CBF_ENABLE  1
 
 .macro COMPARE_BRANCH_FUSION
 #if	defined(APPLE_ARM64_ARCH_FAMILY)
-	mrs		$1, ARM64_REG_HID1
+	mrs		$1, HID1
 	.if $0 == CBF_DISABLE
 	orr		$1, $1, ARM64_REG_HID1_disCmpBrFusion
 	.else
 	mov		$2, ARM64_REG_HID1_disCmpBrFusion
 	bic		$1, $1, $2
 	.endif
-	msr		ARM64_REG_HID1, $1
+	msr		HID1, $1
 	.if $0 == CBF_DISABLE
 	isb		sy
 	.endif
@@ -122,8 +146,67 @@
 #endif /* __ARM_KERNEL_PROTECT__ */
 .endmacro
 
+/*
+ * CHECK_KERNEL_STACK
+ *
+ * Verifies that the kernel stack is aligned and mapped within an expected
+ * stack address range. Note: happens before saving registers (in case we can't
+ * save to kernel stack).
+ *
+ * Expects:
+ *	{x0, x1} - saved
+ *	x1 - Exception syndrome
+ *	sp - Saved state
+ *
+ * Seems like we need an unused argument to the macro for the \@ syntax to work
+ *
+ */
+.macro CHECK_KERNEL_STACK unused
+	stp		x2, x3, [sp, #-16]!		// Save {x2-x3}
+	and		x1, x1, #ESR_EC_MASK		// Mask the exception class
+	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
+	cmp		x1, x2				// If we have a stack alignment exception
+	b.eq		Lcorrupt_stack_\@		// ...the stack is definitely corrupted
+	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
+	cmp		x1, x2				// If we have a data abort, we need to
+	b.ne		Lvalid_stack_\@			// ...validate the stack pointer
+	mrs		x0, SP_EL0			// Get SP_EL0
+	mrs		x1, TPIDR_EL1			// Get thread pointer
+Ltest_kstack_\@:
+	ldr		x2, [x1, TH_KSTACKPTR]		// Get top of kernel stack
+	sub		x3, x2, KERNEL_STACK_SIZE	// Find bottom of kernel stack
+	cmp		x0, x2				// if (SP_EL0 >= kstack top)
+	b.ge		Ltest_istack_\@			// jump to istack test
+	cmp		x0, x3				// if (SP_EL0 > kstack bottom)
+	b.gt		Lvalid_stack_\@			// stack pointer valid
+Ltest_istack_\@:
+	ldr		x1, [x1, ACT_CPUDATAP]		// Load the cpu data ptr
+	ldr		x2, [x1, CPU_INTSTACK_TOP]	// Get top of istack
+	sub		x3, x2, INTSTACK_SIZE_NUM	// Find bottom of istack
+	cmp		x0, x2				// if (SP_EL0 >= istack top)
+	b.ge		Lcorrupt_stack_\@		// corrupt stack pointer
+	cmp		x0, x3				// if (SP_EL0 > istack bottom)
+	b.gt		Lvalid_stack_\@			// stack pointer valid
+Lcorrupt_stack_\@:
+	ldp		x2, x3, [sp], #16
+	ldp		x0, x1, [sp], #16
+	sub		sp, sp, ARM_CONTEXT_SIZE	// Allocate exception frame
+	stp		x0, x1, [sp, SS64_X0]		// Save x0, x1 to the exception frame
+	stp		x2, x3, [sp, SS64_X2]		// Save x2, x3 to the exception frame
+	mrs		x0, SP_EL0			// Get SP_EL0
+	str		x0, [sp, SS64_SP]		// Save sp to the exception frame
+	INIT_SAVED_STATE_FLAVORS sp, w0, w1
+	mov		x0, sp				// Copy exception frame pointer to x0
+	adrp		x1, fleh_invalid_stack@page	// Load address for fleh
+	add		x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
+	b		fleh_dispatch64
+Lvalid_stack_\@:
+	ldp		x2, x3, [sp], #16		// Restore {x2-x3}
+.endmacro
+
+
 #if __ARM_KERNEL_PROTECT__
-	.text
+	.section __DATA_CONST,__const
 	.align 3
 	.globl EXT(exc_vectors_table)
 LEXT(exc_vectors_table)
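In C terms, the range test that the new CHECK_KERNEL_STACK macro performs looks roughly like the sketch below. This is illustrative only: the parameters stand in for the values the macro loads through TH_KSTACKPTR, ACT_CPUDATAP/CPU_INTSTACK_TOP, KERNEL_STACK_SIZE and INTSTACK_SIZE_NUM, and the helper name is made up.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the values loaded from the thread and per-CPU data. */
    static bool
    kernel_sp_is_valid(uint64_t sp_el0,
        uint64_t kstack_top, uint64_t kstack_size,
        uint64_t istack_top, uint64_t istack_size)
    {
            /* Valid if strictly inside (kstack_top - kstack_size, kstack_top)... */
            if (sp_el0 < kstack_top && sp_el0 > (kstack_top - kstack_size)) {
                    return true;
            }
            /* ...or strictly inside this CPU's interrupt stack. */
            if (sp_el0 < istack_top && sp_el0 > (istack_top - istack_size)) {
                    return true;
            }
            return false;   /* otherwise the macro falls through to fleh_invalid_stack */
    }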
@@ -227,34 +310,29 @@ Lel0_serror_vector_64:
  * END OF EXCEPTION VECTORS PAGE *
  *********************************/
+
+
 .macro EL1_SP0_VECTOR
 	msr		SPSel, #0			// Switch to SP0
 	sub		sp, sp, ARM_CONTEXT_SIZE	// Create exception frame
 	stp		x0, x1, [sp, SS64_X0]		// Save x0, x1 to exception frame
 	add		x0, sp, ARM_CONTEXT_SIZE	// Calculate the original stack pointer
 	str		x0, [sp, SS64_SP]		// Save stack pointer to exception frame
-	stp		fp, lr, [sp, SS64_FP]		// Save fp and lr to exception frame
 	INIT_SAVED_STATE_FLAVORS sp, w0, w1
 	mov		x0, sp				// Copy saved state pointer to x0
 .endmacro
 
 el1_sp0_synchronous_vector_long:
-	sub		sp, sp, ARM_CONTEXT_SIZE	// Make space on the exception stack
-	stp		x0, x1, [sp, SS64_X0]		// Save x0, x1 to the stack
+	stp		x0, x1, [sp, #-16]!		// Save x0 and x1 to the exception stack
 	mrs		x1, ESR_EL1			// Get the exception syndrome
 	/* If the stack pointer is corrupt, it will manifest either as a data abort
 	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
 	 * these quickly by testing bit 5 of the exception class.
 	 */
 	tbz		x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
-	mrs		x0, SP_EL0			// Get SP_EL0
-	stp		fp, lr, [sp, SS64_FP]		// Save fp, lr to the stack
-	str		x0, [sp, SS64_SP]		// Save sp to the stack
-	bl		check_kernel_stack
-	ldp		fp, lr, [sp, SS64_FP]		// Restore fp, lr
+	CHECK_KERNEL_STACK
 Lkernel_stack_valid:
-	ldp		x0, x1, [sp, SS64_X0]		// Restore x0, x1
-	add		sp, sp, ARM_CONTEXT_SIZE	// Restore SP1
+	ldp		x0, x1, [sp], #16		// Restore x0 and x1 from the exception stack
 	EL1_SP0_VECTOR
 	adrp		x1, EXT(fleh_synchronous)@page	// Load address for fleh
 	add		x1, x1, EXT(fleh_synchronous)@pageoff
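The `tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid` above works because both exception classes that can indicate a corrupt SP -- data abort without a change in EL (0x25) and SP alignment fault (0x26) -- have bit 5 of the ESR.EC field set, so a clear bit 5 lets the vector skip CHECK_KERNEL_STACK entirely. A rough C equivalent of the prefilter and of the exact class test the macro then applies (sketch; the constants follow the architectural ESR_EL1 layout):

    #include <stdbool.h>
    #include <stdint.h>

    #define ESR_EC_SHIFT      26
    #define ESR_EC_MASK       (0x3fULL << ESR_EC_SHIFT)
    #define ESR_EC_SP_ALIGN   0x26ULL   /* SP alignment fault */
    #define ESR_EC_DABORT_EL1 0x25ULL   /* data abort, same EL */

    /* Cheap prefilter: both "corrupt SP" classes have bit 5 of EC set. */
    static bool
    might_be_corrupt_stack(uint64_t esr)
    {
            return (esr & (1ULL << (5 + ESR_EC_SHIFT))) != 0;
    }

    /* The exact test CHECK_KERNEL_STACK applies afterwards. */
    static bool
    is_stack_fault_class(uint64_t esr)
    {
            uint64_t ec = (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
            return ec == ESR_EC_SP_ALIGN || ec == ESR_EC_DABORT_EL1;
    }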
@@ -262,10 +340,7 @@ Lkernel_stack_valid:
 
 el1_sp0_irq_vector_long:
 	EL1_SP0_VECTOR
-	mrs		x1, TPIDR_EL1
-	ldr		x1, [x1, ACT_CPUDATAP]
-	ldr		x1, [x1, CPU_ISTACKPTR]
-	mov		sp, x1
+	SWITCH_TO_INT_STACK
 	adrp		x1, EXT(fleh_irq)@page		// Load address for fleh
 	add		x1, x1, EXT(fleh_irq)@pageoff
 	b		fleh_dispatch64
@@ -273,10 +348,7 @@ el1_sp0_irq_vector_long:
 el1_sp0_fiq_vector_long:
 	// ARM64_TODO write optimized decrementer
 	EL1_SP0_VECTOR
-	mrs		x1, TPIDR_EL1
-	ldr		x1, [x1, ACT_CPUDATAP]
-	ldr		x1, [x1, CPU_ISTACKPTR]
-	mov		sp, x1
+	SWITCH_TO_INT_STACK
 	adrp		x1, EXT(fleh_fiq)@page		// Load address for fleh
 	add		x1, x1, EXT(fleh_fiq)@pageoff
 	b		fleh_dispatch64
@@ -293,7 +365,6 @@ el1_sp0_serror_vector_long:
 	add		x0, sp, ARM_CONTEXT_SIZE	// Calculate the original stack pointer
 	str		x0, [sp, SS64_SP]		// Save stack pointer to exception frame
 	INIT_SAVED_STATE_FLAVORS sp, w0, w1
-	stp		fp, lr, [sp, SS64_FP]		// Save fp and lr to exception frame
 	mov		x0, sp				// Copy saved state pointer to x0
 .endmacro
 
@@ -327,42 +398,12 @@ el1_sp1_serror_vector_long:
 	add		x1, x1, fleh_serror_sp1@pageoff
 	b		fleh_dispatch64
 
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
-/**
- * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1.
- * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start.
- */
-#define SCTLR_EL1_INITIAL	(SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED)
-#define SCTLR_EL1_EXPECTED	((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED)
-#endif
 
 .macro EL0_64_VECTOR
-	mov		x18, #0				// Zero x18 to avoid leaking data to user SS
 	stp		x0, x1, [sp, #-16]!		// Save x0 and x1 to the exception stack
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
-	// enable JOP for kernel
-	adrp		x0, EXT(const_boot_args)@page
-	add		x0, x0, EXT(const_boot_args)@pageoff
-	ldr		x0, [x0, BA_BOOT_FLAGS]
-	and		x0, x0, BA_BOOT_FLAGS_DISABLE_JOP
-	cbnz		x0, 1f
-	// if disable jop is set, don't touch SCTLR (it's already off)
-	// if (!boot_args->kernel_jop_disable) {
-	mrs		x0, SCTLR_EL1
-	tbnz		x0, SCTLR_PACIA_ENABLED_SHIFT, 1f
-	// turn on jop for kernel if it isn't already on
-	// if (!jop_running) {
-	MOV64		x1, SCTLR_JOP_KEYS_ENABLED
-	orr		x0, x0, x1
-	msr		SCTLR_EL1, x0
-	isb		sy
-	MOV64		x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED
-	cmp		x0, x1
-	bne		.
-	// }
-	// }
-1:
-#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
+#if __ARM_KERNEL_PROTECT__
+	mov		x18, #0				// Zero x18 to avoid leaking data to user SS
+#endif
 	mrs		x0, TPIDR_EL1			// Load the thread register
 	mrs		x1, SP_EL0			// Load the user stack pointer
 	add		x0, x0, ACT_CONTEXT		// Calculate where we store the user context pointer
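The block removed above decided, on older PAC parts without __APCFG/__APSTS support, whether EL0 entry should re-enable the kernel's JOP keys in SCTLR_EL1. A hedged C sketch of that decision follows; the boot-args structure and the bit values are placeholders, not the real BA_BOOT_FLAGS_DISABLE_JOP / SCTLR_* definitions.

    #include <stdint.h>

    /* Placeholders, not the real boot_args/SCTLR definitions. */
    struct boot_args_sketch { uint64_t boot_flags; };
    #define BA_BOOT_FLAGS_DISABLE_JOP (1ULL << 4)    /* placeholder bit */
    #define SCTLR_PACIA_ENABLED       (1ULL << 31)   /* placeholder value */
    #define SCTLR_JOP_KEYS_ENABLED    (0xfULL << 27) /* placeholder mask */

    static uint64_t
    sctlr_for_kernel_entry(const struct boot_args_sketch *ba, uint64_t sctlr)
    {
            if (ba->boot_flags & BA_BOOT_FLAGS_DISABLE_JOP) {
                    return sctlr;   /* JOP globally disabled: leave SCTLR alone */
            }
            if (sctlr & SCTLR_PACIA_ENABLED) {
                    return sctlr;   /* JOP keys already on for the kernel */
            }
            return sctlr | SCTLR_JOP_KEYS_ENABLED;  /* turn JOP on (the asm follows with isb) */
    }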
@@ -372,47 +413,38 @@ el1_sp1_serror_vector_long:
 	ldp		x0, x1, [sp], #16		// Restore x0 and x1 from the exception stack
 	msr		SPSel, #0			// Switch to SP0
 	stp		x0, x1, [sp, SS64_X0]		// Save x0, x1 to the user PCB
-	stp		fp, lr, [sp, SS64_FP]		// Save fp and lr to the user PCB
-	mov		fp, #0				// Clear the fp and lr for the
-	mov		lr, #0				// debugger stack frame
+	mrs		x1, TPIDR_EL1			// Load the thread register
+
+	mov		x0, sp				// Copy the user PCB pointer to x0
+						// x1 contains thread register
 .endmacro
 
 el0_synchronous_vector_64_long:
-	EL0_64_VECTOR
-	mrs		x1, TPIDR_EL1			// Load the thread register
-	ldr		x1, [x1, TH_KSTACKPTR]		// Load the top of the kernel stack to x1
-	mov		sp, x1				// Set the stack pointer to the kernel stack
+	EL0_64_VECTOR		sync
+	SWITCH_TO_KERN_STACK
 	adrp		x1, EXT(fleh_synchronous)@page	// Load address for fleh
 	add		x1, x1, EXT(fleh_synchronous)@pageoff
 	b		fleh_dispatch64
 
 el0_irq_vector_64_long:
-	EL0_64_VECTOR
-	mrs		x1, TPIDR_EL1
-	ldr		x1, [x1, ACT_CPUDATAP]
-	ldr		x1, [x1, CPU_ISTACKPTR]
-	mov		sp, x1				// Set the stack pointer to the kernel stack
+	EL0_64_VECTOR		irq
+	SWITCH_TO_INT_STACK
 	adrp		x1, EXT(fleh_irq)@page		// load address for fleh
 	add		x1, x1, EXT(fleh_irq)@pageoff
 	b		fleh_dispatch64
 
 el0_fiq_vector_64_long:
-	EL0_64_VECTOR
-	mrs		x1, TPIDR_EL1
-	ldr		x1, [x1, ACT_CPUDATAP]
-	ldr		x1, [x1, CPU_ISTACKPTR]
-	mov		sp, x1				// Set the stack pointer to the kernel stack
+	EL0_64_VECTOR		fiq
+	SWITCH_TO_INT_STACK
 	adrp		x1, EXT(fleh_fiq)@page		// load address for fleh
 	add		x1, x1, EXT(fleh_fiq)@pageoff
 	b		fleh_dispatch64
 
 el0_serror_vector_64_long:
-	EL0_64_VECTOR
-	mrs		x1, TPIDR_EL1			// Load the thread register
-	ldr		x1, [x1, TH_KSTACKPTR]		// Load the top of the kernel stack to x1
-	mov		sp, x1				// Set the stack pointer to the kernel stack
+	EL0_64_VECTOR		serror
+	SWITCH_TO_KERN_STACK
 	adrp		x1, EXT(fleh_serror)@page	// load address for fleh
 	add		x1, x1, EXT(fleh_serror)@pageoff
 	b		fleh_dispatch64
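The reworked EL0 vectors above split stack selection out of EL0_64_VECTOR: SWITCH_TO_KERN_STACK picks the thread's kernel stack for synchronous exceptions and SErrors, while SWITCH_TO_INT_STACK picks the per-CPU interrupt stack for IRQ/FIQ. A small illustrative C sketch of that choice (the enum and helper are not kernel API):

    #include <stdint.h>

    enum exc_source { EXC_SYNC, EXC_IRQ, EXC_FIQ, EXC_SERROR };

    static uint64_t
    stack_for_el0_exception(enum exc_source src, uint64_t kstack_top, uint64_t istack_top)
    {
            switch (src) {
            case EXC_IRQ:
            case EXC_FIQ:
                    return istack_top;  /* interrupts run on the per-CPU interrupt stack */
            default:
                    return kstack_top;  /* sync exceptions and SErrors use the thread's kernel stack */
            }
    }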
@@ -442,56 +474,6 @@ Lvalid_exception_stack:
 	mov		x18, #0
 	b		Lel1_sp1_synchronous_valid_stack
 
-/*
- * check_kernel_stack
- *
- * Verifies that the kernel stack is aligned and mapped within an expected
- * stack address range. Note: happens before saving registers (in case we can't
- * save to kernel stack).
- *
- * Expects:
- *	{x0, x1, sp} - saved
- *	x0 - SP_EL0
- *	x1 - Exception syndrome
- *	sp - Saved state
- */
-	.text
-	.align 2
-check_kernel_stack:
-	stp		x2, x3, [sp, SS64_X2]		// Save {x2-x3}
-	and		x1, x1, #ESR_EC_MASK		// Mask the exception class
-	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
-	cmp		x1, x2				// If we have a stack alignment exception
-	b.eq		Lcorrupt_stack			// ...the stack is definitely corrupted
-	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
-	cmp		x1, x2				// If we have a data abort, we need to
-	b.ne		Lvalid_stack			// ...validate the stack pointer
-	mrs		x1, TPIDR_EL1			// Get thread pointer
-Ltest_kstack:
-	ldr		x2, [x1, TH_KSTACKPTR]		// Get top of kernel stack
-	sub		x3, x2, KERNEL_STACK_SIZE	// Find bottom of kernel stack
-	cmp		x0, x2				// if (SP_EL0 >= kstack top)
-	b.ge		Ltest_istack			// jump to istack test
-	cmp		x0, x3				// if (SP_EL0 > kstack bottom)
-	b.gt		Lvalid_stack			// stack pointer valid
-Ltest_istack:
-	ldr		x1, [x1, ACT_CPUDATAP]		// Load the cpu data ptr
-	ldr		x2, [x1, CPU_INTSTACK_TOP]	// Get top of istack
-	sub		x3, x2, INTSTACK_SIZE_NUM	// Find bottom of istack
-	cmp		x0, x2				// if (SP_EL0 >= istack top)
-	b.ge		Lcorrupt_stack			// corrupt stack pointer
-	cmp		x0, x3				// if (SP_EL0 > istack bottom)
-	b.gt		Lvalid_stack			// stack pointer valid
-Lcorrupt_stack:
-	INIT_SAVED_STATE_FLAVORS sp, w0, w1
-	mov		x0, sp				// Copy exception frame pointer to x0
-	adrp		x1, fleh_invalid_stack@page	// Load address for fleh
-	add		x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
-	ldp		x2, x3, [sp, SS64_X2]		// Restore {x2-x3}
-	b		fleh_dispatch64
-Lvalid_stack:
-	ldp		x2, x3, [sp, SS64_X2]		// Restore {x2-x3}
-	ret
 
 #if defined(KERNEL_INTEGRITY_KTRR)
 	.text
@@ -524,13 +506,13 @@ check_ktrr_sctlr_trap:
 	add		sp, sp, ARM_CONTEXT_SIZE	// Clean up stack
 	b.ne		Lel1_sp1_synchronous_vector_continue
 	msr		ELR_EL1, lr			// Return to caller
-	eret
-#endif /* defined(KERNEL_INTEGRITY_KTRR)*/
+	ERET_CONTEXT_SYNCHRONIZING
+#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
 
 /* 64-bit first level exception handler dispatcher.
  * Completes register context saving and branches to FLEH.
  * Expects:
- *	{x0, x1, fp, lr, sp} - saved
+ *	{x0, x1, sp} - saved
  *	x0 - arm_context_t
  *	x1 - address of FLEH
  *	fp - previous stack frame if EL1
@@ -548,6 +530,8 @@ fleh_dispatch64:
 	cmp		x23, #(PSR64_MODE_EL0)
 	bne		1f
 
+	SANITIZE_FPCR x25, x2, 2		// x25 is set to current FPCR by SPILL_REGISTERS
+2:
 	mov		x2, #0
 	mov		x3, #0
 	mov		x4, #0
@@ -571,17 +555,24 @@
 	mov		x23, #0
 	mov		x24, #0
 	mov		x25, #0
+#if !XNU_MONITOR
 	mov		x26, #0
+#endif
 	mov		x27, #0
 	mov		x28, #0
-	/* fp/lr already cleared by EL0_64_VECTOR */
+	mov		fp, #0
+	mov		lr, #0
 1:
 
 	mov		x21, x0				// Copy arm_context_t pointer to x21
 	mov		x22, x1				// Copy handler routine to x22
 
+#if XNU_MONITOR
+	/* Zero x26 to indicate that this should not return to the PPL. */
+	mov		x26, #0
+#endif
 
-#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
+#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
 	tst		x23, PSR64_MODE_EL_MASK		// If any EL MODE bits are set, we're coming from
 	b.ne		1f				// kernel mode, so skip precise time update
 	PUSH_FRAME
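fleh_dispatch64 keys several decisions off the saved SPSR's EL field: scratch registers are only scrubbed, and precise user/kernel time only updated, when the exception came from EL0. A C sketch of that test (the constants mirror PSR64_MODE_EL_MASK/PSR64_MODE_EL0; the helper is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define PSR64_MODE_EL_MASK 0x0cULL  /* M[3:2] of the saved CPSR */
    #define PSR64_MODE_EL0     0x00ULL

    static bool
    exception_came_from_user(uint64_t saved_cpsr)
    {
            /* any set EL bits mean EL1/EL2: skip scrubbing and time accounting */
            return (saved_cpsr & PSR64_MODE_EL_MASK) == PSR64_MODE_EL0;
    }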
@@ -589,7 +580,7 @@ fleh_dispatch64:
 	POP_FRAME
 	mov		x0, x21				// Reload arm_context_t pointer
 1:
-#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
+#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
 
 	/* Dispatch to FLEH */
 
@@ -600,6 +591,10 @@
 	.align 2
 	.global EXT(fleh_synchronous)
 LEXT(fleh_synchronous)
+
+UNWIND_PROLOGUE
+UNWIND_DIRECTIVES
+
 	mrs		x1, ESR_EL1			// Load exception syndrome
 	mrs		x2, FAR_EL1			// Load fault address
 
@@ -620,13 +615,18 @@ Lvalid_link_register:
 	bl		EXT(sleh_synchronous)
 	POP_FRAME
 
+#if XNU_MONITOR
+	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+#endif
+
+	mov		x28, xzr			// Don't need to check PFZ if there are ASTs
 	b		exception_return_dispatch
 
 Lfleh_sync_load_lr:
 	ldr		lr, [x0, SS64_LR]
 	b		Lvalid_link_register
-
+UNWIND_EPILOGUE
+
 /* Shared prologue code for fleh_irq and fleh_fiq.
  * Does any interrupt booking we may want to do
  * before invoking the handler proper.
@@ -691,7 +691,11 @@ LEXT(fleh_irq)
 	POP_FRAME
 	END_INTERRUPT_HANDLER
 
+#if XNU_MONITOR
+	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+#endif
+
+	mov		x28, #1				// Set a bit to check PFZ if there are ASTs
 	b		exception_return_dispatch
 
 	.text
@@ -710,7 +714,11 @@ LEXT(fleh_fiq)
 	POP_FRAME
 	END_INTERRUPT_HANDLER
 
+#if XNU_MONITOR
+	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+#endif
+
+	mov		x28, #1				// Set a bit to check PFZ if there are ASTs
 	b		exception_return_dispatch
 
 	.text
@@ -724,7 +732,11 @@ LEXT(fleh_serror)
 	bl		EXT(sleh_serror)
 	POP_FRAME
 
+#if XNU_MONITOR
+	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+#endif
+
+	mov		x28, xzr			// Don't need to check PFZ if there are ASTs
 	b		exception_return_dispatch
 
 /*
@@ -784,12 +796,13 @@ Lsp1_serror_str:
 exception_return_dispatch:
 	ldr		w0, [x21, SS64_CPSR]
 	tst		w0, PSR64_MODE_EL_MASK
-	b.ne		return_to_kernel		// return to kernel if M[3:2] > 0
+	b.ne		EXT(return_to_kernel)		// return to kernel if M[3:2] > 0
 	b		return_to_user
 
 	.text
 	.align 2
-return_to_kernel:
+	.global EXT(return_to_kernel)
+LEXT(return_to_kernel)
 	tbnz		w0, #DAIF_IRQF_SHIFT, exception_return	// Skip AST check if IRQ disabled
 	mrs		x3, TPIDR_EL1			// Load thread pointer
 	ldr		w1, [x3, ACT_PREEMPT_CNT]	// Load preemption count
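exception_return_dispatch above routes EL1 exceptions to return_to_kernel, which only polls for ASTs when the interrupted context had IRQs enabled and the preemption count is zero. A rough C rendering (sketch; the constants mirror the PSR64/DAIF definitions used above):

    #include <stdbool.h>
    #include <stdint.h>

    #define PSR64_MODE_EL_MASK 0x0cULL      /* M[3:2] of the saved CPSR */
    #define DAIF_IRQF          (1ULL << 7)  /* IRQ mask bit in the saved CPSR */

    static bool
    should_check_kernel_asts(uint32_t saved_cpsr, uint32_t preempt_count)
    {
            if ((saved_cpsr & PSR64_MODE_EL_MASK) == 0) {
                    return false;   /* came from EL0: return_to_user handles that path */
            }
            if (saved_cpsr & DAIF_IRQF) {
                    return false;   /* IRQs were masked at the interrupted point */
            }
            return preempt_count == 0;
    }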
@@ -811,51 +824,82 @@ LEXT(thread_bootstrap_return)
 #if CONFIG_DTRACE
 	bl		EXT(dtrace_thread_bootstrap)
 #endif
-	b		EXT(thread_exception_return)
+	b		EXT(arm64_thread_exception_return)
 
 	.text
-	.globl EXT(thread_exception_return)
-LEXT(thread_exception_return)
+	.globl EXT(arm64_thread_exception_return)
+LEXT(arm64_thread_exception_return)
 	mrs		x0, TPIDR_EL1
 	add		x21, x0, ACT_CONTEXT
 	ldr		x21, [x21]
+	mov		x28, xzr
 
 	//
-	// Fall Through to return_to_user from thread_exception_return.
+	// Fall Through to return_to_user from arm64_thread_exception_return.
 	// Note that if we move return_to_user or insert a new routine
-	// below thread_exception_return, the latter will need to change.
+	// below arm64_thread_exception_return, the latter will need to change.
 	//
 	.text
+/* x21 is always the machine context pointer when we get here
+ * x28 is a bit indicating whether or not we should check if pc is in pfz */
 return_to_user:
 check_user_asts:
-	mrs		x3, TPIDR_EL1			// Load thread pointer
+	mrs		x3, TPIDR_EL1			// Load thread pointer
 
 	movn		w2, #0
 	str		w2, [x3, TH_IOTIER_OVERRIDE]	// Reset IO tier override to -1 before returning to user
 
 #if MACH_ASSERT
 	ldr		w0, [x3, TH_RWLOCK_CNT]
-	cbz		w0, 1f				// Detect unbalance RW lock/unlock
-	b		rwlock_count_notzero
-1:
+	cbnz		w0, rwlock_count_notzero	// Detect unbalanced RW lock/unlock
+
 	ldr		w0, [x3, ACT_PREEMPT_CNT]
-	cbz		w0, 1f
-	b		preempt_count_notzero
-1:
+	cbnz		w0, preempt_count_notzero	// Detect unbalanced enable/disable preemption
 #endif
-
+	ldr		w0, [x3, TH_TMP_ALLOC_CNT]
+	cbnz		w0, tmp_alloc_count_nozero	// Detect KHEAP_TEMP leaks
+
 	msr		DAIFSet, #DAIFSC_ALL		// Disable exceptions
 	ldr		x4, [x3, ACT_CPUDATAP]		// Get current CPU data pointer
 	ldr		x0, [x4, CPU_PENDING_AST]	// Get ASTs
-	cbnz		x0, user_take_ast		// If pending ASTs, go service them
-
-#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
+	cbz		x0, no_asts			// If no asts, skip ahead
+
+	cbz		x28, user_take_ast		// If we don't need to check PFZ, just handle asts
+
+	/* At this point, we have ASTs and we need to check whether we are running in the
+	 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
+	 * the PFZ since we don't want to handle getting a signal or getting suspended
+	 * while holding a spinlock in userspace.
+	 *
+	 * If userspace was in the PFZ, we know (via coordination with the PFZ code
+	 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
+	 * to use it to indicate to userspace to come back to take a delayed
+	 * preemption, at which point the ASTs will be handled. */
+	mov		x28, xzr			// Clear the "check PFZ" bit so that we don't do this again
+	mov		x19, x0				// Save x0 since it will be clobbered by commpage_is_in_pfz64
+
+	ldr		x0, [x21, SS64_PC]		// Load pc from machine state
+	bl		EXT(commpage_is_in_pfz64)	// pc in pfz?
+	cbz		x0, restore_and_check_ast	// No, deal with other asts
+
+	mov		x0, #1
+	str		x0, [x21, SS64_X15]		// Mark x15 for userspace to take delayed preemption
+	mov		x0, x19				// restore x0 to asts
+	b		no_asts				// pretend we have no asts
+
+restore_and_check_ast:
+	mov		x0, x19				// restore x0
+	b		user_take_ast			// Service pending asts
+no_asts:
+
+
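The added PFZ handling above defers AST delivery when the interrupted user pc sits in the commpage's preemption-free zone, and instead flags x15 so userspace requests a delayed preemption on its way out. A C sketch of that decision (commpage_is_in_pfz64 is the routine called above, its prototype approximated here; the struct and helper are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct user_return_state {
            uint64_t pc;    /* saved_state pc  (SS64_PC)  */
            uint64_t x15;   /* saved_state x15 (SS64_X15) */
    };

    bool commpage_is_in_pfz64(uint64_t pc);   /* approximated prototype of the real routine */

    static bool
    should_deliver_asts_now(struct user_return_state *ss, uint64_t pending_asts, bool check_pfz)
    {
            if (pending_asts == 0) {
                    return false;                   /* nothing to deliver */
            }
            if (!check_pfz || !commpage_is_in_pfz64(ss->pc)) {
                    return true;                    /* safe to take user_take_ast */
            }
            ss->x15 = 1;                            /* ask userspace for a delayed preemption */
            return false;                           /* pretend there were no ASTs */
    }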
+#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
 	mov		x19, x3				// Preserve thread pointer across function call
 	PUSH_FRAME
 	bl		EXT(timer_state_event_kernel_to_user)
 	POP_FRAME
 	mov		x3, x19
-#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
+#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */
 
 #if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
 	/* Watchtower
@@ -886,8 +930,29 @@ check_user_asts:
 	ldr		x4, [x3, ACT_CPUDATAP]		// Get current CPU data pointer
 	ldr		x1, [x4, CPU_USER_DEBUG]	// Get Debug context
 	ldr		x0, [x3, ACT_DEBUGDATA]
-	orr		x1, x1, x0			// Thread debug state and live debug state both NULL?
-	cbnz		x1, user_set_debug_state_and_return	// If one or the other non-null, go set debug state
+	cmp		x0, x1
+	beq		L_skip_user_set_debug_state	// If active CPU debug state does not match thread debug state, apply thread state
+
+#if defined(APPLELIGHTNING)
+/* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */
+
+	ARM64_IS_PCORE x12			// if we're not a pCORE, also do nothing
+	cbz		x12, 1f
+
+	mrs		x12, HID1			// if any debug session ever existed, set forceNexL3ClkOn
+	orr		x12, x12, ARM64_REG_HID1_forceNexL3ClkOn
+	msr		HID1, x12
+1:
+
+#endif
+
+	PUSH_FRAME
+	bl		EXT(arm_debug_set)		// Establish thread debug state in live regs
+	POP_FRAME
+	mrs		x3, TPIDR_EL1			// Reload thread pointer
+L_skip_user_set_debug_state:
+
+	b		exception_return_unint_tpidr_x3
+
 //
@@ -903,9 +968,8 @@ exception_return_unint:
 exception_return_unint_tpidr_x3:
 	mov		sp, x21				// Reload the pcb pointer
 
-	/* ARM64_TODO Reserve x18 until we decide what to do with it */
-	ldr		x0, [x3, TH_CTH_DATA]		// Load cthread data pointer
-	str		x0, [sp, SS64_X18]		// and use it to trash x18
+exception_return_unint_tpidr_x3_dont_trash_x18:
+
 
 #if __ARM_KERNEL_PROTECT__
 	/*
@@ -930,7 +994,7 @@ Lskip_el0_eret_mapping:
 Lexception_return_restore_registers:
 	mov		x0, sp				// x0 = &pcb
 	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
-	AUTH_THREAD_STATE_IN_X0	x20, x21, x22, x23, x24
+	AUTH_THREAD_STATE_IN_X0	x20, x21, x22, x23, x24, el0_state_allowed=1
 
 	/* Restore special register state */
 	ldr		w3, [sp, NS64_FPSR]
@@ -939,31 +1003,10 @@ Lexception_return_restore_registers:
 	msr		ELR_EL1, x1			// Load the return address into ELR
 	msr		SPSR_EL1, x2			// Load the return CPSR into SPSR
 	msr		FPSR, x3
-	msr		FPCR, x4			// Synchronized by ERET
-
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
-	/* if eret to userspace, disable JOP */
-	tbnz		w2, PSR64_MODE_EL_SHIFT, Lskip_disable_jop
-	adrp		x4, EXT(const_boot_args)@page
-	add		x4, x4, EXT(const_boot_args)@pageoff
-	ldr		x4, [x4, BA_BOOT_FLAGS]
-	and		x1, x4, BA_BOOT_FLAGS_DISABLE_JOP
-	cbnz		x1, Lskip_disable_jop		// if global JOP disabled, don't touch SCTLR (kernel JOP is already off)
-	and		x1, x4, BA_BOOT_FLAGS_DISABLE_USER_JOP
-	cbnz		x1, Ldisable_jop		// if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on)
-	mrs		x2, TPIDR_EL1
-	ldr		x2, [x2, TH_DISABLE_USER_JOP]
-	cbz		x2, Lskip_disable_jop		// if thread has JOP enabled, leave it on (kernel running with JOP on)
-Ldisable_jop:
-	MOV64		x1, SCTLR_JOP_KEYS_ENABLED
-	mrs		x4, SCTLR_EL1
-	bic		x4, x4, x1
-	msr		SCTLR_EL1, x4
-	MOV64		x1, SCTLR_EL1_EXPECTED
-	cmp		x4, x1
-	bne		.
-Lskip_disable_jop:
-#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)*/
+	mrs		x5, FPCR
+	CMSR FPCR, x5, x4, 1
+1:
+
 	/* Restore arm_neon_saved_state64 */
 	ldp		q0, q1, [x0, NS64_Q0]
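The `mrs x5, FPCR` / `CMSR FPCR, x5, x4, 1` pair above replaces the unconditional FPCR restore: the register is rewritten only when the saved value differs from the live one. A tiny C sketch of the idea (the variable stands in for the real system register, which the assembly reads and writes with mrs/msr):

    #include <stdint.h>

    static uint64_t live_fpcr;   /* stand-in for the hardware FPCR */

    /* Conditional system-register update: skip the write when the value already matches. */
    static void
    restore_fpcr(uint64_t saved_fpcr)
    {
            if (live_fpcr != saved_fpcr) {
                    live_fpcr = saved_fpcr;
            }
    }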
@@ -1039,7 +1082,7 @@ Lskip_disable_jop:
 Lskip_ttbr1_switch:
 #endif /* __ARM_KERNEL_PROTECT__ */
 
-	eret
+	ERET_CONTEXT_SYNCHRONIZING
 
 user_take_ast:
 	PUSH_FRAME
@@ -1047,17 +1090,6 @@ user_take_ast:
 	POP_FRAME
 	b		check_user_asts			// Now try again
 
-user_set_debug_state_and_return:
-
-
-	ldr		x4, [x3, ACT_CPUDATAP]		// Get current CPU data pointer
-	isb						// Synchronize context
-	PUSH_FRAME
-	bl		EXT(arm_debug_set)		// Establish thread debug state in live regs
-	POP_FRAME
-	isb
-	b		exception_return_unint		// Continue, reloading the thread pointer
-
 	.text
 	.align 2
 preempt_underflow:
 	mrs		x0, TPIDR_EL1
 	str		x0, [sp, #-16]!			// We'll print thread pointer
 	adr		x0, L_underflow_str		// Format string
 	CALL_EXTERN panic				// Game over
 
 L_underflow_str:
 	.asciz "Preemption count negative on thread %p"
 .align 2
@@ -1078,7 +1110,7 @@ rwlock_count_notzero:
 	str		x0, [sp, #-16]!			// We'll print thread pointer
 	ldr		w0, [x0, TH_RWLOCK_CNT]
 	str		w0, [sp, #8]
-	adr		x0, L_rwlock_count_notzero_str	// Format string
+	adr		x0, L_rwlock_count_notzero_str	// Format string
 	CALL_EXTERN panic				// Game over
 
 L_rwlock_count_notzero_str:
 	.asciz "RW lock count not 0 on thread %p (%u)"
@@ -1091,14 +1123,18 @@ preempt_count_notzero:
 	str		x0, [sp, #-16]!			// We'll print thread pointer
 	ldr		w0, [x0, ACT_PREEMPT_CNT]
 	str		w0, [sp, #8]
-	adr		x0, L_preempt_count_notzero_str	// Format string
+	adr		x0, L_preempt_count_notzero_str	// Format string
 	CALL_EXTERN panic				// Game over
 
 L_preempt_count_notzero_str:
 	.asciz "preemption count not 0 on thread %p (%u)"
 #endif /* MACH_ASSERT */
 
-.align 2
+	.text
+	.align 2
+tmp_alloc_count_nozero:
+	mrs		x0, TPIDR_EL1
+	CALL_EXTERN kheap_temp_leak_panic
 
 #if __ARM_KERNEL_PROTECT__
 /*
@@ -1112,11 +1148,412 @@ L_preempt_count_notzero_str:
 LEXT(ExceptionVectorsEnd)
 #endif /* __ARM_KERNEL_PROTECT__ */
 
+#if XNU_MONITOR
+
+/*
+ * Functions to preflight the fleh handlers when the PPL has taken an exception;
+ * mostly concerned with setting up state for the normal fleh code.
+ */
+fleh_synchronous_from_ppl:
+	/* Save x0. */
+	mov		x15, x0
+
+	/* Grab the ESR. */
+	mrs		x1, ESR_EL1			// Get the exception syndrome
+
+	/* If the stack pointer is corrupt, it will manifest either as a data abort
+	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
+	 * these quickly by testing bit 5 of the exception class.
+	 */
+	tbz		x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
+	mrs		x0, SP_EL0			// Get SP_EL0
+
+	/* Perform high level checks for stack corruption. */
+	and		x1, x1, #ESR_EC_MASK		// Mask the exception class
+	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
+	cmp		x1, x2				// If we have a stack alignment exception
+	b.eq		Lcorrupt_ppl_stack		// ...the stack is definitely corrupted
+	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
+	cmp		x1, x2				// If we have a data abort, we need to
+	b.ne		Lvalid_ppl_stack		// ...validate the stack pointer
+
+Ltest_pstack:
+	/* Bounds check the PPL stack. */
+	adrp		x10, EXT(pmap_stacks_start)@page
+	ldr		x10, [x10, #EXT(pmap_stacks_start)@pageoff]
+	adrp		x11, EXT(pmap_stacks_end)@page
+	ldr		x11, [x11, #EXT(pmap_stacks_end)@pageoff]
+	cmp		x0, x10
+	b.lo		Lcorrupt_ppl_stack
+	cmp		x0, x11
+	b.hi		Lcorrupt_ppl_stack
+
+Lvalid_ppl_stack:
+	/* Restore x0. */
+	mov		x0, x15
+
+	/* Switch back to the kernel stack. */
+	msr		SPSel, #0
+	GET_PMAP_CPU_DATA x5, x6, x7
+	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
+	mov		sp, x6
+
+	/* Hand off to the synch handler. */
+	b		EXT(fleh_synchronous)
+
+Lcorrupt_ppl_stack:
+	/* Restore x0. */
+	mov		x0, x15
+
+	/* Hand off to the invalid stack handler. */
+	b		fleh_invalid_stack
+
+fleh_fiq_from_ppl:
+	SWITCH_TO_INT_STACK
+	b		EXT(fleh_fiq)
+
+fleh_irq_from_ppl:
+	SWITCH_TO_INT_STACK
+	b		EXT(fleh_irq)
+
+fleh_serror_from_ppl:
+	GET_PMAP_CPU_DATA x5, x6, x7
+	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
+	mov		sp, x6
+	b		EXT(fleh_serror)
+
+
+
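fleh_synchronous_from_ppl above refuses to trust SP_EL0 unless it falls inside the PPL stack region bounded by pmap_stacks_start and pmap_stacks_end. A C sketch of the Ltest_pstack check (the parameters stand in for those globals, which the assembly loads via adrp/ldr):

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the b.lo / b.hi pair above: corrupt if SP_EL0 is below or above the region. */
    static bool
    ppl_stack_pointer_is_valid(uintptr_t sp_el0, uintptr_t stacks_start, uintptr_t stacks_end)
    {
            return sp_el0 >= stacks_start && sp_el0 <= stacks_end;
    }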
+	// x15: ppl call number
+	// w10: ppl_state
+	// x20: gxf_enter caller's DAIF
+	.globl EXT(ppl_trampoline_start)
+LEXT(ppl_trampoline_start)
+
+
+#error "XPRR configuration error"
+	cmp		x14, x21
+	b.ne		Lppl_fail_dispatch
+
+	/* Verify the request ID. */
+	cmp		x15, PMAP_COUNT
+	b.hs		Lppl_fail_dispatch
+
+	GET_PMAP_CPU_DATA	x12, x13, x14
+
+	/* Mark this CPU as being in the PPL. */
+	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+	cmp		w9, #PPL_STATE_KERNEL
+	b.eq		Lppl_mark_cpu_as_dispatching
+
+	/* Check to see if we are trying to trap from within the PPL. */
+	cmp		w9, #PPL_STATE_DISPATCH
+	b.eq		Lppl_fail_dispatch_ppl
+
+
+	/* Ensure that we are returning from an exception. */
+	cmp		w9, #PPL_STATE_EXCEPTION
+	b.ne		Lppl_fail_dispatch
+
+	// where is w10 set?
+	// in CHECK_EXCEPTION_RETURN_DISPATCH_PPL
+	cmp		w10, #PPL_STATE_EXCEPTION
+	b.ne		Lppl_fail_dispatch
+
+	/* This is an exception return; set the CPU to the dispatching state. */
+	mov		w9, #PPL_STATE_DISPATCH
+	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+	/* Find the save area, and return to the saved PPL context. */
+	ldr		x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
+	mov		sp, x0
+	b		EXT(return_to_ppl)
+
+Lppl_mark_cpu_as_dispatching:
+	cmp		w10, #PPL_STATE_KERNEL
+	b.ne		Lppl_fail_dispatch
+
+	/* Mark the CPU as dispatching. */
+	mov		w13, #PPL_STATE_DISPATCH
+	str		w13, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+	/* Switch to the regular PPL stack. */
+	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
+	ldr		x9, [x12, PMAP_CPU_DATA_PPL_STACK]
+
+	// SP0 is thread stack here
+	mov		x21, sp
+	// SP0 is now PPL stack
+	mov		sp, x9
+
+	/* Save the old stack pointer off in case we need it. */
+	str		x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
+
+	/* Get the handler for the request */
+	adrp		x9, EXT(ppl_handler_table)@page
+	add		x9, x9, EXT(ppl_handler_table)@pageoff
+	add		x9, x9, x15, lsl #3
+	ldr		x10, [x9]
+
+	/* Branch to the code that will invoke the PPL request. */
+	b		EXT(ppl_dispatch)
+
+Lppl_fail_dispatch_ppl:
+	/* Switch back to the kernel stack. */
+	ldr		x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
+	mov		sp, x10
+
+Lppl_fail_dispatch:
+	/* Indicate that we failed. */
+	mov		x15, #PPL_EXIT_BAD_CALL
+
+	/* Move the DAIF bits into the expected register. */
+	mov		x10, x20
+
+	/* Return to kernel mode. */
+	b		ppl_return_to_kernel_mode
+
+Lppl_dispatch_exit:
+	/* Indicate that we are cleanly exiting the PPL. */
+	mov		x15, #PPL_EXIT_DISPATCH
+
+	/* Switch back to the original (kernel thread) stack. */
+	mov		sp, x21
+
+	/* Move the saved DAIF bits. */
+	mov		x10, x20
+
+	/* Clear the old stack pointer. */
+	str		xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
+
+	/*
+	 * Mark the CPU as no longer being in the PPL. We spin if our state
+	 * machine is broken.
+	 */
+	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+	cmp		w9, #PPL_STATE_DISPATCH
+	b.ne		.
+	mov		w9, #PPL_STATE_KERNEL
+	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
+
+	/* Return to the kernel. */
+	b		ppl_return_to_kernel_mode
+
+
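The trampoline above validates the PPL request ID against PMAP_COUNT and then loads the handler from ppl_handler_table, eight bytes per slot (the `lsl #3`). A C sketch of that dispatch; PMAP_COUNT's value, the table contents and the handler signature are placeholders:

    #include <stdint.h>

    typedef uint64_t (*ppl_handler_t)(uint64_t a0, uint64_t a1);

    #define PMAP_COUNT 4    /* placeholder: number of PPL entry points */

    static ppl_handler_t ppl_handler_table[PMAP_COUNT];    /* placeholder table */

    /* Reject out-of-range requests (the b.hs above), otherwise index and call. */
    static uint64_t
    ppl_dispatch_sketch(uint64_t request, uint64_t a0, uint64_t a1, int *bad_call)
    {
            if (request >= PMAP_COUNT) {
                    *bad_call = 1;          /* PPL_EXIT_BAD_CALL path */
                    return 0;
            }
            *bad_call = 0;
            return ppl_handler_table[request](a0, a1);
    }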
+	.text
+ppl_exit:
+	/*
+	 * If we are dealing with an exception, hand off to the first level
+	 * exception handler.
+	 */
+	cmp		x15, #PPL_EXIT_EXCEPTION
+	b.eq		Ljump_to_fleh_handler
+
+	/* Restore the original AIF state. */
+	REENABLE_DAIF	x10
+
+	/* If this was a panic call from the PPL, reinvoke panic. */
+	cmp		x15, #PPL_EXIT_PANIC_CALL
+	b.eq		Ljump_to_panic_trap_to_debugger
+
+	/* Load the preemption count. */
+	mrs		x10, TPIDR_EL1
+	ldr		w12, [x10, ACT_PREEMPT_CNT]
+
+	/* Detect underflow */
+	cbnz		w12, Lno_preempt_underflow
+	b		preempt_underflow
+Lno_preempt_underflow:
+
+	/* Lower the preemption count. */
+	sub		w12, w12, #1
+	str		w12, [x10, ACT_PREEMPT_CNT]
+
+	/* Skip ASTs if the preemption count is not zero. */
+	cbnz		x12, Lppl_skip_ast_taken
+
+	/* Skip the AST check if interrupts are disabled. */
+	mrs		x1, DAIF
+	tst		x1, #DAIF_IRQF
+	b.ne		Lppl_skip_ast_taken
+
+	/* Disable interrupts. */
+	msr		DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)
+
+	/* If there is no urgent AST, skip the AST. */
+	ldr		x12, [x10, ACT_CPUDATAP]
+	ldr		x14, [x12, CPU_PENDING_AST]
+	tst		x14, AST_URGENT
+	b.eq		Lppl_defer_ast_taken
+
+	/* Stash our return value and return reason. */
+	mov		x20, x0
+	mov		x21, x15
+
+	/* Handle the AST. */
+	bl		EXT(ast_taken_kernel)
+
+	/* Restore the return value and the return reason. */
+	mov		x15, x21
+	mov		x0, x20
+
+Lppl_defer_ast_taken:
+	/* Reenable interrupts. */
+	msr		DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
+
+Lppl_skip_ast_taken:
+	/* Pop the stack frame. */
+	ldp		x29, x30, [sp, #0x10]
+	ldp		x20, x21, [sp], #0x20
+
+	/* Check to see if this was a bad request. */
+	cmp		x15, #PPL_EXIT_BAD_CALL
+	b.eq		Lppl_bad_call
+
+	/* Return. */
+	ARM64_STACK_EPILOG
+
+	.align 2
+Ljump_to_fleh_handler:
+	br		x25
+
+	.align 2
+Ljump_to_panic_trap_to_debugger:
+	b		EXT(panic_trap_to_debugger)
+
+Lppl_bad_call:
+	/* Panic. */
+	adrp		x0, Lppl_bad_call_panic_str@page
+	add		x0, x0, Lppl_bad_call_panic_str@pageoff
+	b		EXT(panic)
+
+	.text
+	.align 2
+	.globl EXT(ppl_dispatch)
+LEXT(ppl_dispatch)
+	/*
+	 * Save a couple of important registers (implementation detail; x12 has
+	 * the PPL per-CPU data address; x13 is not actually interesting).
+	 */
+	stp		x12, x13, [sp, #-0x10]!
+
+	/* Restore the original AIF state. */
+	REENABLE_DAIF	x20
+
+	/*
+	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
+	 * but the exception vectors will deal with this properly.
+	 */
+
+	/* Invoke the PPL method. */
+#ifdef HAS_APPLE_PAC
+	blraa		x10, x9
+#else
+	blr		x10
+#endif
+
+	/* Disable AIF. */
+	msr		DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+
+	/* Restore those important registers. */
+	ldp		x12, x13, [sp], #0x10
+
+	/* Mark this as a regular return, and hand off to the return path. */
+	b		Lppl_dispatch_exit
+
+	.text
+	.align 2
+	.globl EXT(ppl_bootstrap_dispatch)
+LEXT(ppl_bootstrap_dispatch)
+	/* Verify the PPL request. */
+	cmp		x15, PMAP_COUNT
+	b.hs		Lppl_fail_bootstrap_dispatch
+
+	/* Get the requested PPL routine. */
+	adrp		x9, EXT(ppl_handler_table)@page
+	add		x9, x9, EXT(ppl_handler_table)@pageoff
+	add		x9, x9, x15, lsl #3
+	ldr		x10, [x9]
+
+	/* Invoke the requested PPL routine. */
+#ifdef HAS_APPLE_PAC
+	blraa		x10, x9
+#else
+	blr		x10
+#endif
+	/* Stash off the return value */
+	mov		x20, x0
+	/* Drop the preemption count */
+	bl		EXT(_enable_preemption)
+	mov		x0, x20
+
+	/* Pop the stack frame. */
+	ldp		x29, x30, [sp, #0x10]
+	ldp		x20, x21, [sp], #0x20
+#if __has_feature(ptrauth_returns)
+	retab
+#else
+	ret
+#endif
+
+Lppl_fail_bootstrap_dispatch:
+	/* Pop our stack frame and panic. */
+	ldp		x29, x30, [sp, #0x10]
+	ldp		x20, x21, [sp], #0x20
+#if __has_feature(ptrauth_returns)
+	autibsp
+#endif
+	adrp		x0, Lppl_bad_call_panic_str@page
+	add		x0, x0, Lppl_bad_call_panic_str@pageoff
+	b		EXT(panic)
+
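The PPL_STATE_* checks in the trampoline, ppl_dispatch and ppl_exit paths above form a small per-CPU state machine. A C sketch of the transitions they allow (the enum mirrors the PPL_STATE_* constants; the helper is illustrative):

    #include <stdbool.h>

    enum ppl_state {
            PPL_STATE_KERNEL,       /* CPU is running kernel code outside the PPL */
            PPL_STATE_DISPATCH,     /* CPU is executing a PPL call */
            PPL_STATE_EXCEPTION,    /* an exception was taken while in the PPL */
            PPL_STATE_PANIC         /* the PPL is panicking */
    };

    /*
     * A fresh call enters DISPATCH only from KERNEL; an exception return re-enters
     * DISPATCH only from EXCEPTION; a clean exit (or a panic) leaves DISPATCH
     * (anything else spins via "b.ne ." above).
     */
    static bool
    ppl_transition_allowed(enum ppl_state from, enum ppl_state to)
    {
            switch (to) {
            case PPL_STATE_DISPATCH:
                    return from == PPL_STATE_KERNEL || from == PPL_STATE_EXCEPTION;
            case PPL_STATE_KERNEL:
            case PPL_STATE_PANIC:
                    return from == PPL_STATE_DISPATCH;
            default:
                    return false;
            }
    }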
+	.text
+	.align 2
+	.globl EXT(ml_panic_trap_to_debugger)
+LEXT(ml_panic_trap_to_debugger)
+	mrs		x10, DAIF
+	msr		DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
+
+	adrp		x12, EXT(pmap_ppl_locked_down)@page
+	ldr		w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
+	cbz		w12, Lnot_in_ppl_dispatch
+
+	LOAD_PMAP_CPU_DATA	x11, x12, x13
+
+	ldr		w12, [x11, PMAP_CPU_DATA_PPL_STATE]
+	cmp		w12, #PPL_STATE_DISPATCH
+	b.ne		Lnot_in_ppl_dispatch
+
+	/* Indicate (for the PPL->kernel transition) that we are panicking. */
+	mov		x15, #PPL_EXIT_PANIC_CALL
+
+	/* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
+	ldr		x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
+	mov		sp, x12
+
+	mrs		x10, DAIF
+	mov		w13, #PPL_STATE_PANIC
+	str		w13, [x11, PMAP_CPU_DATA_PPL_STATE]
+
+	/* Now we are ready to exit the PPL. */
+	b		ppl_return_to_kernel_mode
+
+Lnot_in_ppl_dispatch:
+	REENABLE_DAIF	x10
+	ret
+
+	.data
+Lppl_bad_call_panic_str:
+	.asciz "ppl_dispatch: failed due to bad arguments/state"
+#else /* XNU_MONITOR */
 	.text
 	.align 2
 	.globl EXT(ml_panic_trap_to_debugger)
 LEXT(ml_panic_trap_to_debugger)
 	ret
+#endif /* XNU_MONITOR */
 
 /* ARM64_TODO Is globals_asm.h needed? */
 //#include "globals_asm.h"