/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/asm.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach/exception_types.h>
#include <config_dtrace.h>
#include <arm64/exception_asm.h>

#if __ARM_KERNEL_PROTECT__
.macro COMPARE_BRANCH_FUSION
#if defined(APPLE_ARM64_ARCH_FAMILY)
    mrs $1, ARM64_REG_HID1
    orr $1, $1, ARM64_REG_HID1_disCmpBrFusion
    mov $2, ARM64_REG_HID1_disCmpBrFusion
    msr ARM64_REG_HID1, $1
/*
 * Restores the kernel EL1 mappings, if necessary.
 *
 * This may mutate x18.
 */
#if __ARM_KERNEL_PROTECT__
    /* Switch to the kernel ASID (low bit set) for the task. */
    orr x18, x18, #(1 << TTBR_ASID_SHIFT)
    /*
     * We eschew some barriers on Apple CPUs, as relative ordering of writes
     * to the TTBRs and writes to the TCR should be ensured by the
     * microarchitecture.
     */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
    /*
     * Update the TCR to map the kernel now that we are using the kernel
     * ASID.
     */
    MOV64 x18, TCR_EL1_BOOT
#endif /* __ARM_KERNEL_PROTECT__ */
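/*
 * In effect (editor's sketch of the fragment above, under __ARM_KERNEL_PROTECT__):
 *
 *   ttbr0.asid |= 1;        // switch to the kernel ASID (low bit set)
 *   tcr = TCR_EL1_BOOT;     // re-map the kernel half via TTBR1
 *
 * Barriers between the TTBR and TCR writes are elided on Apple cores, where
 * the microarchitecture orders them; other implementations take the extra
 * synchronization guarded by !defined(APPLE_ARM64_ARCH_FAMILY) above.
 */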
/*
 * BRANCH_TO_KVA_VECTOR
 *
 * Branches to the requested long exception vector in the kernelcache.
 *   arg0 - The label to branch to
 *   arg1 - The index of the label in exc_vectors_table
 *
 * This may mutate x18.
 */
.macro BRANCH_TO_KVA_VECTOR
#if __ARM_KERNEL_PROTECT__
    /*
     * Find the kernelcache table for the exception vectors by accessing
     * the per-CPU data.
     */
    ldr x18, [x18, ACT_CPUDATAP]
    ldr x18, [x18, CPU_EXC_VECTORS]
    /*
     * Get the handler for this exception and jump to it.
     */
    ldr x18, [x18, #($1 << 3)]
#endif /* __ARM_KERNEL_PROTECT__ */
#if __ARM_KERNEL_PROTECT__
    .globl EXT(exc_vectors_table)
LEXT(exc_vectors_table)
    /* Table of exception handlers.
     * These handlers sometimes contain deadloops.
     * It's nice to have symbols for them when debugging. */
    .quad el1_sp0_synchronous_vector_long
    .quad el1_sp0_irq_vector_long
    .quad el1_sp0_fiq_vector_long
    .quad el1_sp0_serror_vector_long
    .quad el1_sp1_synchronous_vector_long
    .quad el1_sp1_irq_vector_long
    .quad el1_sp1_fiq_vector_long
    .quad el1_sp1_serror_vector_long
    .quad el0_synchronous_vector_64_long
    .quad el0_irq_vector_64_long
    .quad el0_fiq_vector_64_long
    .quad el0_serror_vector_64_long
#endif /* __ARM_KERNEL_PROTECT__ */
#if __ARM_KERNEL_PROTECT__
    /*
     * We need this to be on a page boundary so that we may avoid mapping
     * other text along with it. As this must be on the VM page boundary
     * (due to how the coredumping code currently works), this will be a
     * 16KB page boundary.
     */
#endif /* __ARM_KERNEL_PROTECT__ */
    .globl EXT(ExceptionVectorsBase)
LEXT(ExceptionVectorsBase)
Lel1_sp0_synchronous_vector:
    BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0

    BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1

    BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2

Lel1_sp0_serror_vector:
    BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3

Lel1_sp1_synchronous_vector:
    BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4

    BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5

    BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6

Lel1_sp1_serror_vector:
    BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7

Lel0_synchronous_vector_64:
    BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8

    BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9

    BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10

Lel0_serror_vector_64:
    BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11

    /* Fill out the rest of the page */

/*********************************
 * END OF EXCEPTION VECTORS PAGE *
 *********************************/
.macro EL1_SP0_VECTOR
    msr SPSel, #0 // Switch to SP0
    sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
    stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
    add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
    str x0, [sp, SS64_SP] // Save stack pointer to exception frame
    stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
    INIT_SAVED_STATE_FLAVORS sp, w0, w1
    mov x0, sp // Copy saved state pointer to x0
el1_sp0_synchronous_vector_long:
    sub sp, sp, ARM_CONTEXT_SIZE // Make space on the exception stack
    stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the stack
    mrs x1, ESR_EL1 // Get the exception syndrome
    /* If the stack pointer is corrupt, it will manifest either as a data abort
     * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
     * these quickly by testing bit 5 of the exception class: 0x25 is 0b100101
     * and 0x26 is 0b100110, so both have bit 5 set.
     */
    tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
    mrs x0, SP_EL0 // Get SP_EL0
    stp fp, lr, [sp, SS64_FP] // Save fp, lr to the stack
    str x0, [sp, SS64_SP] // Save sp to the stack
    bl check_kernel_stack
    ldp fp, lr, [sp, SS64_FP] // Restore fp, lr
Lkernel_stack_valid:
    ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
    add sp, sp, ARM_CONTEXT_SIZE // Restore SP1
    adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
    add x1, x1, EXT(fleh_synchronous)@pageoff

el1_sp0_irq_vector_long:
    ldr x1, [x1, ACT_CPUDATAP]
    ldr x1, [x1, CPU_ISTACKPTR]
    adrp x1, EXT(fleh_irq)@page // Load address for fleh
    add x1, x1, EXT(fleh_irq)@pageoff

el1_sp0_fiq_vector_long:
    // ARM64_TODO write optimized decrementer
    ldr x1, [x1, ACT_CPUDATAP]
    ldr x1, [x1, CPU_ISTACKPTR]
    adrp x1, EXT(fleh_fiq)@page // Load address for fleh
    add x1, x1, EXT(fleh_fiq)@pageoff

el1_sp0_serror_vector_long:
    adrp x1, EXT(fleh_serror)@page // Load address for fleh
    add x1, x1, EXT(fleh_serror)@pageoff
.macro EL1_SP1_VECTOR
    sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame
    stp x0, x1, [sp, SS64_X0] // Save x0, x1 to exception frame
    add x0, sp, ARM_CONTEXT_SIZE // Calculate the original stack pointer
    str x0, [sp, SS64_SP] // Save stack pointer to exception frame
    INIT_SAVED_STATE_FLAVORS sp, w0, w1
    stp fp, lr, [sp, SS64_FP] // Save fp and lr to exception frame
    mov x0, sp // Copy saved state pointer to x0
el1_sp1_synchronous_vector_long:
    b check_exception_stack
Lel1_sp1_synchronous_valid_stack:
#if defined(KERNEL_INTEGRITY_KTRR)
    b check_ktrr_sctlr_trap
Lel1_sp1_synchronous_vector_continue:
    adrp x1, fleh_synchronous_sp1@page
    add x1, x1, fleh_synchronous_sp1@pageoff

el1_sp1_irq_vector_long:
    adrp x1, fleh_irq_sp1@page
    add x1, x1, fleh_irq_sp1@pageoff

el1_sp1_fiq_vector_long:
    adrp x1, fleh_fiq_sp1@page
    add x1, x1, fleh_fiq_sp1@pageoff

el1_sp1_serror_vector_long:
    adrp x1, fleh_serror_sp1@page
    add x1, x1, fleh_serror_sp1@pageoff
#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
/*
 * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1.
 * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start.
 */
#define SCTLR_EL1_INITIAL (SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED)
#define SCTLR_EL1_EXPECTED ((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED)
#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */

.macro EL0_64_VECTOR
    mov x18, #0 // Zero x18 to avoid leaking data to user SS
    stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
    // enable JOP for kernel
    adrp x0, EXT(const_boot_args)@page
    add x0, x0, EXT(const_boot_args)@pageoff
    ldr x0, [x0, BA_BOOT_FLAGS]
    and x0, x0, BA_BOOT_FLAGS_DISABLE_JOP
    // if disable jop is set, don't touch SCTLR (it's already off)
    // if (!boot_args->kernel_jop_disable) {
    tbnz x0, SCTLR_PACIA_ENABLED_SHIFT, 1f
    // turn on jop for kernel if it isn't already on
    // if (!jop_running) {
    MOV64 x1, SCTLR_JOP_KEYS_ENABLED
    MOV64 x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED
#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
    mrs x0, TPIDR_EL1 // Load the thread register
    mrs x1, SP_EL0 // Load the user stack pointer
    add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
    ldr x0, [x0] // Load the user context pointer
    str x1, [x0, SS64_SP] // Store the user stack pointer in the user PCB
    msr SP_EL0, x0 // Copy the user PCB pointer to SP0
    ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack
    msr SPSel, #0 // Switch to SP0
    stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB
    stp fp, lr, [sp, SS64_FP] // Save fp and lr to the user PCB
    mov fp, #0 // Clear the fp and lr for the
    mov lr, #0 // debugger stack frame
    mov x0, sp // Copy the user PCB pointer to x0
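/*
 * Editor's note: the block above is the EL0 entry prologue (EL0_64_VECTOR,
 * per the note in fleh_dispatch64 below). A rough sketch of what it does,
 * with illustrative field names:
 *
 *   x18 = 0;                            // never leak kernel x18 to user state
 *   (on PAC parts, re-enable kernel JOP keys in SCTLR_EL1 if configured)
 *   pcb = thread->machine.context;      // via TPIDR_EL1 + ACT_CONTEXT
 *   pcb->ss_64.sp = SP_EL0;             // stash the user stack pointer
 *   SP_EL0 = pcb;                       // SP0 now points at the user PCB
 *   save x0, x1, fp, lr into the PCB; fp = lr = 0;   // clean debugger frame
 *   x0 = pcb;                           // handed to the per-vector code below
 */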
el0_synchronous_vector_64_long:
    mrs x1, TPIDR_EL1 // Load the thread register
    ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
    mov sp, x1 // Set the stack pointer to the kernel stack
    adrp x1, EXT(fleh_synchronous)@page // Load address for fleh
    add x1, x1, EXT(fleh_synchronous)@pageoff

el0_irq_vector_64_long:
    ldr x1, [x1, ACT_CPUDATAP]
    ldr x1, [x1, CPU_ISTACKPTR]
    mov sp, x1 // Set the stack pointer to the kernel stack
    adrp x1, EXT(fleh_irq)@page // Load address for fleh
    add x1, x1, EXT(fleh_irq)@pageoff

el0_fiq_vector_64_long:
    ldr x1, [x1, ACT_CPUDATAP]
    ldr x1, [x1, CPU_ISTACKPTR]
    mov sp, x1 // Set the stack pointer to the kernel stack
    adrp x1, EXT(fleh_fiq)@page // Load address for fleh
    add x1, x1, EXT(fleh_fiq)@pageoff

el0_serror_vector_64_long:
    mrs x1, TPIDR_EL1 // Load the thread register
    ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
    mov sp, x1 // Set the stack pointer to the kernel stack
    adrp x1, EXT(fleh_serror)@page // Load address for fleh
    add x1, x1, EXT(fleh_serror)@pageoff
/*
 * check_exception_stack
 *
 * Verifies that the stack pointer at SP1 is within the exception stack.
 * If not, we simply hang, as we have no more stack to fall back on.
 */
check_exception_stack:
    mrs x18, TPIDR_EL1 // Get thread pointer
    cbz x18, Lvalid_exception_stack // Thread context may not be set early in boot
    ldr x18, [x18, ACT_CPUDATAP]
    cbz x18, . // If thread context is set, cpu data should be too
    ldr x18, [x18, CPU_EXCEPSTACK_TOP]
    b.gt . // Hang if above exception stack top
    sub x18, x18, EXCEPSTACK_SIZE_NUM // Find bottom of exception stack
    b.lt . // Hang if below exception stack bottom
Lvalid_exception_stack:
    b Lel1_sp1_synchronous_valid_stack
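/*
 * Roughly (editor's sketch), with top = the per-CPU value loaded via
 * CPU_EXCEPSTACK_TOP:
 *
 *   if (thread == NULL) goto valid;          // too early in boot to check
 *   if (sp > top)                   hang;    // above the exception stack
 *   if (sp < top - EXCEPSTACK_SIZE) hang;    // below the exception stack
 *   // otherwise continue handling the exception on SP1
 */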
/*
 * check_kernel_stack
 *
 * Verifies that the kernel stack is aligned and mapped within an expected
 * stack address range. Note: happens before saving registers (in case we can't
 * save to kernel stack).
 *
 * Expected state:
 *   {x0, x1, sp} - saved
 *   x0 - SP_EL0
 *   x1 - Exception syndrome
 */
check_kernel_stack:
    stp x2, x3, [sp, SS64_X2] // Save {x2-x3}
    and x1, x1, #ESR_EC_MASK // Mask the exception class
    mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
    cmp x1, x2 // If we have a stack alignment exception
    b.eq Lcorrupt_stack // ...the stack is definitely corrupted
    mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
    cmp x1, x2 // If we have a data abort, we need to
    b.ne Lvalid_stack // ...validate the stack pointer
    mrs x1, TPIDR_EL1 // Get thread pointer
    ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack
    sub x3, x2, KERNEL_STACK_SIZE // Find bottom of kernel stack
    cmp x0, x2 // if (SP_EL0 >= kstack top)
    b.ge Ltest_istack // jump to istack test
    cmp x0, x3 // if (SP_EL0 > kstack bottom)
    b.gt Lvalid_stack // stack pointer valid
Ltest_istack:
    ldr x1, [x1, ACT_CPUDATAP] // Load the cpu data ptr
    ldr x2, [x1, CPU_INTSTACK_TOP] // Get top of istack
    sub x3, x2, INTSTACK_SIZE_NUM // Find bottom of istack
    cmp x0, x2 // if (SP_EL0 >= istack top)
    b.ge Lcorrupt_stack // corrupt stack pointer
    cmp x0, x3 // if (SP_EL0 > istack bottom)
    b.gt Lvalid_stack // stack pointer valid
Lcorrupt_stack:
    INIT_SAVED_STATE_FLAVORS sp, w0, w1
    mov x0, sp // Copy exception frame pointer to x0
    adrp x1, fleh_invalid_stack@page // Load address for fleh
    add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there
    ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
    ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3}
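/*
 * Roughly (editor's sketch), the routine above implements:
 *
 *   if (EC == ESR_EC_SP_ALIGN)   goto corrupt;   // misaligned SP is always bad
 *   if (EC != ESR_EC_DABORT_EL1) goto valid;     // only data aborts need the range check
 *   if (kstack_bottom < SP_EL0 && SP_EL0 < kstack_top) goto valid;
 *   if (istack_bottom < SP_EL0 && SP_EL0 < istack_top) goto valid;
 * corrupt:
 *   // build a saved-state frame on the exception stack and dispatch to
 *   // fleh_invalid_stack via fleh_dispatch64
 * valid:
 *   // restore x2/x3 and return to the caller
 */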
#if defined(KERNEL_INTEGRITY_KTRR)
check_ktrr_sctlr_trap:
    /* We may abort on an instruction fetch on reset when enabling the MMU by
     * writing SCTLR_EL1 because the page containing the privileged instruction is
     * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
     * would otherwise panic unconditionally. Check for the condition and return
     * safe execution to the caller on behalf of the faulting function.
     *
     * Expected register state:
     *  x22 - Kernel virtual base
     *  x23 - Kernel physical base
     */
    sub sp, sp, ARM_CONTEXT_SIZE // Make some space on the stack
    stp x0, x1, [sp, SS64_X0] // Stash x0, x1
    mrs x0, ESR_EL1 // Check ESR for instr. fetch abort
    and x0, x0, #0xffffffffffffffc0 // Mask off ESR.ISS.IFSC
    movz w1, #0x8600, lsl #16
    mrs x0, ELR_EL1 // Check for expected abort address
    adrp x1, _pinst_set_sctlr_trap_addr@page
    add x1, x1, _pinst_set_sctlr_trap_addr@pageoff
    sub x1, x1, x22 // Convert to physical address
    ldp x0, x1, [sp, SS64_X0] // Restore x0, x1
    add sp, sp, ARM_CONTEXT_SIZE // Clean up stack
    b.ne Lel1_sp1_synchronous_vector_continue
    msr ELR_EL1, lr // Return to caller
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
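/*
 * Editor's sketch of check_ktrr_sctlr_trap above. The 0x86000000 constant is
 * an ESR_EL1 value with EC = instruction abort taken at EL1 (0x21) and the IL
 * bit set; the IFSC field has been masked off before the compare.
 *
 *   if ((ESR_EL1 & ~IFSC_MASK) == (EC_IABORT_EL1 | IL) &&
 *       ELR_EL1 == phys(_pinst_set_sctlr_trap_addr)) {
 *       ELR_EL1 = lr;      // resume in the caller on its behalf
 *       // eret
 *   }
 *   // otherwise continue into the normal SP1 synchronous handler
 *
 * (IFSC_MASK is illustrative; the code masks the low syndrome bits directly.)
 */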
/* 64-bit first level exception handler dispatcher.
 * Completes register context saving and branches to FLEH.
 * Expects:
 *  {x0, x1, fp, lr, sp} - saved
 *  x0 - arm_context_t
 *  x1 - address of FLEH
 *  fp - previous stack frame if EL1
 */
fleh_dispatch64:
    /* Save arm_saved_state64 */
    SPILL_REGISTERS KERNEL_MODE

    /* If exception is from userspace, zero unused registers */
    and x23, x23, #(PSR64_MODE_EL_MASK)
    cmp x23, #(PSR64_MODE_EL0)
    /* x21, x22 cleared in common case below */
    /* fp/lr already cleared by EL0_64_VECTOR */

    mov x21, x0 // Copy arm_context_t pointer to x21
    mov x22, x1 // Copy handler routine to x22
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from
    b.ne 1f // kernel mode, so skip precise time update
    bl EXT(timer_state_event_user_to_kernel)
    mov x0, x21 // Reload arm_context_t pointer
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

    /* Dispatch to FLEH */
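/*
 * Editor's note: the dispatch goes through x22 (the handler address copied
 * above), with x0 pointing at the arm_context_t that was just spilled. Each
 * fleh_* handler below loads ESR_EL1/FAR_EL1 into x1/x2 and calls its
 * matching sleh_*() C routine, roughly:
 *
 *   void sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far);
 *
 * (signature shown for orientation only; the C sources are authoritative).
 */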
    .global EXT(fleh_synchronous)
LEXT(fleh_synchronous)
    mrs x1, ESR_EL1 // Load exception syndrome
    mrs x2, FAR_EL1 // Load fault address
    /* At this point, the LR contains the value of ELR_EL1. In the case of an
     * instruction prefetch abort, this will be the faulting pc, which we know
     * to be invalid. This will prevent us from backtracing through the
     * exception if we put it in our stack frame, so we load the LR from the
     * exception saved state instead.
     */
    and w3, w1, #(ESR_EC_MASK)
    lsr w3, w3, #(ESR_EC_SHIFT)
    mov w4, #(ESR_EC_IABORT_EL1)
    b.eq Lfleh_sync_load_lr
Lvalid_link_register:
    bl EXT(sleh_synchronous)
    b exception_return_dispatch

Lfleh_sync_load_lr:
    ldr lr, [x0, SS64_LR]
    b Lvalid_link_register
/* Shared prologue code for fleh_irq and fleh_fiq.
 * Does any interrupt bookkeeping we may want to do
 * before invoking the handler proper.
 * Expects:
 *  x0 - arm_context_t
 *  x23 - CPSR
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
.macro BEGIN_INTERRUPT_HANDLER
    ldr x23, [x22, ACT_CPUDATAP] // Get current cpu
    /* Update IRQ count */
    ldr w1, [x23, CPU_STAT_IRQ]
    add w1, w1, #1 // Increment count
    str w1, [x23, CPU_STAT_IRQ] // Update IRQ count
    ldr w1, [x23, CPU_STAT_IRQ_WAKE]
    add w1, w1, #1 // Increment count
    str w1, [x23, CPU_STAT_IRQ_WAKE] // Update post-wake IRQ count
    /* Increment preempt count */
    ldr w1, [x22, ACT_PREEMPT_CNT]
    str w1, [x22, ACT_PREEMPT_CNT]
    /* Store context in int state */
    str x0, [x23, CPU_INT_STATE] // Saved context in cpu_int_state
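/*
 * In effect (editor's sketch; field names illustrative, the offsets are the
 * CPU_STAT_* / ACT_PREEMPT_CNT / CPU_INT_STATE ones used above):
 *
 *   cpu_data->cpu_stat_irq++;             // total IRQ count
 *   cpu_data->cpu_stat_irq_wake++;        // IRQs since last wake
 *   thread->machine.preemption_count++;
 *   cpu_data->cpu_int_state = saved_state;   // so the C handler can find it
 */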
/* Shared epilogue code for fleh_irq and fleh_fiq.
 * Cleans up after the prologue, and may do a bit more
 * bookkeeping (kdebug related).
 * Expects:
 *  x22 - Live TPIDR_EL1 value (thread address)
 *  x23 - Address of the current CPU data structure
 *  w24 - 0 if kdebug is disabled, nonzero otherwise
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
.macro END_INTERRUPT_HANDLER
    /* Clear int context */
    str xzr, [x23, CPU_INT_STATE]
    /* Decrement preempt count */
    ldr w0, [x22, ACT_PREEMPT_CNT]
    cbnz w0, 1f // Detect underflow
    str w0, [x22, ACT_PREEMPT_CNT]
    /* Switch back to kernel stack */
    ldr x0, [x22, TH_KSTACKPTR]
    .global EXT(fleh_irq)
LEXT(fleh_irq)
    BEGIN_INTERRUPT_HANDLER
    END_INTERRUPT_HANDLER
    b exception_return_dispatch

    .global EXT(fleh_fiq_generic)
LEXT(fleh_fiq_generic)

    .global EXT(fleh_fiq)
LEXT(fleh_fiq)
    BEGIN_INTERRUPT_HANDLER
    END_INTERRUPT_HANDLER
    b exception_return_dispatch

    .global EXT(fleh_serror)
LEXT(fleh_serror)
    mrs x1, ESR_EL1 // Load exception syndrome
    mrs x2, FAR_EL1 // Load fault address
    b exception_return_dispatch
/*
 * Register state saved before we get here.
 */
fleh_invalid_stack:
    mrs x1, ESR_EL1 // Load exception syndrome
    str x1, [x0, SS64_ESR]
    mrs x2, FAR_EL1 // Load fault address
    str x2, [x0, SS64_FAR]
    bl EXT(sleh_invalid_stack) // Shouldn't return!
fleh_synchronous_sp1:
    mrs x1, ESR_EL1 // Load exception syndrome
    str x1, [x0, SS64_ESR]
    mrs x2, FAR_EL1 // Load fault address
    str x2, [x0, SS64_FAR]
    bl EXT(sleh_synchronous_sp1)
    b EXT(panic_with_thread_kernel_state)
Lsp1_irq_str:
    .asciz "IRQ exception taken while SP1 selected"

    b EXT(panic_with_thread_kernel_state)
Lsp1_fiq_str:
    .asciz "FIQ exception taken while SP1 selected"

    adr x0, Lsp1_serror_str
    b EXT(panic_with_thread_kernel_state)
Lsp1_serror_str:
    .asciz "Asynchronous exception taken while SP1 selected"
exception_return_dispatch:
    ldr w0, [x21, SS64_CPSR]
    tst w0, PSR64_MODE_EL_MASK
    b.ne return_to_kernel // return to kernel if M[3:2] > 0

    tbnz w0, #DAIF_IRQF_SHIFT, exception_return // Skip AST check if IRQ disabled
    mrs x3, TPIDR_EL1 // Load thread pointer
    ldr w1, [x3, ACT_PREEMPT_CNT] // Load preemption count
    msr DAIFSet, #DAIFSC_ALL // Disable exceptions
    cbnz x1, exception_return_unint_tpidr_x3 // If preemption disabled, skip AST check
    ldr x1, [x3, ACT_CPUDATAP] // Get current CPU data pointer
    ldr x2, [x1, CPU_PENDING_AST] // Get ASTs
    tst x2, AST_URGENT // If no urgent ASTs, skip ast_taken
    b.eq exception_return_unint_tpidr_x3
    mov sp, x21 // Switch to thread stack for preemption
    bl EXT(ast_taken_kernel) // Handle AST_URGENT
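/*
 * Editor's sketch of the kernel-return policy above:
 *
 *   if (saved CPSR has IRQs masked)         goto exception_return;
 *   disable interrupts;
 *   if (thread->machine.preemption_count)   goto exception_return_unint_tpidr_x3;
 *   if (!(cpu_pending_ast & AST_URGENT))    goto exception_return_unint_tpidr_x3;
 *   sp = thread stack; ast_taken_kernel();  // service the urgent AST
 *   // then continue into the uninterruptible exception return path
 */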
    .globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
    bl EXT(dtrace_thread_bootstrap)
    b EXT(thread_exception_return)

    .globl EXT(thread_exception_return)
LEXT(thread_exception_return)
    add x21, x0, ACT_CONTEXT

    //
    // Fall through to return_to_user from thread_exception_return.
    // Note that if we move return_to_user or insert a new routine
    // below thread_exception_return, the latter will need to change.
    //
return_to_user:
check_user_asts:
    mrs x3, TPIDR_EL1 // Load thread pointer
    str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user
    ldr w0, [x3, TH_RWLOCK_CNT]
    cbz w0, 1f // Detect unbalanced RW lock/unlock
    b rwlock_count_notzero
    ldr w0, [x3, ACT_PREEMPT_CNT]
    b preempt_count_notzero
    msr DAIFSet, #DAIFSC_ALL // Disable exceptions
    ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
    ldr x0, [x4, CPU_PENDING_AST] // Get ASTs
    cbnz x0, user_take_ast // If pending ASTs, go service them

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
    mov x19, x3 // Preserve thread pointer across function call
    bl EXT(timer_state_event_kernel_to_user)
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
    /*
     * Here we attempt to enable NEON access for EL0. If the last entry into the
     * kernel from user-space was due to an IRQ, the monitor will have disabled
     * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
     * check in with the monitor in order to reenable NEON for EL0 in exchange
     * for routing IRQs through the monitor (2). This way the monitor will
     * always 'own' either IRQs or EL0 NEON.
     *
     * If Watchtower is disabled or we did not enter the kernel through an IRQ
     * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3.
     *
     * Timeline (EL0 user / EL1 xnu / EL3 monitor): execution drops to the EL3
     * monitor on the IRQ (1), returns to xnu at EL1, and traps back into EL3
     * on the CPACR_EL1 access (2) before resuming EL0.
     */
    mov x0, #(CPACR_FPEN_ENABLE)
    /* Establish this thread's debug state as the live state on the selected CPU. */
    ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
    ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context
    ldr x0, [x3, ACT_DEBUGDATA]
    orr x1, x1, x0 // Thread debug state and live debug state both NULL?
    cbnz x1, user_set_debug_state_and_return // If one or the other non-null, go set debug state
    b exception_return_unint_tpidr_x3
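/*
 * Editor's sketch of the user-return path above (check_user_asts); field
 * names illustrative:
 *
 *   thread->iotier_override = -1;            // reset before returning to user
 *   assert(rwlock_count == 0 && preemption_count == 0);  // MACH_ASSERT panics below
 *   disable interrupts;
 *   if (cpu_pending_ast) { ast_taken_user(); goto check_user_asts; }
 *   timer_state_event_kernel_to_user();      // unless precise time is skipped
 *   (optionally check in with the EL3 monitor to re-enable EL0 NEON)
 *   if (thread debug state || live debug state) arm_debug_set(...);
 *   fall into exception_return_unint_tpidr_x3;
 */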
    //
    // Fall through from return_to_user to exception_return.
    // Note that if we move exception_return or add a new routine below
    // return_to_user, the latter will have to change.
    //
exception_return:
    msr DAIFSet, #DAIFSC_ALL // Disable exceptions
exception_return_unint:
    mrs x3, TPIDR_EL1 // Load thread pointer
exception_return_unint_tpidr_x3:
    mov sp, x21 // Reload the pcb pointer

    /* ARM64_TODO Reserve x18 until we decide what to do with it */
    str xzr, [sp, SS64_X18]

#if __ARM_KERNEL_PROTECT__
    /*
     * If we are going to eret to userspace, we must return through the EL0
     * eret mapping.
     */
    ldr w1, [sp, SS64_CPSR] // Load CPSR
    tbnz w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping // Skip if returning to EL1

    /* We need to switch to the EL0 mapping of this code to eret to EL0. */
    adrp x0, EXT(ExceptionVectorsBase)@page // Load vector base
    adrp x1, Lexception_return_restore_registers@page // Load target PC
    add x1, x1, Lexception_return_restore_registers@pageoff
    MOV64 x2, ARM_KERNEL_PROTECT_EXCEPTION_START // Load EL0 vector address
    sub x1, x1, x0 // Calculate delta
    add x0, x2, x1 // Convert KVA to EL0 vector address

Lskip_el0_eret_mapping:
#endif /* __ARM_KERNEL_PROTECT__ */
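/*
 * Editor's note on the __ARM_KERNEL_PROTECT__ block above: the eret must be
 * executed from the EL0-visible alias of this code, so the continuation
 * address is rebased into that alias:
 *
 *   el0_pc = ARM_KERNEL_PROTECT_EXCEPTION_START
 *          + (Lexception_return_restore_registers - ExceptionVectorsBase);
 *
 * i.e. the kernel virtual address is converted to the fixed EL0 exception
 * vector mapping by its offset from ExceptionVectorsBase.
 */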
Lexception_return_restore_registers:
    mov x0, sp // x0 = &pcb
    // Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
    AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24

    /* Restore special register state */
    ldr w3, [sp, NS64_FPSR]
    ldr w4, [sp, NS64_FPCR]

    msr ELR_EL1, x1 // Load the return address into ELR
    msr SPSR_EL1, x2 // Load the return CPSR into SPSR
    msr FPCR, x4 // Synchronized by ERET
#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
    /* if eret to userspace, disable JOP */
    tbnz w2, PSR64_MODE_EL_SHIFT, Lskip_disable_jop
    adrp x4, EXT(const_boot_args)@page
    add x4, x4, EXT(const_boot_args)@pageoff
    ldr x4, [x4, BA_BOOT_FLAGS]
    and x1, x4, BA_BOOT_FLAGS_DISABLE_JOP
    cbnz x1, Lskip_disable_jop // if global JOP disabled, don't touch SCTLR (kernel JOP is already off)
    and x1, x4, BA_BOOT_FLAGS_DISABLE_USER_JOP
    cbnz x1, Ldisable_jop // if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on)
    ldr x2, [x2, TH_DISABLE_USER_JOP]
    cbz x2, Lskip_disable_jop // if thread has JOP enabled, leave it on (kernel running with JOP on)
Ldisable_jop:
    MOV64 x1, SCTLR_JOP_KEYS_ENABLED
    MOV64 x1, SCTLR_EL1_EXPECTED
#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
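/*
 * Editor's sketch of the JOP handling above (PAC parts without APCFG/APSTS);
 * the thread field name is illustrative for the TH_DISABLE_USER_JOP offset:
 *
 *   if (returning to EL1)                        keep JOP keys enabled;
 *   else if (boot_args disable_jop)              nothing to do (already off);
 *   else if (boot_args disable_user_jop ||
 *            thread->machine.disable_user_jop)   clear SCTLR_JOP_KEYS_ENABLED
 *                                                in SCTLR_EL1 before the eret;
 *   else                                         leave JOP on for this thread.
 */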
    /* Restore arm_neon_saved_state64 */
    ldp q0, q1, [x0, NS64_Q0]
    ldp q2, q3, [x0, NS64_Q2]
    ldp q4, q5, [x0, NS64_Q4]
    ldp q6, q7, [x0, NS64_Q6]
    ldp q8, q9, [x0, NS64_Q8]
    ldp q10, q11, [x0, NS64_Q10]
    ldp q12, q13, [x0, NS64_Q12]
    ldp q14, q15, [x0, NS64_Q14]
    ldp q16, q17, [x0, NS64_Q16]
    ldp q18, q19, [x0, NS64_Q18]
    ldp q20, q21, [x0, NS64_Q20]
    ldp q22, q23, [x0, NS64_Q22]
    ldp q24, q25, [x0, NS64_Q24]
    ldp q26, q27, [x0, NS64_Q26]
    ldp q28, q29, [x0, NS64_Q28]
    ldp q30, q31, [x0, NS64_Q30]

    /* Restore arm_saved_state64 */
    // Skip x0, x1 - we're using them
    ldp x2, x3, [x0, SS64_X2]
    ldp x4, x5, [x0, SS64_X4]
    ldp x6, x7, [x0, SS64_X6]
    ldp x8, x9, [x0, SS64_X8]
    ldp x10, x11, [x0, SS64_X10]
    ldp x12, x13, [x0, SS64_X12]
    ldp x14, x15, [x0, SS64_X14]
    // Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
    ldp x18, x19, [x0, SS64_X18]
    ldp x20, x21, [x0, SS64_X20]
    ldp x22, x23, [x0, SS64_X22]
    ldp x24, x25, [x0, SS64_X24]
    ldp x26, x27, [x0, SS64_X26]
    ldr x28, [x0, SS64_X28]
    ldr fp, [x0, SS64_FP]
    // Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0

    // Restore stack pointer and our last two GPRs
    ldr x1, [x0, SS64_SP]

#if __ARM_KERNEL_PROTECT__
    ldr w18, [x0, SS64_CPSR] // Stash CPSR
#endif /* __ARM_KERNEL_PROTECT__ */

    ldp x0, x1, [x0, SS64_X0] // Restore the GPRs
#if __ARM_KERNEL_PROTECT__
    /* If we are going to eret to userspace, we must unmap the kernel. */
    tbnz w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch

    /* Update TCR to unmap the kernel. */
    MOV64 x18, TCR_EL1_USER

    /*
     * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
     * each other due to the microarchitecture.
     */
#if !defined(APPLE_ARM64_ARCH_FAMILY)

    /* Switch to the user ASID (low bit clear) for the task. */
    bic x18, x18, #(1 << TTBR_ASID_SHIFT)
    /* We don't need an ISB here, as the eret is synchronizing. */

Lskip_ttbr1_switch:
#endif /* __ARM_KERNEL_PROTECT__ */
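/*
 * Editor's sketch of the __ARM_KERNEL_PROTECT__ unmap above, taken only when
 * returning to EL0:
 *
 *   tcr = TCR_EL1_USER;     // shrink the active mappings to the user half
 *   ttbr0.asid &= ~1;       // switch to the user ASID (low bit clear)
 *   // no ISB needed; the eret that follows is context-synchronizing
 */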
    bl EXT(ast_taken_user) // Handle all ASTs, may return via continuation
    b check_user_asts // Now try again

user_set_debug_state_and_return:
    ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer
    isb // Synchronize context
    bl EXT(arm_debug_set) // Establish thread debug state in live regs
    b exception_return_unint // Continue, reloading the thread pointer
    str x0, [sp, #-16]! // We'll print thread pointer
    adr x0, L_underflow_str // Format string
    CALL_EXTERN panic // Game over

L_underflow_str:
    .asciz "Preemption count negative on thread %p"
rwlock_count_notzero:
    str x0, [sp, #-16]! // We'll print thread pointer
    ldr w0, [x0, TH_RWLOCK_CNT]
    adr x0, L_rwlock_count_notzero_str // Format string
    CALL_EXTERN panic // Game over

L_rwlock_count_notzero_str:
    .asciz "RW lock count not 0 on thread %p (%u)"

preempt_count_notzero:
    str x0, [sp, #-16]! // We'll print thread pointer
    ldr w0, [x0, ACT_PREEMPT_CNT]
    adr x0, L_preempt_count_notzero_str // Format string
    CALL_EXTERN panic // Game over

L_preempt_count_notzero_str:
    .asciz "preemption count not 0 on thread %p (%u)"
#endif /* MACH_ASSERT */
#if __ARM_KERNEL_PROTECT__
    /*
     * This symbol denotes the end of the exception vector/eret range; we page
     * align it so that we can avoid mapping other text in the EL0 exception
     * vector mapping.
     */
    .globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)
#endif /* __ARM_KERNEL_PROTECT__ */

    .globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)

/* ARM64_TODO Is globals_asm.h needed? */
//#include "globals_asm.h"

/* vim: set ts=4: */