/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/asm.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach/exception_types.h>
#include <config_dtrace.h>
#include <arm64/exception_asm.h>
#if __ARM_KERNEL_PROTECT__
#endif /* __ARM_KERNEL_PROTECT__ */

#if XNU_MONITOR
/*
 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
 *
 * Checks if an exception was taken from the PPL, and if so, trampolines back
 * to the PPL.
 *   x26 - 0 if the exception was taken while in the kernel, 1 if the
 *         exception was taken while in the PPL.
 */
.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
	cmp		x26, xzr
	b.eq	1f

	/* Return to the PPL. */
	mov		x15, #0
	mov		w10, #PPL_STATE_EXCEPTION
#if __APRR_SUPPORTED__
	b		Ldisable_aif_and_enter_ppl
#else
#error "XPRR configuration error"
#endif /* __APRR_SUPPORTED__ */
1:
.endmacro
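/*
 * Roughly, in C-style pseudocode (illustrative only; names follow the
 * assembly above):
 *
 *	if (x26 != 0) {				// exception was originally taken in the PPL
 *		w10 = PPL_STATE_EXCEPTION;
 *		goto Ldisable_aif_and_enter_ppl;	// trampoline back into the PPL
 *	}
 *	// else: fall through; the exception belongs to the kernel
 */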
#if __APRR_SUPPORTED__
/*
 * EL1_SP0_VECTOR_PPL_CHECK
 *
 * Check to see if the exception was taken by the kernel or the PPL.  Falls
 * through if kernel, hands off to the given label if PPL.  Expects to run on
 * SP1.
 *   arg0 - Label to go to if this was a PPL exception.
 */
.macro EL1_SP0_VECTOR_PPL_CHECK
	sub		sp, sp, ARM_CONTEXT_SIZE
	stp		x0, x1, [sp, SS64_X0]
	mrs		x0, APRR_EL1
	MOV64	x1, APRR_EL1_DEFAULT
	cmp		x0, x1
	b.ne	$0
	ldp		x0, x1, [sp, SS64_X0]
	add		sp, sp, ARM_CONTEXT_SIZE
.endmacro
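/*
 * Illustrative summary (not a definitive statement of the hardware contract):
 * APRR_EL1 reflects the currently active permission view, so if it still
 * matches APRR_EL1_DEFAULT the exception came from the kernel and we fall
 * through; otherwise we were in the PPL and hand off.  As pseudocode:
 *
 *	if (APRR_EL1 != APRR_EL1_DEFAULT)
 *		goto ppl_label;		// arg0
 */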
#define SWITCH_TO_SP0		1
#define STAY_ON_SP1			0

#define INVOKE_PREFLIGHT	0
#define NO_INVOKE_PREFLIGHT	1
/*
 * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
 *
 * Verify whether an exception came from the PPL or from the kernel.  If it came
 * from the PPL, save off the PPL state and transition out of the PPL.
 *   arg0 - Label to go to if this was a kernel exception
 *   arg1 - Label to go to (after leaving the PPL) if this was a PPL exception
 *   arg2 - Indicates if this should switch back to SP0
 *   x0   - xPRR_EL1_BR1 read by EL1_SP0_VECTOR_PPL_CHECK
 */
.macro EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
	/* Spill some more registers. */
	stp		x2, x3, [sp, SS64_X2]

	/*
	 * Check if the PPL is locked down; if not, we can treat this as a
	 * kernel exception.
	 */
	adrp	x1, EXT(pmap_ppl_locked_down)@page
	ldr		w1, [x1, #EXT(pmap_ppl_locked_down)@pageoff]
	cbz		w1, 2f

	/* Ensure that APRR_EL1 is actually in PPL mode. */
	MOV64	x1, APRR_EL1_PPL
	cmp		x0, x1
	b.ne	.

	/*
	 * Check if the CPU is in the PPL; if not we can treat this as a
	 * kernel exception.
	 */
	GET_PMAP_CPU_DATA	x3, x1, x2
	ldr		w1, [x3, PMAP_CPU_DATA_PPL_STATE]
	cmp		x1, #PPL_STATE_KERNEL
	b.eq	2f

	/* Ensure that the CPU is in the expected PPL state. */
	cmp		x1, #PPL_STATE_DISPATCH
	b.ne	.

	/* Mark the CPU as dealing with an exception. */
	mov		x1, #PPL_STATE_EXCEPTION
	str		w1, [x3, PMAP_CPU_DATA_PPL_STATE]

	/* Load the bounds of the PPL trampoline. */
	adrp	x0, EXT(ppl_no_exception_start)@page
	add		x0, x0, EXT(ppl_no_exception_start)@pageoff
	adrp	x1, EXT(ppl_no_exception_end)@page
	add		x1, x1, EXT(ppl_no_exception_end)@pageoff

	/*
	 * Ensure that the exception did not occur in the trampoline.  If it
	 * did, we are either being attacked or our state machine is
	 * horrifically broken.
	 */
	mrs		x2, ELR_EL1
	cmp		x2, x0
	b.lo	3f
	cmp		x2, x1
	b.hi	3f

	/* We might be under attack; spin. */
	b		.

3:
	/* Get the PPL save area. */
	ldr		x0, [x3, PMAP_CPU_DATA_SAVE_AREA]

	/* Save our x0, x1 state. */
	ldp		x2, x3, [sp, SS64_X0]
	stp		x2, x3, [x0, SS64_X0]

	/* Restore SP1 to its original state. */
	mov		x3, sp
	add		sp, sp, ARM_CONTEXT_SIZE

	.if $2 == SWITCH_TO_SP0
	/* Switch back to SP0. */
	msr		SPSel, #0
	mov		x2, sp
	.else
	/* Load the SP0 value. */
	mrs		x2, SP_EL0
	.endif

	/* Save off the stack pointer. */
	str		x2, [x0, SS64_SP]

	INIT_SAVED_STATE_FLAVORS x0, w1, w2

	/* Save the context that was interrupted. */
	ldp		x2, x3, [x3, SS64_X2]
	stp		fp, lr, [x0, SS64_FP]
	SPILL_REGISTERS	KERNEL_MODE

	/*
	 * Stash the function we wish to be invoked to deal with the exception;
	 * usually this is some preflight function for the fleh_* handler.
	 */
	adrp	x25, $1@page
	add		x25, x25, $1@pageoff

	/*
	 * Indicate that this is a PPL exception, and that we should return to
	 * the PPL.
	 */
	mov		x26, #1

	/* Transition back to kernel mode. */
	mov		x15, #PPL_EXIT_EXCEPTION
	b		ppl_return_to_kernel_mode
2:
	/* Restore SP1 state. */
	ldp		x2, x3, [sp, SS64_X2]
	ldp		x0, x1, [sp, SS64_X0]
	add		sp, sp, ARM_CONTEXT_SIZE

	/* Go to the specified label (usually the original exception vector). */
	b		$0
.endmacro
#endif /* __APRR_SUPPORTED__ */
#endif /* XNU_MONITOR */
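/*
 * For reference, the PPL-exit path implemented by
 * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE above, as C-style pseudocode
 * (illustrative only; names follow the assembly):
 *
 *	if (!pmap_ppl_locked_down || ppl_state == PPL_STATE_KERNEL)
 *		goto kernel_label;			// arg0: treat as a kernel exception
 *	if (ppl_state != PPL_STATE_DISPATCH)
 *		spin();					// broken state machine
 *	ppl_state = PPL_STATE_EXCEPTION;
 *	if (ELR_EL1 inside [ppl_no_exception_start, ppl_no_exception_end])
 *		spin();					// possibly under attack
 *	spill_registers(pmap_cpu_data->save_area);
 *	x25 = preflight_handler;			// arg1
 *	x26 = 1;					// return to the PPL after the exception
 *	x15 = PPL_EXIT_EXCEPTION;
 *	ppl_return_to_kernel_mode();
 */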
#define CBF_DISABLE	0
#define CBF_ENABLE	1
.macro COMPARE_BRANCH_FUSION
#if defined(APPLE_ARM64_ARCH_FAMILY)
	mrs		$1, ARM64_REG_HID1
	.if $0 == CBF_DISABLE
	orr		$1, $1, ARM64_REG_HID1_disCmpBrFusion
	.else
	mov		$2, ARM64_REG_HID1_disCmpBrFusion
	bic		$1, $1, $2
	.endif
	msr		ARM64_REG_HID1, $1
	.if $0 == CBF_DISABLE
	isb		sy
	.endif
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
.endmacro
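/*
 * Usage sketch (illustrative; the scratch register choice is an assumption):
 *
 *	COMPARE_BRANCH_FUSION CBF_DISABLE, x9, x10	// set disCmpBrFusion in HID1
 *	COMPARE_BRANCH_FUSION CBF_ENABLE,  x9, x10	// clear it again
 *
 * arg0 selects the direction, arg1/arg2 are caller-provided scratch
 * registers; per the .if above, the trailing isb is only emitted on the
 * disable path.
 */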
/*
 * MAP_KERNEL
 *
 * Restores the kernel EL1 mappings, if necessary.
 *
 * This may mutate x18.
 */
.macro MAP_KERNEL
#if __ARM_KERNEL_PROTECT__
	/* Switch to the kernel ASID (low bit set) for the task. */
	mrs		x18, TTBR0_EL1
	orr		x18, x18, #(1 << TTBR_ASID_SHIFT)
	msr		TTBR0_EL1, x18

	/*
	 * We eschew some barriers on Apple CPUs, as relative ordering of writes
	 * to the TTBRs and writes to the TCR should be ensured by the
	 * microarchitecture.
	 */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
	isb		sy
#endif

	/*
	 * Update the TCR to map the kernel now that we are using the kernel
	 * ASID.
	 */
	MOV64	x18, TCR_EL1_BOOT
	msr		TCR_EL1, x18
	isb		sy
#endif /* __ARM_KERNEL_PROTECT__ */
.endmacro
/*
 * BRANCH_TO_KVA_VECTOR
 *
 * Branches to the requested long exception vector in the kernelcache.
 *   arg0 - The label to branch to
 *   arg1 - The index of the label in exc_vectors_table
 *
 * This may mutate x18.
 */
.macro BRANCH_TO_KVA_VECTOR
#if __ARM_KERNEL_PROTECT__
	/*
	 * Find the kernelcache table for the exception vectors by accessing
	 * the per-CPU data.
	 */
	mrs		x18, TPIDR_EL1
	ldr		x18, [x18, ACT_CPUDATAP]
	ldr		x18, [x18, CPU_EXC_VECTORS]

	/*
	 * Get the handler for this exception and jump to it.
	 */
	ldr		x18, [x18, #($1 << 3)]
	br		x18
#else
	b		$0
#endif /* __ARM_KERNEL_PROTECT__ */
.endmacro
#if __ARM_KERNEL_PROTECT__
	.text
	.align 3
	.globl EXT(exc_vectors_table)
LEXT(exc_vectors_table)
	/* Table of exception handlers.
	 * These handlers sometimes contain deadloops.
	 * It's nice to have symbols for them when debugging. */
	.quad el1_sp0_synchronous_vector_long
	.quad el1_sp0_irq_vector_long
	.quad el1_sp0_fiq_vector_long
	.quad el1_sp0_serror_vector_long
	.quad el1_sp1_synchronous_vector_long
	.quad el1_sp1_irq_vector_long
	.quad el1_sp1_fiq_vector_long
	.quad el1_sp1_serror_vector_long
	.quad el0_synchronous_vector_64_long
	.quad el0_irq_vector_64_long
	.quad el0_fiq_vector_64_long
	.quad el0_serror_vector_64_long
#endif /* __ARM_KERNEL_PROTECT__ */
	.text
#if __ARM_KERNEL_PROTECT__
	/*
	 * We need this to be on a page boundary so that we can avoid mapping
	 * other text along with it.  As this must be on the VM page boundary
	 * (due to how the coredumping code currently works), this will be a
	 * 16KB page boundary.
	 */
	.align 14
#else
	.align 12
#endif /* __ARM_KERNEL_PROTECT__ */
	.globl EXT(ExceptionVectorsBase)
LEXT(ExceptionVectorsBase)
Lel1_sp0_synchronous_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0

	.text
	.align 7
Lel1_sp0_irq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1

	.text
	.align 7
Lel1_sp0_fiq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2

	.text
	.align 7
Lel1_sp0_serror_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3

	.text
	.align 7
Lel1_sp1_synchronous_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4

	.text
	.align 7
Lel1_sp1_irq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5

	.text
	.align 7
Lel1_sp1_fiq_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6

	.text
	.align 7
Lel1_sp1_serror_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7

	.text
	.align 7
Lel0_synchronous_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8

	.text
	.align 7
Lel0_irq_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9

	.text
	.align 7
Lel0_fiq_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10

	.text
	.align 7
Lel0_serror_vector_64:
	MAP_KERNEL
	BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11

	/* Fill out the rest of the page */
	.align 12
/*********************************
 * END OF EXCEPTION VECTORS PAGE *
 *********************************/
.macro EL1_SP0_VECTOR
	msr		SPSel, #0							// Switch to SP0
	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
	stp		fp, lr, [sp, SS64_FP]				// Save fp and lr to exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov		x0, sp								// Copy saved state pointer to x0
.endmacro
el1_sp0_synchronous_vector_long:
#if XNU_MONITOR && __APRR_SUPPORTED__
	/*
	 * We do not have enough space for new instructions in this vector, so
	 * jump to outside code to check if this exception was taken in the PPL.
	 */
	b		el1_sp0_synchronous_vector_ppl_check
Lel1_sp0_synchronous_vector_kernel:
#endif
	sub		sp, sp, ARM_CONTEXT_SIZE			// Make space on the exception stack
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the stack
	mrs		x1, ESR_EL1							// Get the exception syndrome
	/* If the stack pointer is corrupt, it will manifest either as a data abort
	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
	 * these quickly by testing bit 5 of the exception class.
	 */
	tbz		x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
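	/*
	 * Why bit 5 works (illustrative note): the EC values of interest are
	 * ESR_EC_DABORT_EL1 (0x25 = 0b100101) and ESR_EC_SP_ALIGN (0x26 =
	 * 0b100110); both have bit 5 of the EC set, while the common same-EL
	 * synchronous exceptions that cannot indicate a corrupt SP leave it
	 * clear, so a single tbz filters them without a full compare.
	 * check_kernel_stack below still performs the precise EC comparison.
	 */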
	mrs		x0, SP_EL0							// Get SP_EL0
	stp		fp, lr, [sp, SS64_FP]				// Save fp, lr to the stack
	str		x0, [sp, SS64_SP]					// Save sp to the stack
	bl		check_kernel_stack
	ldp		fp, lr, [sp, SS64_FP]				// Restore fp, lr
Lkernel_stack_valid:
	ldp		x0, x1, [sp, SS64_X0]				// Restore x0, x1
	add		sp, sp, ARM_CONTEXT_SIZE			// Restore SP1
	EL1_SP0_VECTOR
	adrp	x1, EXT(fleh_synchronous)@page		// Load address for fleh
	add		x1, x1, EXT(fleh_synchronous)@pageoff
	b		fleh_dispatch64
el1_sp0_irq_vector_long:
#if XNU_MONITOR && __APRR_SUPPORTED__
	EL1_SP0_VECTOR_PPL_CHECK el1_sp0_irq_vector_not_in_kernel_mode
Lel1_sp0_irq_vector_kernel:
#endif
	EL1_SP0_VECTOR
	mrs		x1, TPIDR_EL1
	ldr		x1, [x1, ACT_CPUDATAP]
	ldr		x1, [x1, CPU_ISTACKPTR]
	mov		sp, x1
	adrp	x1, EXT(fleh_irq)@page				// Load address for fleh
	add		x1, x1, EXT(fleh_irq)@pageoff
	b		fleh_dispatch64
el1_sp0_fiq_vector_long:
	// ARM64_TODO write optimized decrementer
#if XNU_MONITOR && __APRR_SUPPORTED__
	EL1_SP0_VECTOR_PPL_CHECK el1_sp0_fiq_vector_not_in_kernel_mode
Lel1_sp0_fiq_vector_kernel:
#endif
	EL1_SP0_VECTOR
	mrs		x1, TPIDR_EL1
	ldr		x1, [x1, ACT_CPUDATAP]
	ldr		x1, [x1, CPU_ISTACKPTR]
	mov		sp, x1
	adrp	x1, EXT(fleh_fiq)@page				// Load address for fleh
	add		x1, x1, EXT(fleh_fiq)@pageoff
	b		fleh_dispatch64
el1_sp0_serror_vector_long:
#if XNU_MONITOR && __APRR_SUPPORTED__
	EL1_SP0_VECTOR_PPL_CHECK el1_sp0_serror_vector_not_in_kernel_mode
Lel1_sp0_serror_vector_kernel:
#endif
	EL1_SP0_VECTOR
	adrp	x1, EXT(fleh_serror)@page			// Load address for fleh
	add		x1, x1, EXT(fleh_serror)@pageoff
	b		fleh_dispatch64
.macro EL1_SP1_VECTOR
	sub		sp, sp, ARM_CONTEXT_SIZE			// Create exception frame
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to exception frame
	add		x0, sp, ARM_CONTEXT_SIZE			// Calculate the original stack pointer
	str		x0, [sp, SS64_SP]					// Save stack pointer to exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	stp		fp, lr, [sp, SS64_FP]				// Save fp and lr to exception frame
	mov		x0, sp								// Copy saved state pointer to x0
.endmacro
el1_sp1_synchronous_vector_long:
	b		check_exception_stack
Lel1_sp1_synchronous_valid_stack:
#if defined(KERNEL_INTEGRITY_KTRR)
	b		check_ktrr_sctlr_trap
Lel1_sp1_synchronous_vector_continue:
#endif
	EL1_SP1_VECTOR
	adrp	x1, fleh_synchronous_sp1@page
	add		x1, x1, fleh_synchronous_sp1@pageoff
	b		fleh_dispatch64

el1_sp1_irq_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_irq_sp1@page
	add		x1, x1, fleh_irq_sp1@pageoff
	b		fleh_dispatch64

el1_sp1_fiq_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_fiq_sp1@page
	add		x1, x1, fleh_fiq_sp1@pageoff
	b		fleh_dispatch64

el1_sp1_serror_vector_long:
	EL1_SP1_VECTOR
	adrp	x1, fleh_serror_sp1@page
	add		x1, x1, fleh_serror_sp1@pageoff
	b		fleh_dispatch64
#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
/*
 * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1.
 * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start.
 */
#define SCTLR_EL1_INITIAL	(SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED)
#define SCTLR_EL1_EXPECTED	((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED)
#endif
.macro EL0_64_VECTOR
	mov		x18, #0								// Zero x18 to avoid leaking data to user SS
	stp		x0, x1, [sp, #-16]!					// Save x0 and x1 to the exception stack
#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
	// enable JOP for kernel
	adrp	x0, EXT(const_boot_args)@page
	add		x0, x0, EXT(const_boot_args)@pageoff
	ldr		x0, [x0, BA_BOOT_FLAGS]
	and		x0, x0, BA_BOOT_FLAGS_DISABLE_JOP
	cbnz	x0, 1f
	// if disable jop is set, don't touch SCTLR (it's already off)
	// if (!boot_args->kernel_jop_disable) {
	mrs		x0, SCTLR_EL1
	tbnz	x0, SCTLR_PACIA_ENABLED_SHIFT, 1f
	// turn on jop for kernel if it isn't already on
	// if (!jop_running) {
	MOV64	x1, SCTLR_JOP_KEYS_ENABLED
	orr		x0, x0, x1
	msr		SCTLR_EL1, x0
	isb		sy
	MOV64	x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED
	cmp		x0, x1
	bne		.
	// 	}
	// }
1:
#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
	mrs		x0, TPIDR_EL1						// Load the thread register
	mrs		x1, SP_EL0							// Load the user stack pointer
	add		x0, x0, ACT_CONTEXT					// Calculate where we store the user context pointer
	ldr		x0, [x0]							// Load the user context pointer
	str		x1, [x0, SS64_SP]					// Store the user stack pointer in the user PCB
	msr		SP_EL0, x0							// Copy the user PCB pointer to SP0
	ldp		x0, x1, [sp], #16					// Restore x0 and x1 from the exception stack
	msr		SPSel, #0							// Switch to SP0
	stp		x0, x1, [sp, SS64_X0]				// Save x0, x1 to the user PCB
	stp		fp, lr, [sp, SS64_FP]				// Save fp and lr to the user PCB
	mov		fp, #0								// Clear the fp and lr for the
	mov		lr, #0								// debugger stack frame
	mov		x0, sp								// Copy the user PCB pointer to x0
.endmacro
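/*
 * Net effect of EL0_64_VECTOR, as pseudocode (illustrative; names follow the
 * assembly):
 *
 *	pcb = thread->ACT_CONTEXT;		// user saved-state area (PCB)
 *	pcb->ss_64.sp = SP_EL0;			// stash the user stack pointer
 *	SP_EL0 = pcb;				// SP0 now points at the PCB...
 *	pcb->ss_64.{x0, x1, fp, lr} = ...;	// ...so stores via sp fill it in
 *	fp = lr = 0;				// terminate the debugger backtrace
 *	x0 = pcb;
 */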
el0_synchronous_vector_64_long:
	EL0_64_VECTOR
	mrs		x1, TPIDR_EL1						// Load the thread register
	ldr		x1, [x1, TH_KSTACKPTR]				// Load the top of the kernel stack to x1
	mov		sp, x1								// Set the stack pointer to the kernel stack
	adrp	x1, EXT(fleh_synchronous)@page		// Load address for fleh
	add		x1, x1, EXT(fleh_synchronous)@pageoff
	b		fleh_dispatch64
el0_irq_vector_64_long:
	EL0_64_VECTOR
	mrs		x1, TPIDR_EL1
	ldr		x1, [x1, ACT_CPUDATAP]
	ldr		x1, [x1, CPU_ISTACKPTR]
	mov		sp, x1								// Set the stack pointer to the kernel stack
	adrp	x1, EXT(fleh_irq)@page				// Load address for fleh
	add		x1, x1, EXT(fleh_irq)@pageoff
	b		fleh_dispatch64
el0_fiq_vector_64_long:
	EL0_64_VECTOR
	mrs		x1, TPIDR_EL1
	ldr		x1, [x1, ACT_CPUDATAP]
	ldr		x1, [x1, CPU_ISTACKPTR]
	mov		sp, x1								// Set the stack pointer to the kernel stack
	adrp	x1, EXT(fleh_fiq)@page				// Load address for fleh
	add		x1, x1, EXT(fleh_fiq)@pageoff
	b		fleh_dispatch64
el0_serror_vector_64_long:
	EL0_64_VECTOR
	mrs		x1, TPIDR_EL1						// Load the thread register
	ldr		x1, [x1, TH_KSTACKPTR]				// Load the top of the kernel stack to x1
	mov		sp, x1								// Set the stack pointer to the kernel stack
	adrp	x1, EXT(fleh_serror)@page			// Load address for fleh
	add		x1, x1, EXT(fleh_serror)@pageoff
	b		fleh_dispatch64
#if XNU_MONITOR && __APRR_SUPPORTED__
el1_sp0_synchronous_vector_ppl_check:
	EL1_SP0_VECTOR_PPL_CHECK el1_sp0_synchronous_vector_not_in_kernel_mode

	/* Jump back to the primary exception vector if we fell through. */
	b		Lel1_sp0_synchronous_vector_kernel
#endif
/*
 * check_exception_stack
 *
 * Verifies that stack pointer at SP1 is within exception stack
 * If not, will simply hang as we have no more stack to fall back on.
 *
 * Expectations:
 *	{x0, x1} - saved
 *	x18 - Unused, as the exception vectors would have used it
 */
	.text
	.align 2
check_exception_stack:
	mrs		x18, TPIDR_EL1						// Get thread pointer
	cbz		x18, Lvalid_exception_stack			// Thread context may not be set early in boot
	ldr		x18, [x18, ACT_CPUDATAP]
	cbz		x18, .								// If thread context is set, cpu data should be too
	ldr		x18, [x18, CPU_EXCEPSTACK_TOP]
	cmp		sp, x18
	b.gt	.									// Hang if above exception stack top
	sub		x18, x18, EXCEPSTACK_SIZE_NUM		// Find bottom of exception stack
	cmp		sp, x18
	b.lt	.									// Hang if below exception stack bottom
Lvalid_exception_stack:
	mov		x18, #0
	b		Lel1_sp1_synchronous_valid_stack
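/*
 * Equivalent check, as pseudocode (illustrative):
 *
 *	if (thread == NULL) return;			// early boot; nothing to check
 *	top = cpu_data->excepstack_top;
 *	if (sp > top || sp < top - EXCEPSTACK_SIZE_NUM)
 *		for (;;);				// hang: no stack left to recover on
 */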
/*
 * check_kernel_stack
 *
 * Verifies that the kernel stack is aligned and mapped within an expected
 * stack address range. Note: happens before saving registers (in case we can't
 * save to kernel stack).
 *
 * Expectations:
 *	{x0, x1, sp} - saved
 *	x0 - SP_EL0
 *	x1 - Exception syndrome
 *	sp - Saved state
 */
	.text
	.align 2
check_kernel_stack:
	stp		x2, x3, [sp, SS64_X2]				// Save {x2-x3}
	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
	cmp		x1, x2								// If we have a stack alignment exception
	b.eq	Lcorrupt_stack						// ...the stack is definitely corrupted
	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
	cmp		x1, x2								// If we have a data abort, we need to
	b.ne	Lvalid_stack						// ...validate the stack pointer
	mrs		x1, TPIDR_EL1						// Get thread pointer
Ltest_kstack:
	ldr		x2, [x1, TH_KSTACKPTR]				// Get top of kernel stack
	sub		x3, x2, KERNEL_STACK_SIZE			// Find bottom of kernel stack
	cmp		x0, x2								// if (SP_EL0 >= kstack top)
	b.ge	Ltest_istack						//    jump to istack test
	cmp		x0, x3								// if (SP_EL0 > kstack bottom)
	b.gt	Lvalid_stack						//    stack pointer valid
Ltest_istack:
	ldr		x1, [x1, ACT_CPUDATAP]				// Load the cpu data ptr
	ldr		x2, [x1, CPU_INTSTACK_TOP]			// Get top of istack
	sub		x3, x2, INTSTACK_SIZE_NUM			// Find bottom of istack
	cmp		x0, x2								// if (SP_EL0 >= istack top)
	b.ge	Lcorrupt_stack						//    corrupt stack pointer
	cmp		x0, x3								// if (SP_EL0 > istack bottom)
	b.gt	Lvalid_stack						//    stack pointer valid
Lcorrupt_stack:
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov		x0, sp								// Copy exception frame pointer to x0
	adrp	x1, fleh_invalid_stack@page			// Load address for fleh
	add		x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
	ldp		x2, x3, [sp, SS64_X2]				// Restore {x2-x3}
	b		fleh_dispatch64
Lvalid_stack:
	ldp		x2, x3, [sp, SS64_X2]				// Restore {x2-x3}
	ret
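/*
 * Summary of the checks above, as pseudocode (illustrative):
 *
 *	if (EC == ESR_EC_SP_ALIGN) goto corrupt;
 *	if (EC != ESR_EC_DABORT_EL1) return;	// only data aborts are suspect
 *	if (within(SP_EL0, kstack) || within(SP_EL0, istack)) return;
 * corrupt:
 *	fleh_dispatch64(fleh_invalid_stack);	// dispatch on a known-good SP
 */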
#if defined(KERNEL_INTEGRITY_KTRR)
	.text
	.align 2
check_ktrr_sctlr_trap:
	/* We may abort on an instruction fetch on reset when enabling the MMU by
	 * writing SCTLR_EL1 because the page containing the privileged instruction is
	 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
	 * would otherwise panic unconditionally. Check for the condition and return
	 * safe execution to the caller on behalf of the faulting function.
	 *
	 * Expected register state:
	 *  x22 - Kernel virtual base
	 *  x23 - Kernel physical base
	 */
	sub		sp, sp, ARM_CONTEXT_SIZE			// Make some space on the stack
	stp		x0, x1, [sp, SS64_X0]				// Stash x0, x1
	mrs		x0, ESR_EL1							// Check ESR for instr. fetch abort
	and		x0, x0, #0xffffffffffffffc0			// Mask off ESR.ISS.IFSC
	movz	w1, #0x8600, lsl #16
	cmp		x0, x1
	mrs		x0, ELR_EL1							// Check for expected abort address
	adrp	x1, _pinst_set_sctlr_trap_addr@page
	add		x1, x1, _pinst_set_sctlr_trap_addr@pageoff
	sub		x1, x1, x22							// Convert to physical address
	add		x1, x1, x23
	ccmp	x0, x1, #0, eq
	ldp		x0, x1, [sp, SS64_X0]				// Restore x0, x1
	add		sp, sp, ARM_CONTEXT_SIZE			// Clean up stack
	b.ne	Lel1_sp1_synchronous_vector_continue
	msr		ELR_EL1, lr							// Return to caller
	eret
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
/* 64-bit first level exception handler dispatcher.
 * Completes register context saving and branches to FLEH.
 * Expects:
 *  {x0, x1, fp, lr, sp} - saved
 *  x0 - arm_context_t
 *  x1 - address of FLEH
 *  fp - previous stack frame if EL1
 *  lr - unused
 *  sp - kernel stack
 */
	.text
	.align 2
fleh_dispatch64:
	/* Save arm_saved_state64 */
	SPILL_REGISTERS KERNEL_MODE

	/* If exception is from userspace, zero unused registers */
	and		x23, x23, #(PSR64_MODE_EL_MASK)
	cmp		x23, #(PSR64_MODE_EL0)
	bne		1f

	mov		x2, #0
	mov		x3, #0
	mov		x4, #0
	mov		x5, #0
	mov		x6, #0
	mov		x7, #0
	mov		x8, #0
	mov		x9, #0
	mov		x10, #0
	mov		x11, #0
	mov		x12, #0
	mov		x13, #0
	mov		x14, #0
	mov		x15, #0
	mov		x16, #0
	mov		x17, #0
	mov		x18, #0
	mov		x19, #0
	mov		x20, #0
	/* x21, x22 cleared in common case below */
	mov		x23, #0
	mov		x24, #0
	mov		x25, #0
	mov		x26, #0
	mov		x27, #0
	mov		x28, #0
	/* fp/lr already cleared by EL0_64_VECTOR */
1:

	mov		x21, x0								// Copy arm_context_t pointer to x21
	mov		x22, x1								// Copy handler routine to x22

#if XNU_MONITOR
	/* Zero x26 to indicate that this should not return to the PPL. */
	mov		x26, #0
#endif

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	tst		x23, PSR64_MODE_EL_MASK				// If any EL MODE bits are set, we're coming from
	b.ne	1f									// kernel mode, so skip precise time update
	PUSH_FRAME
	bl		EXT(timer_state_event_user_to_kernel)
	POP_FRAME
	mov		x0, x21								// Reload arm_context_t pointer
1:
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

	/* Dispatch to FLEH */

	br		x22
	.text
	.align 2
	.global EXT(fleh_synchronous)
LEXT(fleh_synchronous)
	mrs		x1, ESR_EL1							// Load exception syndrome
	mrs		x2, FAR_EL1							// Load fault address

	/* At this point, the LR contains the value of ELR_EL1. In the case of an
	 * instruction prefetch abort, this will be the faulting pc, which we know
	 * to be invalid. This will prevent us from backtracing through the
	 * exception if we put it in our stack frame, so we load the LR from the
	 * exception saved state instead.
	 */
	and		w3, w1, #(ESR_EC_MASK)
	lsr		w3, w3, #(ESR_EC_SHIFT)
	mov		w4, #(ESR_EC_IABORT_EL1)
	cmp		w3, w4
	b.eq	Lfleh_sync_load_lr
Lvalid_link_register:
	PUSH_FRAME
	bl		EXT(sleh_synchronous)
	POP_FRAME

#if XNU_MONITOR
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	b		exception_return_dispatch

Lfleh_sync_load_lr:
	ldr		lr, [x0, SS64_LR]
	b		Lvalid_link_register
/* Shared prologue code for fleh_irq and fleh_fiq.
 * Does any interrupt bookkeeping we may want to do
 * before invoking the handler proper.
 * Expects:
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
.macro BEGIN_INTERRUPT_HANDLER
	mrs		x22, TPIDR_EL1
	ldr		x23, [x22, ACT_CPUDATAP]			// Get current cpu
	/* Update IRQ count */
	ldr		w1, [x23, CPU_STAT_IRQ]
	add		w1, w1, #1							// Increment count
	str		w1, [x23, CPU_STAT_IRQ]				// Update IRQ count
	ldr		w1, [x23, CPU_STAT_IRQ_WAKE]
	add		w1, w1, #1							// Increment count
	str		w1, [x23, CPU_STAT_IRQ_WAKE]		// Update post-wake IRQ count
	/* Increment preempt count */
	ldr		w1, [x22, ACT_PREEMPT_CNT]
	add		w1, w1, #1
	str		w1, [x22, ACT_PREEMPT_CNT]
	/* Store context in int state */
	str		x0, [x23, CPU_INT_STATE]			// Saved context in cpu_int_state
.endmacro
/* Shared epilogue code for fleh_irq and fleh_fiq.
 * Cleans up after the prologue, and may do a bit more
 * bookkeeping (kdebug related).
 * Expects:
 *  x22 - Live TPIDR_EL1 value (thread address)
 *  x23 - Address of the current CPU data structure
 *  w24 - 0 if kdebug is disabled, nonzero otherwise
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
.macro END_INTERRUPT_HANDLER
	/* Clear int context */
	str		xzr, [x23, CPU_INT_STATE]
	/* Decrement preempt count */
	ldr		w0, [x22, ACT_PREEMPT_CNT]
	cbnz	w0, 1f								// Detect underflow
	b		preempt_underflow
1:
	sub		w0, w0, #1
	str		w0, [x22, ACT_PREEMPT_CNT]
	/* Switch back to kernel stack */
	ldr		x0, [x22, TH_KSTACKPTR]
	mov		sp, x0
.endmacro
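/*
 * The two macros pair up around the slow-path handler, as in fleh_irq below
 * (illustrative):
 *
 *	BEGIN_INTERRUPT_HANDLER			// count the IRQ, bump the preempt
 *	PUSH_FRAME				// count, publish saved state in
 *	bl	EXT(sleh_irq)			// cpu_int_state
 *	POP_FRAME
 *	END_INTERRUPT_HANDLER			// unpublish state, drop the preempt
 *						// count, back to the kernel stack
 */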
	.text
	.align 2
	.global EXT(fleh_irq)
LEXT(fleh_irq)
	BEGIN_INTERRUPT_HANDLER
	PUSH_FRAME
	bl		EXT(sleh_irq)
	POP_FRAME
	END_INTERRUPT_HANDLER

#if XNU_MONITOR
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	b		exception_return_dispatch
	.text
	.align 2
	.global EXT(fleh_fiq_generic)
LEXT(fleh_fiq_generic)
	PANIC_UNIMPLEMENTED

	.text
	.align 2
	.global EXT(fleh_fiq)
LEXT(fleh_fiq)
	BEGIN_INTERRUPT_HANDLER
	PUSH_FRAME
	bl		EXT(sleh_fiq)
	POP_FRAME
	END_INTERRUPT_HANDLER

#if XNU_MONITOR
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	b		exception_return_dispatch
	.text
	.align 2
	.global EXT(fleh_serror)
LEXT(fleh_serror)
	mrs		x1, ESR_EL1							// Load exception syndrome
	mrs		x2, FAR_EL1							// Load fault address

	PUSH_FRAME
	bl		EXT(sleh_serror)
	POP_FRAME

#if XNU_MONITOR
	CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

	b		exception_return_dispatch
/*
 * Register state saved before we get here.
 */
	.text
	.align 2
fleh_invalid_stack:
	mrs		x1, ESR_EL1							// Load exception syndrome
	str		x1, [x0, SS64_ESR]
	mrs		x2, FAR_EL1							// Load fault address
	str		x2, [x0, SS64_FAR]
	PUSH_FRAME
	bl		EXT(sleh_invalid_stack)				// Shouldn't return!
	b		.
	.text
	.align 2
fleh_synchronous_sp1:
	mrs		x1, ESR_EL1							// Load exception syndrome
	str		x1, [x0, SS64_ESR]
	mrs		x2, FAR_EL1							// Load fault address
	str		x2, [x0, SS64_FAR]
	PUSH_FRAME
	bl		EXT(sleh_synchronous_sp1)
	b		.
	.text
	.align 2
fleh_irq_sp1:
	mov		x1, x0
	adr		x0, Lsp1_irq_str
	b		EXT(panic_with_thread_kernel_state)
Lsp1_irq_str:
	.asciz "IRQ exception taken while SP1 selected"

	.text
	.align 2
fleh_fiq_sp1:
	mov		x1, x0
	adr		x0, Lsp1_fiq_str
	b		EXT(panic_with_thread_kernel_state)
Lsp1_fiq_str:
	.asciz "FIQ exception taken while SP1 selected"

	.text
	.align 2
fleh_serror_sp1:
	mov		x1, x0
	adr		x0, Lsp1_serror_str
	b		EXT(panic_with_thread_kernel_state)
Lsp1_serror_str:
	.asciz "Asynchronous exception taken while SP1 selected"
	.text
	.align 2
exception_return_dispatch:
	ldr		w0, [x21, SS64_CPSR]
	tst		w0, PSR64_MODE_EL_MASK
	b.ne	return_to_kernel					// return to kernel if M[3:2] > 0
	b		return_to_user
	.text
	.align 2
return_to_kernel:
	tbnz	w0, #DAIF_IRQF_SHIFT, exception_return	// Skip AST check if IRQ disabled
	mrs		x3, TPIDR_EL1						// Load thread pointer
	ldr		w1, [x3, ACT_PREEMPT_CNT]			// Load preemption count
	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
	cbnz	x1, exception_return_unint_tpidr_x3	// If preemption disabled, skip AST check
	ldr		x1, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
	ldr		x2, [x1, CPU_PENDING_AST]			// Get ASTs
	tst		x2, AST_URGENT						// If no urgent ASTs, skip ast_taken
	b.eq	exception_return_unint_tpidr_x3
	mov		sp, x21								// Switch to thread stack for preemption
	PUSH_FRAME
	bl		EXT(ast_taken_kernel)				// Handle AST_URGENT
	POP_FRAME
	b		exception_return
	.text
	.globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	bl		EXT(dtrace_thread_bootstrap)
#endif
	b		EXT(thread_exception_return)
	.text
	.globl EXT(thread_exception_return)
LEXT(thread_exception_return)
	mrs		x0, TPIDR_EL1
	add		x21, x0, ACT_CONTEXT
	ldr		x21, [x21]

#if XNU_MONITOR
	/* x26 == 0 means "do not return to the PPL" on exception return. */
	mov		x26, #0
#endif

	//
	// Fall Through to return_to_user from thread_exception_return.
	// Note that if we move return_to_user or insert a new routine
	// below thread_exception_return, the latter will need to change.
	//
	.text
return_to_user:
check_user_asts:
	mrs		x3, TPIDR_EL1						// Load thread pointer

	movn	w2, #0
	str		w2, [x3, TH_IOTIER_OVERRIDE]		// Reset IO tier override to -1 before returning to user

#if MACH_ASSERT
	ldr		w0, [x3, TH_RWLOCK_CNT]
	cbz		w0, 1f								// Detect unbalanced RW lock/unlock
	b		rwlock_count_notzero
1:
	ldr		w0, [x3, ACT_PREEMPT_CNT]
	cbz		w0, 1f
	b		preempt_count_notzero
1:
#endif
	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
	ldr		x0, [x4, CPU_PENDING_AST]			// Get ASTs
	cbnz	x0, user_take_ast					// If pending ASTs, go service them

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	mov		x19, x3								// Preserve thread pointer across function call
	PUSH_FRAME
	bl		EXT(timer_state_event_kernel_to_user)
	POP_FRAME
	mov		x3, x19								// Restore thread pointer
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
	/* Watchtower
	 *
	 * Here we attempt to enable NEON access for EL0. If the last entry into the
	 * kernel from user-space was due to an IRQ, the monitor will have disabled
	 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
	 * check in with the monitor in order to reenable NEON for EL0 in exchange
	 * for routing IRQs through the monitor (2). This way the monitor will
	 * always 'own' either IRQs or EL0 NEON.
	 *
	 * If Watchtower is disabled or we did not enter the kernel through an IRQ
	 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
	 * Watchtower.
	 *
	 * EL0 user ________ IRQ                                        ______
	 * EL1 xnu           \   ______________________ CPACR_EL1   ___/
	 * EL3 monitor        \_/                                \__/
	 *
	 *                    (1)                                 (2)
	 */

	mov		x0, #(CPACR_FPEN_ENABLE)
	msr		CPACR_EL1, x0
#endif
	/* Establish this thread's debug state as the live state on the selected CPU. */
	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
	ldr		x1, [x4, CPU_USER_DEBUG]			// Get Debug context
	ldr		x0, [x3, ACT_DEBUGDATA]
	orr		x1, x1, x0							// Thread debug state and live debug state both NULL?
	cbnz	x1, user_set_debug_state_and_return	// If one or the other non-null, go set debug state
	b		exception_return_unint_tpidr_x3
	//
	// Fall through from return_to_user to exception_return.
	// Note that if we move exception_return or add a new routine below
	// return_to_user, the latter will have to change.
	//
exception_return:
	msr		DAIFSet, #DAIFSC_ALL				// Disable exceptions
exception_return_unint:
	mrs		x3, TPIDR_EL1						// Load thread pointer
exception_return_unint_tpidr_x3:
	mov		sp, x21								// Reload the pcb pointer

	/* ARM64_TODO Reserve x18 until we decide what to do with it */
	str		xzr, [sp, SS64_X18]
#if __ARM_KERNEL_PROTECT__
	/*
	 * If we are going to eret to userspace, we must return through the EL0
	 * eret mapping.
	 */
	ldr		w1, [sp, SS64_CPSR]					// Load CPSR
	tbnz	w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping	// Skip if returning to EL1

	/* We need to switch to the EL0 mapping of this code to eret to EL0. */
	adrp	x0, EXT(ExceptionVectorsBase)@page	// Load vector base
	adrp	x1, Lexception_return_restore_registers@page	// Load target PC
	add		x1, x1, Lexception_return_restore_registers@pageoff
	MOV64	x2, ARM_KERNEL_PROTECT_EXCEPTION_START	// Load EL0 vector address
	sub		x1, x1, x0							// Calculate delta
	add		x0, x2, x1							// Convert KVA to EL0 vector address
	br		x0

Lskip_el0_eret_mapping:
#endif /* __ARM_KERNEL_PROTECT__ */
Lexception_return_restore_registers:
	mov		x0, sp								// x0 = &pcb
	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
	AUTH_THREAD_STATE_IN_X0	x20, x21, x22, x23, x24

	/* Restore special register state */
	ldr		w3, [sp, NS64_FPSR]
	ldr		w4, [sp, NS64_FPCR]

	msr		ELR_EL1, x1							// Load the return address into ELR
	msr		SPSR_EL1, x2						// Load the return CPSR into SPSR
	msr		FPSR, x3
	msr		FPCR, x4							// Synchronized by ERET
#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
	/* if eret to userspace, disable JOP */
	tbnz	w2, PSR64_MODE_EL_SHIFT, Lskip_disable_jop
	adrp	x4, EXT(const_boot_args)@page
	add		x4, x4, EXT(const_boot_args)@pageoff
	ldr		x4, [x4, BA_BOOT_FLAGS]
	and		x1, x4, BA_BOOT_FLAGS_DISABLE_JOP
	cbnz	x1, Lskip_disable_jop				// if global JOP disabled, don't touch SCTLR (kernel JOP is already off)
	and		x1, x4, BA_BOOT_FLAGS_DISABLE_USER_JOP
	cbnz	x1, Ldisable_jop					// if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on)
	mrs		x2, TPIDR_EL1
	ldr		x2, [x2, TH_DISABLE_USER_JOP]
	cbz		x2, Lskip_disable_jop				// if thread has JOP enabled, leave it on (kernel running with JOP on)
Ldisable_jop:
	MOV64	x1, SCTLR_JOP_KEYS_ENABLED
	mrs		x4, SCTLR_EL1
	bic		x4, x4, x1
	msr		SCTLR_EL1, x4
	MOV64	x1, SCTLR_EL1_EXPECTED
	cmp		x4, x1
	bne		.
Lskip_disable_jop:
#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
	/* Restore arm_neon_saved_state64 */
	ldp		q0, q1, [x0, NS64_Q0]
	ldp		q2, q3, [x0, NS64_Q2]
	ldp		q4, q5, [x0, NS64_Q4]
	ldp		q6, q7, [x0, NS64_Q6]
	ldp		q8, q9, [x0, NS64_Q8]
	ldp		q10, q11, [x0, NS64_Q10]
	ldp		q12, q13, [x0, NS64_Q12]
	ldp		q14, q15, [x0, NS64_Q14]
	ldp		q16, q17, [x0, NS64_Q16]
	ldp		q18, q19, [x0, NS64_Q18]
	ldp		q20, q21, [x0, NS64_Q20]
	ldp		q22, q23, [x0, NS64_Q22]
	ldp		q24, q25, [x0, NS64_Q24]
	ldp		q26, q27, [x0, NS64_Q26]
	ldp		q28, q29, [x0, NS64_Q28]
	ldp		q30, q31, [x0, NS64_Q30]
	/* Restore arm_saved_state64 */

	// Skip x0, x1 - we're using them
	ldp		x2, x3, [x0, SS64_X2]
	ldp		x4, x5, [x0, SS64_X4]
	ldp		x6, x7, [x0, SS64_X6]
	ldp		x8, x9, [x0, SS64_X8]
	ldp		x10, x11, [x0, SS64_X10]
	ldp		x12, x13, [x0, SS64_X12]
	ldp		x14, x15, [x0, SS64_X14]
	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
	ldp		x18, x19, [x0, SS64_X18]
	ldp		x20, x21, [x0, SS64_X20]
	ldp		x22, x23, [x0, SS64_X22]
	ldp		x24, x25, [x0, SS64_X24]
	ldp		x26, x27, [x0, SS64_X26]
	ldr		x28, [x0, SS64_X28]
	ldr		fp, [x0, SS64_FP]
	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0

	// Restore stack pointer and our last two GPRs
	ldr		x1, [x0, SS64_SP]
	mov		sp, x1
#if __ARM_KERNEL_PROTECT__
	ldr		w18, [x0, SS64_CPSR]				// Stash CPSR
#endif /* __ARM_KERNEL_PROTECT__ */

	ldp		x0, x1, [x0, SS64_X0]				// Restore the GPRs

#if __ARM_KERNEL_PROTECT__
	/* If we are going to eret to userspace, we must unmap the kernel. */
	tbnz	w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch

	/* Update TCR to unmap the kernel. */
	MOV64	x18, TCR_EL1_USER
	msr		TCR_EL1, x18

	/*
	 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
	 * each other due to the microarchitecture.
	 */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
	isb		sy
#endif

	/* Switch to the user ASID (low bit clear) for the task. */
	mrs		x18, TTBR0_EL1
	bic		x18, x18, #(1 << TTBR_ASID_SHIFT)
	msr		TTBR0_EL1, x18
	mov		x18, #0

	/* We don't need an ISB here, as the eret is synchronizing. */
Lskip_ttbr1_switch:
#endif /* __ARM_KERNEL_PROTECT__ */

	eret
user_take_ast:
	PUSH_FRAME
	bl		EXT(ast_taken_user)					// Handle all ASTs, may return via continuation
	POP_FRAME
	b		check_user_asts						// Now try again
user_set_debug_state_and_return:

#if defined(APPLELIGHTNING)
	/* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */

	ARM64_IS_PCORE x12							// if we're not a pCORE, also do nothing
	cbz		x12, 1f

	mrs		x12, ARM64_REG_HID1					// if any debug session ever existed, set forceNexL3ClkOn
	orr		x12, x12, ARM64_REG_HID1_forceNexL3ClkOn
	msr		ARM64_REG_HID1, x12
1:

#endif /* defined(APPLELIGHTNING) */

	ldr		x4, [x3, ACT_CPUDATAP]				// Get current CPU data pointer
	isb											// Synchronize context
	PUSH_FRAME
	bl		EXT(arm_debug_set)					// Establish thread debug state in live regs
	POP_FRAME
	isb
	b		exception_return_unint				// Continue, reloading the thread pointer
	.text
	.align 2
preempt_underflow:
	mrs		x0, TPIDR_EL1
	str		x0, [sp, #-16]!						// We'll print thread pointer
	adr		x0, L_underflow_str					// Format string
	CALL_EXTERN panic							// Game over

L_underflow_str:
	.asciz "Preemption count negative on thread %p"
	.align 2
#if MACH_ASSERT
	.text
	.align 2
rwlock_count_notzero:
	mrs		x0, TPIDR_EL1
	str		x0, [sp, #-16]!						// We'll print thread pointer
	ldr		w0, [x0, TH_RWLOCK_CNT]
	str		w0, [sp, #8]
	adr		x0, L_rwlock_count_notzero_str		// Format string
	CALL_EXTERN panic							// Game over

L_rwlock_count_notzero_str:
	.asciz "RW lock count not 0 on thread %p (%u)"
	.align 2
preempt_count_notzero:
	mrs		x0, TPIDR_EL1
	str		x0, [sp, #-16]!						// We'll print thread pointer
	ldr		w0, [x0, ACT_PREEMPT_CNT]
	str		w0, [sp, #8]
	adr		x0, L_preempt_count_notzero_str		// Format string
	CALL_EXTERN panic							// Game over

L_preempt_count_notzero_str:
	.asciz "preemption count not 0 on thread %p (%u)"
#endif /* MACH_ASSERT */
#if __ARM_KERNEL_PROTECT__
	/*
	 * This symbol denotes the end of the exception vector/eret range; we page
	 * align it so that we can avoid mapping other text in the EL0 exception
	 * vector mapping.
	 */
	.text
	.align 14
	.globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)
#endif /* __ARM_KERNEL_PROTECT__ */
#if XNU_MONITOR
#if __APRR_SUPPORTED__
	.text
	.align 2
el1_sp0_synchronous_vector_not_in_kernel_mode:
	EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_synchronous_vector_kernel, fleh_synchronous_from_ppl, STAY_ON_SP1

	.text
	.align 2
el1_sp0_fiq_vector_not_in_kernel_mode:
	EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_fiq_vector_kernel, fleh_fiq_from_ppl, SWITCH_TO_SP0

	.text
	.align 2
el1_sp0_irq_vector_not_in_kernel_mode:
	EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_irq_vector_kernel, fleh_irq_from_ppl, SWITCH_TO_SP0

	.text
	.align 2
el1_sp0_serror_vector_not_in_kernel_mode:
	EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_serror_vector_kernel, fleh_serror_from_ppl, SWITCH_TO_SP0
#endif /* __APRR_SUPPORTED__ */
/*
 * Functions to preflight the fleh handlers when the PPL has taken an exception;
 * mostly concerned with setting up state for the normal fleh code.
 */
fleh_synchronous_from_ppl:
	/* Save x0. */
	mov		x15, x0

	/* Grab the ESR. */
	mrs		x1, ESR_EL1							// Get the exception syndrome

	/* If the stack pointer is corrupt, it will manifest either as a data abort
	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
	 * these quickly by testing bit 5 of the exception class.
	 */
	tbz		x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
	mrs		x0, SP_EL0							// Get SP_EL0

	/* Perform high level checks for stack corruption. */
	and		x1, x1, #ESR_EC_MASK				// Mask the exception class
	mov		x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
	cmp		x1, x2								// If we have a stack alignment exception
	b.eq	Lcorrupt_ppl_stack					// ...the stack is definitely corrupted
	mov		x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
	cmp		x1, x2								// If we have a data abort, we need to
	b.ne	Lvalid_ppl_stack					// ...validate the stack pointer
	/* Bounds check the PPL stack. */
	adrp	x10, EXT(pmap_stacks_start)@page
	ldr		x10, [x10, #EXT(pmap_stacks_start)@pageoff]
	adrp	x11, EXT(pmap_stacks_end)@page
	ldr		x11, [x11, #EXT(pmap_stacks_end)@pageoff]
	cmp		x0, x10
	b.lo	Lcorrupt_ppl_stack
	cmp		x0, x11
	b.hi	Lcorrupt_ppl_stack

Lvalid_ppl_stack:
	/* Restore x0. */
	mov		x0, x15

	/* Switch back to the kernel stack. */
	msr		SPSel, #0
	GET_PMAP_CPU_DATA	x5, x6, x7
	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov		sp, x6								// Restore the stack pointer from the saved state

	/* Hand off to the synch handler. */
	b		EXT(fleh_synchronous)

Lcorrupt_ppl_stack:
	/* Restore x0. */
	mov		x0, x15

	/* Hand off to the invalid stack handler. */
	b		fleh_invalid_stack
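/*
 * The PPL stack test above, as pseudocode (illustrative):
 *
 *	if (EC suggests a bad SP &&
 *	    (SP_EL0 < pmap_stacks_start || SP_EL0 > pmap_stacks_end))
 *		fleh_invalid_stack();		// PPL stack is corrupt
 *	else
 *		fleh_synchronous();		// on the saved kernel SP
 */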
fleh_fiq_from_ppl:
	mrs		x1, TPIDR_EL1
	ldr		x1, [x1, ACT_CPUDATAP]
	ldr		x1, [x1, CPU_ISTACKPTR]
	mov		sp, x1
	b		EXT(fleh_fiq)

fleh_irq_from_ppl:
	mrs		x1, TPIDR_EL1
	ldr		x1, [x1, ACT_CPUDATAP]
	ldr		x1, [x1, CPU_ISTACKPTR]
	mov		sp, x1
	b		EXT(fleh_irq)
fleh_serror_from_ppl:
	GET_PMAP_CPU_DATA	x5, x6, x7
	ldr		x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov		sp, x6
	b		EXT(fleh_serror)
/*
 * REENABLE_DAIF
 *
 * Restores the DAIF bits to their original state (well, the AIF bits at least).
 *  arg0 - DAIF bits (read from the DAIF interface) to restore
 */
.macro REENABLE_DAIF
	/* AIF enable. */
	tst		$0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF)
	b.eq	3f

	/* IF enable. */
	tst		$0, #(DAIF_IRQF | DAIF_FIQF)
	b.eq	2f

	/* A enable. */
	tst		$0, #(DAIF_ASYNCF)
	b.eq	1f

	/* Enable nothing. */
	b		4f

	/* A enable. */
1:
	msr		DAIFClr, #(DAIFSC_ASYNCF)
	b		4f

	/* IF enable. */
2:
	msr		DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
	b		4f

	/* AIF enable. */
3:
	msr		DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)

	/* Done! */
4:
.endmacro
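/*
 * Decision table for the macro above (illustrative; arg0 holds the DAIF
 * value observed on entry, where a set bit means "masked"):
 *
 *	A, I, F all clear      -> DAIFClr #(IRQF | FIQF | ASYNCF)	// label 3
 *	I, F clear, A set      -> DAIFClr #(IRQF | FIQF)		// label 2
 *	A clear, I or F set    -> DAIFClr #(ASYNCF)			// label 1
 *	A, I, F all set        -> enable nothing
 */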
#if XNU_MONITOR && __APRR_SUPPORTED__

/*
 * aprr_ppl_enter
 *
 * GENERAL EXPECTATIONS:
 * x15 - The index of the requested PPL function.
 */
	.text
	.align 2
	.globl EXT(aprr_ppl_enter)
LEXT(aprr_ppl_enter)
	/* Push a frame. */
	ARM64_STACK_PROLOG
	stp		x20, x21, [sp, #-0x20]!
	stp		x29, x30, [sp, #0x10]
	add		x29, sp, #0x10

	/* Increase the preemption count. */
	mrs		x10, TPIDR_EL1
	ldr		w12, [x10, ACT_PREEMPT_CNT]
	add		w12, w12, #1
	str		w12, [x10, ACT_PREEMPT_CNT]

	/* Is the PPL currently locked down? */
	adrp	x13, EXT(pmap_ppl_locked_down)@page
	add		x13, x13, EXT(pmap_ppl_locked_down)@pageoff
	ldr		w14, [x13]
	cmp		w14, wzr

	/* If not, just perform the call in the current context. */
	b.eq	EXT(ppl_bootstrap_dispatch)

	mov		w10, #PPL_STATE_KERNEL
	b		Ldisable_aif_and_enter_ppl
	/* We align this to land the next few instructions on their own page. */
	.section __PPLTRAMP,__text,regular,pure_instructions
	.align 14
	.space (16*1024)-(4*8) // 8 insns

	/*
	 * This label is used by exception handlers that are trying to return
	 * to the PPL.
	 */
Ldisable_aif_and_enter_ppl:
	/* We must trampoline to the PPL context; disable AIF. */
	mrs		x20, DAIF
	msr		DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)

	.globl EXT(ppl_no_exception_start)
LEXT(ppl_no_exception_start)
	/* Switch APRR_EL1 to PPL mode. */
	MOV64	x14, APRR_EL1_PPL
	msr		APRR_EL1, x14

	/* This ISB should be the last instruction on a page. */
	// TODO: can we static assert this?
	isb		sy
#endif /* XNU_MONITOR && __APRR_SUPPORTED__ */
	.text
	.align 2
	// x15: ppl call number
	// w10: ppl_state
	// x20: gxf_enter caller's DAIF
	.globl EXT(ppl_trampoline_start)
LEXT(ppl_trampoline_start)
#if __APRR_SUPPORTED__
	/* Squash AIF AGAIN, because someone may have attacked us. */
	msr		DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
#endif /* __APRR_SUPPORTED__ */

#if __APRR_SUPPORTED__
	/* Verify the state of APRR_EL1. */
	MOV64	x14, APRR_EL1_PPL
	mrs		x16, APRR_EL1
#else /* __APRR_SUPPORTED__ */
#error "XPRR configuration error"
#endif /* __APRR_SUPPORTED__ */
	cmp		x14, x16
	b.ne	Lppl_fail_dispatch
	/* Verify the request ID. */
	cmp		x15, PPL_MAX_COUNT
	b.hs	Lppl_fail_dispatch
	/* Get the PPL CPU data structure. */
	GET_PMAP_CPU_DATA	x12, x13, x14

	/* Mark this CPU as being in the PPL. */
	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	cmp		w9, #PPL_STATE_KERNEL
	b.eq	Lppl_mark_cpu_as_dispatching

	/* Check to see if we are trying to trap from within the PPL. */
	cmp		w9, #PPL_STATE_DISPATCH
	b.eq	Lppl_fail_dispatch_ppl

	/* Ensure that we are returning from an exception. */
	cmp		w9, #PPL_STATE_EXCEPTION
	b.ne	Lppl_fail_dispatch
	// where is w10 set?
	// in CHECK_EXCEPTION_RETURN_DISPATCH_PPL
	cmp		w10, #PPL_STATE_EXCEPTION
	b.ne	Lppl_fail_dispatch

	/* This is an exception return; set the CPU to the dispatching state. */
	mov		w9, #PPL_STATE_DISPATCH
	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Find the save area, and return to the saved PPL context. */
	ldr		x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
	mov		sp, x0
#if __APRR_SUPPORTED__
	b		Lexception_return_restore_registers
#else
	b		EXT(return_to_ppl)
#endif /* __APRR_SUPPORTED__ */
Lppl_mark_cpu_as_dispatching:
	cmp		w10, #PPL_STATE_KERNEL
	b.ne	Lppl_fail_dispatch

	/* Mark the CPU as dispatching. */
	mov		w13, #PPL_STATE_DISPATCH
	str		w13, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Get the handler for the request */
	adrp	x9, EXT(ppl_handler_table)@page
	add		x9, x9, EXT(ppl_handler_table)@pageoff
	ldr		x10, [x9, x15, lsl #3]
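	/*
	 * i.e. (illustrative): x10 = ppl_handler_table[x15], an array of
	 * 8-byte function pointers indexed by the requested PPL call number,
	 * which was range-checked against PPL_MAX_COUNT above.
	 */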
	/* Switch to the regular PPL stack. */
	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
	ldr		x9, [x12, PMAP_CPU_DATA_PPL_STACK]

	// SP0 is thread stack here
	mov		x21, sp
	// SP0 is now PPL stack
	mov		sp, x9

	/* Save the old stack pointer off in case we need it. */
	str		x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]

	/* Branch to the code that will invoke the PPL request. */
	b		EXT(ppl_dispatch)
Lppl_fail_dispatch_ppl:
	/* Switch back to the kernel stack. */
	ldr		x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov		sp, x10

Lppl_fail_dispatch:
	/* Indicate that we failed. */
	mov		x15, #PPL_EXIT_BAD_CALL

	/* Move the DAIF bits into the expected register. */
	mov		x10, x20

	/* Return to kernel mode. */
	b		ppl_return_to_kernel_mode
Lppl_dispatch_exit:
	/* Indicate that we are cleanly exiting the PPL. */
	mov		x15, #PPL_EXIT_DISPATCH

	/* Switch back to the original (kernel thread) stack. */
	mov		sp, x21

	/* Move the saved DAIF bits. */
	mov		x10, x20

	/* Clear the old stack pointer. */
	str		xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]

	/*
	 * Mark the CPU as no longer being in the PPL.  We spin if our state
	 * machine is broken.
	 */
	ldr		w9, [x12, PMAP_CPU_DATA_PPL_STATE]
	cmp		w9, #PPL_STATE_DISPATCH
	b.ne	.
	mov		w9, #PPL_STATE_KERNEL
	str		w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Return to the kernel. */
	b		ppl_return_to_kernel_mode
#if __APRR_SUPPORTED__
	/* We align this to land the next few instructions on their own page. */
	.align 14
	.space (16*1024)-(4*5) // 5 insns

ppl_return_to_kernel_mode:
	/* Switch APRR_EL1 back to the kernel mode. */
	// must be 5 instructions
	MOV64	x14, APRR_EL1_DEFAULT
	msr		APRR_EL1, x14

	.globl EXT(ppl_trampoline_end)
LEXT(ppl_trampoline_end)

	/* This should be the first instruction on a page. */
	isb		sy

	.globl EXT(ppl_no_exception_end)
LEXT(ppl_no_exception_end)
	b		ppl_exit
#endif /* __APRR_SUPPORTED__ */
	.text
ppl_exit:
	/*
	 * If we are dealing with an exception, hand off to the first level
	 * exception handler.
	 */
	cmp		x15, #PPL_EXIT_EXCEPTION
	b.eq	Ljump_to_fleh_handler

	/* Restore the original AIF state. */
	REENABLE_DAIF	x10

	/* If this was a panic call from the PPL, reinvoke panic. */
	cmp		x15, #PPL_EXIT_PANIC_CALL
	b.eq	Ljump_to_panic_trap_to_debugger
	/* Load the preemption count. */
	mrs		x10, TPIDR_EL1
	ldr		w12, [x10, ACT_PREEMPT_CNT]

	/* Detect underflow */
	cbnz	w12, Lno_preempt_underflow
	b		preempt_underflow
Lno_preempt_underflow:

	/* Lower the preemption count. */
	sub		w12, w12, #1
	str		w12, [x10, ACT_PREEMPT_CNT]

	/* Skip ASTs if the preemption count is not zero. */
	cbnz	x12, Lppl_skip_ast_taken
	/* Skip the AST check if interrupts are disabled. */
	mrs		x1, DAIF
	tst		x1, #DAIF_IRQF
	b.ne	Lppl_skip_ast_taken

	/* Disable interrupts. */
	msr		DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)

	/* If there is no urgent AST, skip the AST. */
	ldr		x12, [x10, ACT_CPUDATAP]
	ldr		x14, [x12, CPU_PENDING_AST]
	tst		x14, AST_URGENT
	b.eq	Lppl_defer_ast_taken

	/* Stash our return value and return reason. */
	mov		x20, x0
	mov		x21, x15

	/* Handle the AST. */
	bl		EXT(ast_taken_kernel)

	/* Restore the return value and the return reason. */
	mov		x15, x21
	mov		x0, x20

Lppl_defer_ast_taken:
	/* Reenable interrupts. */
	msr		DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
Lppl_skip_ast_taken:
	/* Pop the stack frame. */
	ldp		x29, x30, [sp, #0x10]
	ldp		x20, x21, [sp], #0x20

	/* Check to see if this was a bad request. */
	cmp		x15, #PPL_EXIT_BAD_CALL
	b.eq	Lppl_bad_call

	/* Return. */
	ARM64_STACK_EPILOG
	.align 2
Ljump_to_fleh_handler:
	br		x25

	.align 2
Ljump_to_panic_trap_to_debugger:
	b		EXT(panic_trap_to_debugger)

Lppl_bad_call:
	/* Panic. */
	adrp	x0, Lppl_bad_call_panic_str@page
	add		x0, x0, Lppl_bad_call_panic_str@pageoff
	b		EXT(panic)
	.text
	.align 2
	.globl EXT(ppl_dispatch)
LEXT(ppl_dispatch)
	/*
	 * Save a couple of important registers (implementation detail; x12 has
	 * the PPL per-CPU data address; x13 is not actually interesting).
	 */
	stp		x12, x13, [sp, #-0x10]!

	/* Restore the original AIF state. */
	REENABLE_DAIF	x20

	/*
	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
	 * but the exception vectors will deal with this properly.
	 */

	/* Invoke the PPL method. */
#ifdef HAS_APPLE_PAC
	blraaz	x10
#else
	blr		x10
#endif

	/* Disable AIF. */
	msr		DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)

	/* Restore those important registers. */
	ldp		x12, x13, [sp], #0x10

	/* Mark this as a regular return, and hand off to the return path. */
	b		Lppl_dispatch_exit
	.text
	.align 2
	.globl EXT(ppl_bootstrap_dispatch)
LEXT(ppl_bootstrap_dispatch)
	/* Verify the PPL request. */
	cmp		x15, PPL_MAX_COUNT
	b.hs	Lppl_fail_bootstrap_dispatch

	/* Get the requested PPL routine. */
	adrp	x9, EXT(ppl_handler_table)@page
	add		x9, x9, EXT(ppl_handler_table)@pageoff
	ldr		x10, [x9, x15, lsl #3]

	/* Invoke the requested PPL routine. */
#ifdef HAS_APPLE_PAC
	blraaz	x10
#else
	blr		x10
#endif

	/* Stash off the return value */
	mov		x20, x0

	/* Drop the preemption count */
	bl		EXT(_enable_preemption)
	mov		x0, x20

	/* Pop the stack frame. */
	ldp		x29, x30, [sp, #0x10]
	ldp		x20, x21, [sp], #0x20
#if __has_feature(ptrauth_returns)
	retab
#else
	ret
#endif
Lppl_fail_bootstrap_dispatch:
	/* Pop our stack frame and panic. */
	ldp		x29, x30, [sp, #0x10]
	ldp		x20, x21, [sp], #0x20
#if __has_feature(ptrauth_returns)
	autibsp
#endif
	adrp	x0, Lppl_bad_call_panic_str@page
	add		x0, x0, Lppl_bad_call_panic_str@pageoff
	b		EXT(panic)
	.text
	.align 2
	.globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
	// TODO: why would we ever want to turn interrupts back on after going down panic path?
	/* Grab the current AIF state, and disable AIF. */
	mrs		x10, DAIF
	msr		DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)

	// we want interrupts to stay masked after exiting the PPL when calling into panic to halt the system
	// x10 is used in ppl_return_to_kernel_mode to restore the desired DAIF state after GEXIT
	orr		x10, x10, #(DAIF_ASYNCF | DAIF_IRQF | DAIF_FIQF)

	/* Indicate (for the PPL->kernel transition) that we are panicking. */
	mov		x15, #PPL_EXIT_PANIC_CALL
	/* Get the PPL per-CPU data. */
	GET_PMAP_CPU_DATA	x11, x12, x13

	/* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
	ldr		x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov		sp, x12

	/*
	 * Mark this CPU as being in the PPL.  Halt and catch fire if our state
	 * machine appears to be broken.
	 */
	ldr		w12, [x11, PMAP_CPU_DATA_PPL_STATE]
	cmp		w12, #PPL_STATE_DISPATCH
	b.ne	.
	mov		w13, #PPL_STATE_PANIC
	str		w13, [x11, PMAP_CPU_DATA_PPL_STATE]

	/* Now we are ready to exit the PPL. */
	b		ppl_return_to_kernel_mode
Lppl_bad_call_panic_str:
	.asciz "ppl_dispatch: failed due to bad arguments/state"
#else /* XNU_MONITOR */
	.text
	.align 2
	.globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
	ret
#endif /* XNU_MONITOR */
/* ARM64_TODO Is globals_asm.h needed? */
//#include "globals_asm.h"

/* vim: set ts=4: */