/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach/exception_types.h>

#include <config_dtrace.h>

#include <arm64/exception_asm.h>
#include "dwarf_unwind.h"
#if __ARM_KERNEL_PROTECT__

/*
 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
 *
 * Checks if an exception was taken from the PPL, and if so, trampolines back
 * to the PPL.
 *	x26 - 0 if the exception was taken while in the kernel, 1 if the
 *	      exception was taken while in the PPL.
 */
.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL

	/* Return to the PPL. */
	mov	w10, #PPL_STATE_EXCEPTION
#error "XPRR configuration error"
#endif /* XNU_MONITOR */
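
/*
 * In rough pseudocode (a sketch only; the surrounding PPL plumbing is elided
 * above), the return-dispatch check performed by the macro above behaves like:
 *
 *	if (x26 == 1) {                       // exception came from the PPL
 *		ppl_state = PPL_STATE_EXCEPTION;  // w10 above
 *		return_to_ppl(saved_context);     // trampoline back into the PPL
 *	}
 *	// otherwise fall through and return to the kernel as usual
 */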
.macro COMPARE_BRANCH_FUSION
#if defined(APPLE_ARM64_ARCH_FAMILY)
	orr	$1, $1, ARM64_REG_HID1_disCmpBrFusion
	mov	$2, ARM64_REG_HID1_disCmpBrFusion
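
/*
 * Sketch of the intent of the macro above (assuming the usual HID1
 * chicken-bit convention): the macro reads HID1 into $1 and then either ORs
 * in disCmpBrFusion to disable compare-and-branch fusion, or uses the mask
 * in $2 to clear the bit and re-enable it, before writing HID1 back. Only
 * the set/clear lines are visible here.
 */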

/*
 * Restores the kernel EL1 mappings, if necessary.
 *
 * This may mutate x18.
 */
#if __ARM_KERNEL_PROTECT__
	/* Switch to the kernel ASID (low bit set) for the task. */
	orr	x18, x18, #(1 << TTBR_ASID_SHIFT)

	/*
	 * We eschew some barriers on Apple CPUs, as relative ordering of writes
	 * to the TTBRs and writes to the TCR should be ensured by the
	 * microarchitecture.
	 */
#if !defined(APPLE_ARM64_ARCH_FAMILY)

	/*
	 * Update the TCR to map the kernel now that we are using the kernel
	 * ASID.
	 */
	MOV64	x18, TCR_EL1_BOOT
#endif /* __ARM_KERNEL_PROTECT__ */
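
/*
 * Minimal sketch of the mapping restore above, assuming the usual TTBR/TCR
 * layout (register reads/writes elided above are shown for context):
 *
 *	ttbr0  = read(TTBR0_EL1);
 *	ttbr0 |= (1ULL << TTBR_ASID_SHIFT);   // select the kernel ASID (low bit set)
 *	write(TTBR0_EL1, ttbr0);
 *	write(TCR_EL1, TCR_EL1_BOOT);         // TCR value that maps the kernel
 *	synchronize();                        // barrier requirements relaxed on Apple cores, see above
 */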

/*
 * BRANCH_TO_KVA_VECTOR
 *
 * Branches to the requested long exception vector in the kernelcache.
 *	arg0 - The label to branch to
 *	arg1 - The index of the label in exc_vectors_table
 *
 * This may mutate x18.
 */
.macro BRANCH_TO_KVA_VECTOR
#if __ARM_KERNEL_PROTECT__
	/*
	 * Find the kernelcache table for the exception vectors by accessing
	 * the per-CPU data.
	 */
	ldr	x18, [x18, ACT_CPUDATAP]
	ldr	x18, [x18, CPU_EXC_VECTORS]

	/*
	 * Get the handler for this exception and jump to it.
	 */
	ldr	x18, [x18, #($1 << 3)]
#endif /* __ARM_KERNEL_PROTECT__ */
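
/*
 * The lookup above amounts to (a sketch; names follow the assembly):
 *
 *	handler = cpu_data->cpu_exc_vectors[index];   // index is arg1
 *	goto *handler;
 *
 * where each table entry is 8 bytes, hence the ($1 << 3) offset.
 */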

/*
 * Verifies that the kernel stack is aligned and mapped within an expected
 * stack address range. Note: this happens before saving registers (in case we
 * can't save to the kernel stack).
 *
 *	x1 - Exception syndrome
 *
 * Seems like we need an unused argument to the macro for the \@ syntax to work.
 */
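
/*
 * In C-like pseudocode the macro body below is approximately (a sketch that
 * mirrors the visible checks):
 *
 *	if (EC(esr) == ESR_EC_SP_ALIGN)                       goto corrupt;
 *	if (EC(esr) != ESR_EC_DABORT_EL1)                     goto valid;
 *	if (SP_EL0 < kstack_top && SP_EL0 > kstack_bottom)    goto valid;
 *	if (SP_EL0 < istack_top && SP_EL0 > istack_bottom)    goto valid;
 *	goto corrupt;    // build an exception frame and dispatch to fleh_invalid_stack
 */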
.macro CHECK_KERNEL_STACK unused
	stp	x2, x3, [sp, #-16]!			// Save {x2-x3}
	and	x1, x1, #ESR_EC_MASK			// Mask the exception class
	mov	x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
	cmp	x1, x2					// If we have a stack alignment exception
	b.eq	Lcorrupt_stack_\@			// ...the stack is definitely corrupted
	mov	x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
	cmp	x1, x2					// If we have a data abort, we need to
	b.ne	Lvalid_stack_\@				// ...validate the stack pointer
	mrs	x0, SP_EL0				// Get SP_EL0
	mrs	x1, TPIDR_EL1				// Get thread pointer

	ldr	x2, [x1, TH_KSTACKPTR]			// Get top of kernel stack
	sub	x3, x2, KERNEL_STACK_SIZE		// Find bottom of kernel stack
	cmp	x0, x2					// if (SP_EL0 >= kstack top)
	b.ge	Ltest_istack_\@				//    jump to istack test
	cmp	x0, x3					// if (SP_EL0 > kstack bottom)
	b.gt	Lvalid_stack_\@				//    stack pointer valid

Ltest_istack_\@:
	ldr	x1, [x1, ACT_CPUDATAP]			// Load the cpu data ptr
	ldr	x2, [x1, CPU_INTSTACK_TOP]		// Get top of istack
	sub	x3, x2, INTSTACK_SIZE_NUM		// Find bottom of istack
	cmp	x0, x2					// if (SP_EL0 >= istack top)
	b.ge	Lcorrupt_stack_\@			//    corrupt stack pointer
	cmp	x0, x3					// if (SP_EL0 > istack bottom)
	b.gt	Lvalid_stack_\@				//    stack pointer valid

Lcorrupt_stack_\@:
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
	sub	sp, sp, ARM_CONTEXT_SIZE		// Allocate exception frame
	stp	x0, x1, [sp, SS64_X0]			// Save x0, x1 to the exception frame
	stp	x2, x3, [sp, SS64_X2]			// Save x2, x3 to the exception frame
	mrs	x0, SP_EL0				// Get SP_EL0
	str	x0, [sp, SS64_SP]			// Save sp to the exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov	x0, sp					// Copy exception frame pointer to x0
	adrp	x1, fleh_invalid_stack@page		// Load address for fleh
	add	x1, x1, fleh_invalid_stack@pageoff	// fleh_dispatch64 will save register state before we get there
	b	fleh_dispatch64

Lvalid_stack_\@:
	ldp	x2, x3, [sp], #16			// Restore {x2-x3}
.endmacro
#if __ARM_KERNEL_PROTECT__
	.section __DATA_CONST,__const
	.globl EXT(exc_vectors_table)
LEXT(exc_vectors_table)
	/* Table of exception handlers.
	 * These handlers sometimes contain deadloops.
	 * It's nice to have symbols for them when debugging. */
	.quad el1_sp0_synchronous_vector_long
	.quad el1_sp0_irq_vector_long
	.quad el1_sp0_fiq_vector_long
	.quad el1_sp0_serror_vector_long
	.quad el1_sp1_synchronous_vector_long
	.quad el1_sp1_irq_vector_long
	.quad el1_sp1_fiq_vector_long
	.quad el1_sp1_serror_vector_long
	.quad el0_synchronous_vector_64_long
	.quad el0_irq_vector_64_long
	.quad el0_fiq_vector_64_long
	.quad el0_serror_vector_64_long
#endif /* __ARM_KERNEL_PROTECT__ */

#if __ARM_KERNEL_PROTECT__
	/*
	 * We need this to be on a page boundary so that we may avoid mapping
	 * other text along with it. As this must be on the VM page boundary
	 * (due to how the coredumping code currently works), this will be a
	 * 16KB page boundary.
	 */
#endif /* __ARM_KERNEL_PROTECT__ */
	.globl EXT(ExceptionVectorsBase)
LEXT(ExceptionVectorsBase)
Lel1_sp0_synchronous_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0

	BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1

	BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2

Lel1_sp0_serror_vector:
	BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3

Lel1_sp1_synchronous_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4

	BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5

	BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6

Lel1_sp1_serror_vector:
	BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7

Lel0_synchronous_vector_64:
	BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8

	BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9

	BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10

Lel0_serror_vector_64:
	BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11

	/* Fill out the rest of the page */

/*********************************
 * END OF EXCEPTION VECTORS PAGE *
 *********************************/

.macro EL1_SP0_VECTOR
	msr	SPSel, #0				// Switch to SP0
	sub	sp, sp, ARM_CONTEXT_SIZE		// Create exception frame
	stp	x0, x1, [sp, SS64_X0]			// Save x0, x1 to exception frame
	add	x0, sp, ARM_CONTEXT_SIZE		// Calculate the original stack pointer
	str	x0, [sp, SS64_SP]			// Save stack pointer to exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov	x0, sp					// Copy saved state pointer to x0
.endmacro

el1_sp0_synchronous_vector_long:
	stp	x0, x1, [sp, #-16]!			// Save x0 and x1 to the exception stack
	mrs	x1, ESR_EL1				// Get the exception syndrome
	/*
	 * If the stack pointer is corrupt, it will manifest either as a data abort
	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
	 * these quickly by testing bit 5 of the exception class.
	 */
	tbz	x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
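	/*
	 * Worked out: ESR_EC_SHIFT is 26, so "EC bit 5" is ESR bit 31. Both
	 * ESR_EC_DABORT_EL1 (0x25 = 0b100101) and ESR_EC_SP_ALIGN (0x26 = 0b100110)
	 * have EC bit 5 set, so if that bit is clear neither corrupt-stack
	 * signature is possible and the full CHECK_KERNEL_STACK work can be skipped.
	 */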
Lkernel_stack_valid:
	ldp	x0, x1, [sp], #16			// Restore x0 and x1 from the exception stack

	adrp	x1, EXT(fleh_synchronous)@page		// Load address for fleh
	add	x1, x1, EXT(fleh_synchronous)@pageoff

el1_sp0_irq_vector_long:
	adrp	x1, EXT(fleh_irq)@page			// Load address for fleh
	add	x1, x1, EXT(fleh_irq)@pageoff

el1_sp0_fiq_vector_long:
	// ARM64_TODO write optimized decrementer
	adrp	x1, EXT(fleh_fiq)@page			// Load address for fleh
	add	x1, x1, EXT(fleh_fiq)@pageoff

el1_sp0_serror_vector_long:
	adrp	x1, EXT(fleh_serror)@page		// Load address for fleh
	add	x1, x1, EXT(fleh_serror)@pageoff

.macro EL1_SP1_VECTOR
	sub	sp, sp, ARM_CONTEXT_SIZE		// Create exception frame
	stp	x0, x1, [sp, SS64_X0]			// Save x0, x1 to exception frame
	add	x0, sp, ARM_CONTEXT_SIZE		// Calculate the original stack pointer
	str	x0, [sp, SS64_SP]			// Save stack pointer to exception frame
	INIT_SAVED_STATE_FLAVORS sp, w0, w1
	mov	x0, sp					// Copy saved state pointer to x0
.endmacro

el1_sp1_synchronous_vector_long:
	b	check_exception_stack
Lel1_sp1_synchronous_valid_stack:
#if defined(KERNEL_INTEGRITY_KTRR)
	b	check_ktrr_sctlr_trap
Lel1_sp1_synchronous_vector_continue:
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	adrp	x1, fleh_synchronous_sp1@page
	add	x1, x1, fleh_synchronous_sp1@pageoff

el1_sp1_irq_vector_long:
	adrp	x1, fleh_irq_sp1@page
	add	x1, x1, fleh_irq_sp1@pageoff

el1_sp1_fiq_vector_long:
	adrp	x1, fleh_fiq_sp1@page
	add	x1, x1, fleh_fiq_sp1@pageoff

el1_sp1_serror_vector_long:
	adrp	x1, fleh_serror_sp1@page
	add	x1, x1, fleh_serror_sp1@pageoff

	stp	x0, x1, [sp, #-16]!			// Save x0 and x1 to the exception stack
#if __ARM_KERNEL_PROTECT__
	mov	x18, #0					// Zero x18 to avoid leaking data to user SS
#endif /* __ARM_KERNEL_PROTECT__ */
	mrs	x0, TPIDR_EL1				// Load the thread register
	mrs	x1, SP_EL0				// Load the user stack pointer
	add	x0, x0, ACT_CONTEXT			// Calculate where we store the user context pointer
	ldr	x0, [x0]				// Load the user context pointer
	str	x1, [x0, SS64_SP]			// Store the user stack pointer in the user PCB
	msr	SP_EL0, x0				// Copy the user PCB pointer to SP0
	ldp	x0, x1, [sp], #16			// Restore x0 and x1 from the exception stack
	msr	SPSel, #0				// Switch to SP0
	stp	x0, x1, [sp, SS64_X0]			// Save x0, x1 to the user PCB
	mrs	x1, TPIDR_EL1				// Load the thread register

	mov	x0, sp					// Copy the user PCB pointer to x0
	// x1 contains thread register
.endmacro

el0_synchronous_vector_64_long:
	adrp	x1, EXT(fleh_synchronous)@page		// Load address for fleh
	add	x1, x1, EXT(fleh_synchronous)@pageoff

el0_irq_vector_64_long:
	adrp	x1, EXT(fleh_irq)@page			// Load address for fleh
	add	x1, x1, EXT(fleh_irq)@pageoff

el0_fiq_vector_64_long:
	adrp	x1, EXT(fleh_fiq)@page			// Load address for fleh
	add	x1, x1, EXT(fleh_fiq)@pageoff

el0_serror_vector_64_long:
	adrp	x1, EXT(fleh_serror)@page		// Load address for fleh
	add	x1, x1, EXT(fleh_serror)@pageoff

/*
 * check_exception_stack
 *
 * Verifies that the stack pointer at SP1 is within the exception stack.
 * If not, it will simply hang, as we have no more stack to fall back on.
 */
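
/*
 * Roughly (a sketch; x18 is the only scratch register used below):
 *
 *	if (current_thread() == NULL) goto valid;     // early boot, no thread yet
 *	top    = cpu_data->cpu_excepstack_top;
 *	bottom = top - EXCEPSTACK_SIZE_NUM;
 *	if (sp > top || sp < bottom) for (;;) ;       // hang; no stack left to fall back on
 */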
check_exception_stack:
	mrs	x18, TPIDR_EL1				// Get thread pointer
	cbz	x18, Lvalid_exception_stack		// Thread context may not be set early in boot
	ldr	x18, [x18, ACT_CPUDATAP]
	cbz	x18, .					// If thread context is set, cpu data should be too
	ldr	x18, [x18, CPU_EXCEPSTACK_TOP]
	cmp	sp, x18
	b.gt	.					// Hang if above exception stack top
	sub	x18, x18, EXCEPSTACK_SIZE_NUM		// Find bottom of exception stack
	cmp	sp, x18
	b.lt	.					// Hang if below exception stack bottom
Lvalid_exception_stack:
	b	Lel1_sp1_synchronous_valid_stack

#if defined(KERNEL_INTEGRITY_KTRR)
check_ktrr_sctlr_trap:
	/*
	 * We may abort on an instruction fetch on reset when enabling the MMU by
	 * writing SCTLR_EL1 because the page containing the privileged instruction is
	 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
	 * would otherwise panic unconditionally. Check for the condition and return
	 * safe execution to the caller on behalf of the faulting function.
	 *
	 * Expected register state:
	 *	x22 - Kernel virtual base
	 *	x23 - Kernel physical base
	 */
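	/*
	 * Conceptually (a sketch of the check below):
	 *
	 *	if (ESR_EL1 indicates an instruction fetch abort &&
	 *	    ELR_EL1 == physical address of _pinst_set_sctlr_trap_addr)
	 *		eret to lr;                  // resume the caller of the pinst routine
	 *	else
	 *		fall through to the normal SP1 synchronous handler;
	 */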
	sub	sp, sp, ARM_CONTEXT_SIZE		// Make some space on the stack
	stp	x0, x1, [sp, SS64_X0]			// Stash x0, x1
	mrs	x0, ESR_EL1				// Check ESR for instr. fetch abort
	and	x0, x0, #0xffffffffffffffc0		// Mask off ESR.ISS.IFSC
	movz	w1, #0x8600, lsl #16

	mrs	x0, ELR_EL1				// Check for expected abort address
	adrp	x1, _pinst_set_sctlr_trap_addr@page
	add	x1, x1, _pinst_set_sctlr_trap_addr@pageoff
	sub	x1, x1, x22				// Convert to physical address

	ldp	x0, x1, [sp, SS64_X0]			// Restore x0, x1
	add	sp, sp, ARM_CONTEXT_SIZE		// Clean up stack
	b.ne	Lel1_sp1_synchronous_vector_continue
	msr	ELR_EL1, lr				// Return to caller
	ERET_CONTEXT_SYNCHRONIZING
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

/*
 * 64-bit first level exception handler dispatcher.
 * Completes register context saving and branches to FLEH.
 *	{x0, x1, sp} - saved
 *	x1 - address of FLEH
 *	fp - previous stack frame if EL1
 */
fleh_dispatch64:
	/* Save arm_saved_state64 */
	SPILL_REGISTERS KERNEL_MODE

	/* If exception is from userspace, zero unused registers */
	and	x23, x23, #(PSR64_MODE_EL_MASK)
	cmp	x23, #(PSR64_MODE_EL0)

	SANITIZE_FPCR x25, x2, 2			// x25 is set to current FPCR by SPILL_REGISTERS

	/* x21, x22 cleared in common case below */

	mov	x21, x0					// Copy arm_context_t pointer to x21
	mov	x22, x1					// Copy handler routine to x22

	/* Zero x26 to indicate that this should not return to the PPL. */

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
	tst	x23, PSR64_MODE_EL_MASK			// If any EL MODE bits are set, we're coming from
	b.ne	1f					// kernel mode, so skip precise time update
	bl	EXT(timer_state_event_user_to_kernel)

	mov	x0, x21					// Reload arm_context_t pointer
1:
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */

	/* Dispatch to FLEH */

	.global EXT(fleh_synchronous)
LEXT(fleh_synchronous)

	mrs	x1, ESR_EL1				// Load exception syndrome
	mrs	x2, FAR_EL1				// Load fault address

	/*
	 * At this point, the LR contains the value of ELR_EL1. In the case of an
	 * instruction prefetch abort, this will be the faulting pc, which we know
	 * to be invalid. This will prevent us from backtracing through the
	 * exception if we put it in our stack frame, so we load the LR from the
	 * exception saved state instead.
	 */
	and	w3, w1, #(ESR_EC_MASK)
	lsr	w3, w3, #(ESR_EC_SHIFT)
	mov	w4, #(ESR_EC_IABORT_EL1)
	cmp	w3, w4
	b.eq	Lfleh_sync_load_lr
Lvalid_link_register:

	bl	EXT(sleh_synchronous)

	CHECK_EXCEPTION_RETURN_DISPATCH_PPL

	mov	x28, xzr				// Don't need to check PFZ if there are ASTs
	b	exception_return_dispatch

Lfleh_sync_load_lr:
	ldr	lr, [x0, SS64_LR]
	b	Lvalid_link_register

/*
 * Shared prologue code for fleh_irq and fleh_fiq.
 * Does any interrupt bookkeeping we may want to do
 * before invoking the handler proper.
 *
 *	fp - Undefined live value (we may push a frame)
 *	lr - Undefined live value (we may push a frame)
 *	sp - Interrupt stack for the current CPU
 */
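
/*
 * Equivalent bookkeeping in C-like pseudocode (a sketch; field names are
 * illustrative, the assembly below uses the ACT_*/CPU_* offsets):
 *
 *	cpu = thread->cpu_data;
 *	cpu->stat_irq++;
 *	cpu->stat_irq_wake++;
 *	thread->preempt_count++;
 *	cpu->int_state = saved_context;   // x0 on entry
 */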
.macro BEGIN_INTERRUPT_HANDLER
	mrs	x22, TPIDR_EL1
	ldr	x23, [x22, ACT_CPUDATAP]		// Get current cpu
	/* Update IRQ count */
	ldr	w1, [x23, CPU_STAT_IRQ]
	add	w1, w1, #1				// Increment count
	str	w1, [x23, CPU_STAT_IRQ]			// Update IRQ count
	ldr	w1, [x23, CPU_STAT_IRQ_WAKE]
	add	w1, w1, #1				// Increment count
	str	w1, [x23, CPU_STAT_IRQ_WAKE]		// Update post-wake IRQ count
	/* Increment preempt count */
	ldr	w1, [x22, ACT_PREEMPT_CNT]
	add	w1, w1, #1
	str	w1, [x22, ACT_PREEMPT_CNT]
	/* Store context in int state */
	str	x0, [x23, CPU_INT_STATE]		// Saved context in cpu_int_state
.endmacro

/*
 * Shared epilogue code for fleh_irq and fleh_fiq.
 * Cleans up after the prologue, and may do a bit more
 * bookkeeping (kdebug related).
 *
 *	x22 - Live TPIDR_EL1 value (thread address)
 *	x23 - Address of the current CPU data structure
 *	w24 - 0 if kdebug is disabled, nonzero otherwise
 *	fp - Undefined live value (we may push a frame)
 *	lr - Undefined live value (we may push a frame)
 *	sp - Interrupt stack for the current CPU
 */
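
/*
 * Sketch of the epilogue below (field names illustrative):
 *
 *	cpu->int_state = NULL;
 *	assert(thread->preempt_count > 0);    // underflow is fatal
 *	thread->preempt_count--;
 *	sp = thread->kernel_stack_top;        // leave the interrupt stack
 */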
.macro END_INTERRUPT_HANDLER
	/* Clear int context */
	str	xzr, [x23, CPU_INT_STATE]
	/* Decrement preempt count */
	ldr	w0, [x22, ACT_PREEMPT_CNT]
	cbnz	w0, 1f					// Detect underflow

1:
	sub	w0, w0, #1
	str	w0, [x22, ACT_PREEMPT_CNT]
	/* Switch back to kernel stack */
	ldr	x0, [x22, TH_KSTACKPTR]
	mov	sp, x0
.endmacro

	.global EXT(fleh_irq)
LEXT(fleh_irq)
	BEGIN_INTERRUPT_HANDLER

	END_INTERRUPT_HANDLER

	CHECK_EXCEPTION_RETURN_DISPATCH_PPL

	mov	x28, #1					// Set a bit to check PFZ if there are ASTs
	b	exception_return_dispatch

	.global EXT(fleh_fiq_generic)
LEXT(fleh_fiq_generic)

	.global EXT(fleh_fiq)
LEXT(fleh_fiq)
	BEGIN_INTERRUPT_HANDLER

	END_INTERRUPT_HANDLER

	CHECK_EXCEPTION_RETURN_DISPATCH_PPL

	mov	x28, #1					// Set a bit to check PFZ if there are ASTs
	b	exception_return_dispatch

	.global EXT(fleh_serror)
LEXT(fleh_serror)
	mrs	x1, ESR_EL1				// Load exception syndrome
	mrs	x2, FAR_EL1				// Load fault address

	CHECK_EXCEPTION_RETURN_DISPATCH_PPL

	mov	x28, xzr				// Don't need to check PFZ if there are ASTs
	b	exception_return_dispatch

/*
 * Register state saved before we get here.
 */
fleh_invalid_stack:
	mrs	x1, ESR_EL1				// Load exception syndrome
	str	x1, [x0, SS64_ESR]
	mrs	x2, FAR_EL1				// Load fault address
	str	x2, [x0, SS64_FAR]

	bl	EXT(sleh_invalid_stack)			// Shouldn't return!

fleh_synchronous_sp1:
	mrs	x1, ESR_EL1				// Load exception syndrome
	str	x1, [x0, SS64_ESR]
	mrs	x2, FAR_EL1				// Load fault address
	str	x2, [x0, SS64_FAR]

	bl	EXT(sleh_synchronous_sp1)

	b	EXT(panic_with_thread_kernel_state)
	.asciz "IRQ exception taken while SP1 selected"

	b	EXT(panic_with_thread_kernel_state)
	.asciz "FIQ exception taken while SP1 selected"

	adr	x0, Lsp1_serror_str
	b	EXT(panic_with_thread_kernel_state)
Lsp1_serror_str:
	.asciz "Asynchronous exception taken while SP1 selected"

exception_return_dispatch:
	ldr	w0, [x21, SS64_CPSR]
	tst	w0, PSR64_MODE_EL_MASK
	b.ne	EXT(return_to_kernel)			// return to kernel if M[3:2] > 0

	.global EXT(return_to_kernel)
LEXT(return_to_kernel)
	tbnz	w0, #DAIF_IRQF_SHIFT, exception_return	// Skip AST check if IRQ disabled
	mrs	x3, TPIDR_EL1				// Load thread pointer
	ldr	w1, [x3, ACT_PREEMPT_CNT]		// Load preemption count
	msr	DAIFSet, #DAIFSC_ALL			// Disable exceptions
	cbnz	x1, exception_return_unint_tpidr_x3	// If preemption disabled, skip AST check
	ldr	x1, [x3, ACT_CPUDATAP]			// Get current CPU data pointer
	ldr	x2, [x1, CPU_PENDING_AST]		// Get ASTs
	tst	x2, AST_URGENT				// If no urgent ASTs, skip ast_taken
	b.eq	exception_return_unint_tpidr_x3
	mov	sp, x21					// Switch to thread stack for preemption

	bl	EXT(ast_taken_kernel)			// Handle AST_URGENT
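
/*
 * The kernel-return AST check above is roughly (sketch):
 *
 *	if (IRQs were enabled in the saved CPSR &&
 *	    thread->preempt_count == 0 &&
 *	    (cpu->pending_ast & AST_URGENT)) {
 *		sp = saved_state;               // x21, the thread's PCB
 *		ast_taken_kernel();             // may preempt here
 *	}
 *	exception_return();
 */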

	.globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)

	bl	EXT(dtrace_thread_bootstrap)

	b	EXT(arm64_thread_exception_return)

	.globl EXT(arm64_thread_exception_return)
LEXT(arm64_thread_exception_return)

	add	x21, x0, ACT_CONTEXT

	//
	// Fall Through to return_to_user from arm64_thread_exception_return.
	// Note that if we move return_to_user or insert a new routine
	// below arm64_thread_exception_return, the latter will need to change.
	//

	/* x21 is always the machine context pointer when we get here
	 * x28 is a bit indicating whether or not we should check if pc is in pfz */
check_user_asts:
	mrs	x3, TPIDR_EL1				// Load thread pointer

	str	w2, [x3, TH_IOTIER_OVERRIDE]		// Reset IO tier override to -1 before returning to user

	ldr	w0, [x3, TH_RWLOCK_CNT]
	cbnz	w0, rwlock_count_notzero		// Detect unbalanced RW lock/unlock

	ldr	w0, [x3, ACT_PREEMPT_CNT]
	cbnz	w0, preempt_count_notzero		// Detect unbalanced enable/disable preemption

	ldr	w0, [x3, TH_TMP_ALLOC_CNT]
	cbnz	w0, tmp_alloc_count_nozero		// Detect KHEAP_TEMP leaks

	msr	DAIFSet, #DAIFSC_ALL			// Disable exceptions
	ldr	x4, [x3, ACT_CPUDATAP]			// Get current CPU data pointer
	ldr	x0, [x4, CPU_PENDING_AST]		// Get ASTs
	cbz	x0, no_asts				// If no asts, skip ahead

	cbz	x28, user_take_ast			// If we don't need to check PFZ, just handle asts

	/*
	 * At this point, we have ASTs and we need to check whether we are running in the
	 * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
	 * the PFZ since we don't want to handle getting a signal or getting suspended
	 * while holding a spinlock in userspace.
	 *
	 * If userspace was in the PFZ, we know (via coordination with the PFZ code
	 * in commpage_asm.s) that it will not be using x15 and it is therefore safe
	 * to use it to indicate to userspace to come back to take a delayed
	 * preemption, at which point the ASTs will be handled.
	 */
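	/*
	 * Sketch of the handshake implemented below (assuming the commpage PFZ
	 * contract described above): if the saved user pc is inside the PFZ we
	 * leave the ASTs pending, write a marker into the saved x15, and return
	 * to userspace; the PFZ code notices x15 on exit and re-enters the kernel
	 * to take the deferred preemption, at which point the ASTs are handled.
	 */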
	mov	x28, xzr				// Clear the "check PFZ" bit so that we don't do this again
	mov	x19, x0					// Save x0 since it will be clobbered by commpage_is_in_pfz64

	ldr	x0, [x21, SS64_PC]			// Load pc from machine state
	bl	EXT(commpage_is_in_pfz64)		// pc in pfz?
	cbz	x0, restore_and_check_ast		// No, deal with other asts

	str	x0, [x21, SS64_X15]			// Mark x15 for userspace to take delayed preemption
	mov	x0, x19					// Restore x0 to asts
	b	no_asts					// Pretend we have no asts

restore_and_check_ast:
	mov	x0, x19					// Restore x0
	b	user_take_ast				// Service pending asts

no_asts:
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
	mov	x19, x3					// Preserve thread pointer across function call

	bl	EXT(timer_state_event_kernel_to_user)

#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */

#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
	/*
	 * Here we attempt to enable NEON access for EL0. If the last entry into the
	 * kernel from user-space was due to an IRQ, the monitor will have disabled
	 * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
	 * check in with the monitor in order to reenable NEON for EL0 in exchange
	 * for routing IRQs through the monitor (2). This way the monitor will
	 * always 'own' either IRQs or EL0 NEON.
	 *
	 * If Watchtower is disabled or we did not enter the kernel through an IRQ
	 * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
	 * Watchtower.
	 *
	 * EL0 user ________ IRQ ______
	 * EL1 xnu \ ______________________ CPACR_EL1 __/
	 * EL3 monitor \_/ \___/
	 */
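	/*
	 * In effect (a sketch of the write below): setting CPACR_FPEN_ENABLE
	 * either succeeds directly (EL1 still owns CPACR_EL1) or traps to the
	 * EL3 monitor, which re-enables EL0 NEON and takes back ownership of
	 * IRQ routing, per the timeline sketched above.
	 */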
	mov	x0, #(CPACR_FPEN_ENABLE)
	msr	CPACR_EL1, x0
#endif

	/* Establish this thread's debug state as the live state on the selected CPU. */
	ldr	x4, [x3, ACT_CPUDATAP]			// Get current CPU data pointer
	ldr	x1, [x4, CPU_USER_DEBUG]		// Get Debug context
	ldr	x0, [x3, ACT_DEBUGDATA]
	cmp	x0, x1
	beq	L_skip_user_set_debug_state		// If the thread debug state already matches the live state, skip applying it

#if defined(APPLELIGHTNING)
	/* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */
	ARM64_IS_PCORE x12				// if we're not a pCORE, also do nothing

	mrs	x12, HID1				// if any debug session ever existed, set forceNexL3ClkOn
	orr	x12, x12, ARM64_REG_HID1_forceNexL3ClkOn

#endif /* APPLELIGHTNING */

	bl	EXT(arm_debug_set)			// Establish thread debug state in live regs

	mrs	x3, TPIDR_EL1				// Reload thread pointer
L_skip_user_set_debug_state:

	b	exception_return_unint_tpidr_x3

	//
	// Fall through from return_to_user to exception_return.
	// Note that if we move exception_return or add a new routine below
	// return_to_user, the latter will have to change.
	//
exception_return:
	msr	DAIFSet, #DAIFSC_ALL			// Disable exceptions
exception_return_unint:
	mrs	x3, TPIDR_EL1				// Load thread pointer
exception_return_unint_tpidr_x3:
	mov	sp, x21					// Reload the pcb pointer
exception_return_unint_tpidr_x3_dont_trash_x18:

#if __ARM_KERNEL_PROTECT__
	/*
	 * If we are going to eret to userspace, we must return through the EL0
	 * eret mapping.
	 */
	ldr	w1, [sp, SS64_CPSR]			// Load CPSR
	tbnz	w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping	// Skip if returning to EL1

	/* We need to switch to the EL0 mapping of this code to eret to EL0. */
	adrp	x0, EXT(ExceptionVectorsBase)@page	// Load vector base
	adrp	x1, Lexception_return_restore_registers@page	// Load target PC
	add	x1, x1, Lexception_return_restore_registers@pageoff
	MOV64	x2, ARM_KERNEL_PROTECT_EXCEPTION_START	// Load EL0 vector address
	sub	x1, x1, x0				// Calculate delta
	add	x0, x2, x1				// Convert KVA to EL0 vector address
	br	x0

Lskip_el0_eret_mapping:
#endif /* __ARM_KERNEL_PROTECT__ */

Lexception_return_restore_registers:
	mov	x0, sp					// x0 = &pcb
	// Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
	AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24, el0_state_allowed=1

	/* Restore special register state */
	ldr	w3, [sp, NS64_FPSR]
	ldr	w4, [sp, NS64_FPCR]

	msr	ELR_EL1, x1				// Load the return address into ELR
	msr	SPSR_EL1, x2				// Load the return CPSR into SPSR

	CMSR FPCR, x5, x4, 1

	/* Restore arm_neon_saved_state64 */
	ldp	q0, q1, [x0, NS64_Q0]
	ldp	q2, q3, [x0, NS64_Q2]
	ldp	q4, q5, [x0, NS64_Q4]
	ldp	q6, q7, [x0, NS64_Q6]
	ldp	q8, q9, [x0, NS64_Q8]
	ldp	q10, q11, [x0, NS64_Q10]
	ldp	q12, q13, [x0, NS64_Q12]
	ldp	q14, q15, [x0, NS64_Q14]
	ldp	q16, q17, [x0, NS64_Q16]
	ldp	q18, q19, [x0, NS64_Q18]
	ldp	q20, q21, [x0, NS64_Q20]
	ldp	q22, q23, [x0, NS64_Q22]
	ldp	q24, q25, [x0, NS64_Q24]
	ldp	q26, q27, [x0, NS64_Q26]
	ldp	q28, q29, [x0, NS64_Q28]
	ldp	q30, q31, [x0, NS64_Q30]

	/* Restore arm_saved_state64 */
	// Skip x0, x1 - we're using them
	ldp	x2, x3, [x0, SS64_X2]
	ldp	x4, x5, [x0, SS64_X4]
	ldp	x6, x7, [x0, SS64_X6]
	ldp	x8, x9, [x0, SS64_X8]
	ldp	x10, x11, [x0, SS64_X10]
	ldp	x12, x13, [x0, SS64_X12]
	ldp	x14, x15, [x0, SS64_X14]
	// Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
	ldp	x18, x19, [x0, SS64_X18]
	ldp	x20, x21, [x0, SS64_X20]
	ldp	x22, x23, [x0, SS64_X22]
	ldp	x24, x25, [x0, SS64_X24]
	ldp	x26, x27, [x0, SS64_X26]
	ldr	x28, [x0, SS64_X28]
	ldr	fp, [x0, SS64_FP]
	// Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0

	// Restore stack pointer and our last two GPRs
	ldr	x1, [x0, SS64_SP]
	mov	sp, x1

#if __ARM_KERNEL_PROTECT__
	ldr	w18, [x0, SS64_CPSR]			// Stash CPSR
#endif /* __ARM_KERNEL_PROTECT__ */

	ldp	x0, x1, [x0, SS64_X0]			// Restore the GPRs

#if __ARM_KERNEL_PROTECT__
	/* If we are going to eret to userspace, we must unmap the kernel. */
	tbnz	w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch

	/* Update TCR to unmap the kernel. */
	MOV64	x18, TCR_EL1_USER

	/*
	 * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
	 * each other due to the microarchitecture.
	 */
#if !defined(APPLE_ARM64_ARCH_FAMILY)

	/* Switch to the user ASID (low bit clear) for the task. */
	bic	x18, x18, #(1 << TTBR_ASID_SHIFT)

	/* We don't need an ISB here, as the eret is synchronizing. */
Lskip_ttbr1_switch:
#endif /* __ARM_KERNEL_PROTECT__ */

	ERET_CONTEXT_SYNCHRONIZING

user_take_ast:
	bl	EXT(ast_taken_user)			// Handle all ASTs, may return via continuation

	b	check_user_asts				// Now try again

	str	x0, [sp, #-16]!				// We'll print thread pointer
	adr	x0, L_underflow_str			// Format string
	CALL_EXTERN panic				// Game over

L_underflow_str:
	.asciz "Preemption count negative on thread %p"

rwlock_count_notzero:
	str	x0, [sp, #-16]!				// We'll print thread pointer
	ldr	w0, [x0, TH_RWLOCK_CNT]

	adr	x0, L_rwlock_count_notzero_str		// Format string
	CALL_EXTERN panic				// Game over

L_rwlock_count_notzero_str:
	.asciz "RW lock count not 0 on thread %p (%u)"

preempt_count_notzero:
	str	x0, [sp, #-16]!				// We'll print thread pointer
	ldr	w0, [x0, ACT_PREEMPT_CNT]

	adr	x0, L_preempt_count_notzero_str		// Format string
	CALL_EXTERN panic				// Game over

L_preempt_count_notzero_str:
	.asciz "preemption count not 0 on thread %p (%u)"
#endif /* MACH_ASSERT */

tmp_alloc_count_nozero:
	CALL_EXTERN kheap_temp_leak_panic

#if __ARM_KERNEL_PROTECT__
	/*
	 * This symbol denotes the end of the exception vector/eret range; we page
	 * align it so that we can avoid mapping other text in the EL0 exception
	 * vector mapping.
	 */
	.globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)
#endif /* __ARM_KERNEL_PROTECT__ */

/*
 * Functions to preflight the fleh handlers when the PPL has taken an exception;
 * mostly concerned with setting up state for the normal fleh code.
 */
fleh_synchronous_from_ppl:

	mrs	x1, ESR_EL1				// Get the exception syndrome

	/*
	 * If the stack pointer is corrupt, it will manifest either as a data abort
	 * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
	 * these quickly by testing bit 5 of the exception class.
	 */
	tbz	x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
	mrs	x0, SP_EL0				// Get SP_EL0

	/* Perform high level checks for stack corruption. */
	and	x1, x1, #ESR_EC_MASK			// Mask the exception class
	mov	x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
	cmp	x1, x2					// If we have a stack alignment exception
	b.eq	Lcorrupt_ppl_stack			// ...the stack is definitely corrupted
	mov	x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
	cmp	x1, x2					// If we have a data abort, we need to
	b.ne	Lvalid_ppl_stack			// ...validate the stack pointer

	/* Bounds check the PPL stack. */
	adrp	x10, EXT(pmap_stacks_start)@page
	ldr	x10, [x10, #EXT(pmap_stacks_start)@pageoff]
	adrp	x11, EXT(pmap_stacks_end)@page
	ldr	x11, [x11, #EXT(pmap_stacks_end)@pageoff]
	cmp	x0, x10
	b.lo	Lcorrupt_ppl_stack
	cmp	x0, x11
	b.hi	Lcorrupt_ppl_stack
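	/*
	 * i.e. (sketch): the PPL stack pointer must satisfy
	 *
	 *	pmap_stacks_start <= SP_EL0 <= pmap_stacks_end
	 *
	 * anything outside that window is treated as a corrupt PPL stack.
	 */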

Lvalid_ppl_stack:
	/* Switch back to the kernel stack. */
	GET_PMAP_CPU_DATA x5, x6, x7
	ldr	x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov	sp, x6

	/* Hand off to the synch handler. */
	b	EXT(fleh_synchronous)

Lcorrupt_ppl_stack:
	/* Hand off to the invalid stack handler. */
	b	fleh_invalid_stack

fleh_serror_from_ppl:
	GET_PMAP_CPU_DATA x5, x6, x7
	ldr	x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov	sp, x6

	// x15: ppl call number
	// x20: gxf_enter caller's DAIF
	.globl EXT(ppl_trampoline_start)
LEXT(ppl_trampoline_start)

#error "XPRR configuration error"
	b.ne	Lppl_fail_dispatch

	/* Verify the request ID. */
	b.hs	Lppl_fail_dispatch

	GET_PMAP_CPU_DATA x12, x13, x14

	/* Mark this CPU as being in the PPL. */
	ldr	w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	cmp	w9, #PPL_STATE_KERNEL
	b.eq	Lppl_mark_cpu_as_dispatching

	/* Check to see if we are trying to trap from within the PPL. */
	cmp	w9, #PPL_STATE_DISPATCH
	b.eq	Lppl_fail_dispatch_ppl

	/* Ensure that we are returning from an exception. */
	cmp	w9, #PPL_STATE_EXCEPTION
	b.ne	Lppl_fail_dispatch

	// w10 was set by CHECK_EXCEPTION_RETURN_DISPATCH_PPL
	cmp	w10, #PPL_STATE_EXCEPTION
	b.ne	Lppl_fail_dispatch

	/* This is an exception return; set the CPU to the dispatching state. */
	mov	w9, #PPL_STATE_DISPATCH
	str	w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Find the save area, and return to the saved PPL context. */
	ldr	x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
	b	EXT(return_to_ppl)
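
/*
 * Summary of the PMAP_CPU_DATA_PPL_STATE transitions handled here (a sketch;
 * only the states visible in this path are listed):
 *
 *	KERNEL    -> DISPATCH   on a fresh PPL call (Lppl_mark_cpu_as_dispatching)
 *	EXCEPTION -> DISPATCH   when returning from a PPL exception (the path above)
 *	DISPATCH  -> KERNEL     when the PPL call completes cleanly
 *	anything else           -> Lppl_fail_dispatch / Lppl_fail_dispatch_ppl
 */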

Lppl_mark_cpu_as_dispatching:
	cmp	w10, #PPL_STATE_KERNEL
	b.ne	Lppl_fail_dispatch

	/* Mark the CPU as dispatching. */
	mov	w13, #PPL_STATE_DISPATCH
	str	w13, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Switch to the regular PPL stack. */
	// TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
	ldr	x9, [x12, PMAP_CPU_DATA_PPL_STACK]

	// SP0 is thread stack here
	mov	x21, sp
	// SP0 is now PPL stack
	mov	sp, x9

	/* Save the old stack pointer off in case we need it. */
	str	x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]

	/* Get the handler for the request */
	adrp	x9, EXT(ppl_handler_table)@page
	add	x9, x9, EXT(ppl_handler_table)@pageoff
	add	x9, x9, x15, lsl #3

	/* Branch to the code that will invoke the PPL request. */
	b	EXT(ppl_dispatch)

Lppl_fail_dispatch_ppl:
	/* Switch back to the kernel stack. */
	ldr	x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov	sp, x10

Lppl_fail_dispatch:
	/* Indicate that we failed. */
	mov	x15, #PPL_EXIT_BAD_CALL

	/* Move the DAIF bits into the expected register. */

	/* Return to kernel mode. */
	b	ppl_return_to_kernel_mode

Lppl_dispatch_exit:
	/* Indicate that we are cleanly exiting the PPL. */
	mov	x15, #PPL_EXIT_DISPATCH

	/* Switch back to the original (kernel thread) stack. */

	/* Move the saved DAIF bits. */

	/* Clear the old stack pointer. */
	str	xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]

	/*
	 * Mark the CPU as no longer being in the PPL. We spin if our state
	 * machine is broken.
	 */
	ldr	w9, [x12, PMAP_CPU_DATA_PPL_STATE]
	cmp	w9, #PPL_STATE_DISPATCH
	b.ne	.
	mov	w9, #PPL_STATE_KERNEL
	str	w9, [x12, PMAP_CPU_DATA_PPL_STATE]

	/* Return to the kernel. */
	b	ppl_return_to_kernel_mode

	/*
	 * If we are dealing with an exception, hand off to the first level
	 * exception handler.
	 */
	cmp	x15, #PPL_EXIT_EXCEPTION
	b.eq	Ljump_to_fleh_handler

	/* Restore the original AIF state. */

	/* If this was a panic call from the PPL, reinvoke panic. */
	cmp	x15, #PPL_EXIT_PANIC_CALL
	b.eq	Ljump_to_panic_trap_to_debugger

	/* Load the preemption count. */
	ldr	w12, [x10, ACT_PREEMPT_CNT]

	/* Detect underflow */
	cbnz	w12, Lno_preempt_underflow

Lno_preempt_underflow:

	/* Lower the preemption count. */
	str	w12, [x10, ACT_PREEMPT_CNT]

	/* Skip ASTs if the preemption count is not zero. */
	cbnz	x12, Lppl_skip_ast_taken

	/* Skip the AST check if interrupts are disabled. */
	b.ne	Lppl_skip_ast_taken

	/* Disable interrupts. */
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)

	/* If there is no urgent AST, skip the AST. */
	ldr	x12, [x10, ACT_CPUDATAP]
	ldr	x14, [x12, CPU_PENDING_AST]
	b.eq	Lppl_defer_ast_taken

	/* Stash our return value and return reason. */

	/* Handle the AST. */
	bl	EXT(ast_taken_kernel)

	/* Restore the return value and the return reason. */

Lppl_defer_ast_taken:
	/* Reenable interrupts. */
	msr	DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)

Lppl_skip_ast_taken:
	/* Pop the stack frame. */
	ldp	x29, x30, [sp, #0x10]
	ldp	x20, x21, [sp], #0x20

	/* Check to see if this was a bad request. */
	cmp	x15, #PPL_EXIT_BAD_CALL

Ljump_to_fleh_handler:

Ljump_to_panic_trap_to_debugger:
	b	EXT(panic_trap_to_debugger)

	adrp	x0, Lppl_bad_call_panic_str@page
	add	x0, x0, Lppl_bad_call_panic_str@pageoff

	.globl EXT(ppl_dispatch)
LEXT(ppl_dispatch)
	/*
	 * Save a couple of important registers (implementation detail; x12 has
	 * the PPL per-CPU data address; x13 is not actually interesting).
	 */
	stp	x12, x13, [sp, #-0x10]!

	/* Restore the original AIF state. */

	/*
	 * Note that if the method is NULL, we'll blow up with a prefetch abort,
	 * but the exception vectors will deal with this properly.
	 */

	/* Invoke the PPL method. */
#ifdef HAS_APPLE_PAC

	msr	DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)

	/* Restore those important registers. */
	ldp	x12, x13, [sp], #0x10

	/* Mark this as a regular return, and hand off to the return path. */
	b	Lppl_dispatch_exit

	.globl EXT(ppl_bootstrap_dispatch)
LEXT(ppl_bootstrap_dispatch)
	/* Verify the PPL request. */
	b.hs	Lppl_fail_bootstrap_dispatch

	/* Get the requested PPL routine. */
	adrp	x9, EXT(ppl_handler_table)@page
	add	x9, x9, EXT(ppl_handler_table)@pageoff
	add	x9, x9, x15, lsl #3

	/* Invoke the requested PPL routine. */
#ifdef HAS_APPLE_PAC

	/* Stash off the return value */

	/* Drop the preemption count */
	bl	EXT(_enable_preemption)

	/* Pop the stack frame. */
	ldp	x29, x30, [sp, #0x10]
	ldp	x20, x21, [sp], #0x20
#if __has_feature(ptrauth_returns)

Lppl_fail_bootstrap_dispatch:
	/* Pop our stack frame and panic. */
	ldp	x29, x30, [sp, #0x10]
	ldp	x20, x21, [sp], #0x20
#if __has_feature(ptrauth_returns)

	adrp	x0, Lppl_bad_call_panic_str@page
	add	x0, x0, Lppl_bad_call_panic_str@pageoff

	.globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)

	msr	DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)

	adrp	x12, EXT(pmap_ppl_locked_down)@page
	ldr	w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
	cbz	w12, Lnot_in_ppl_dispatch

	LOAD_PMAP_CPU_DATA x11, x12, x13

	ldr	w12, [x11, PMAP_CPU_DATA_PPL_STATE]
	cmp	w12, #PPL_STATE_DISPATCH
	b.ne	Lnot_in_ppl_dispatch

	/* Indicate (for the PPL->kernel transition) that we are panicking. */
	mov	x15, #PPL_EXIT_PANIC_CALL

	/* Restore the old stack pointer, since we can't push onto the PPL stack after we exit the PPL. */
	ldr	x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
	mov	sp, x12

	mov	w13, #PPL_STATE_PANIC
	str	w13, [x11, PMAP_CPU_DATA_PPL_STATE]

	/* Now we are ready to exit the PPL. */
	b	ppl_return_to_kernel_mode
Lnot_in_ppl_dispatch:

Lppl_bad_call_panic_str:
	.asciz "ppl_dispatch: failed due to bad arguments/state"
#else /* XNU_MONITOR */

	.globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
	ret
#endif /* XNU_MONITOR */

/* ARM64_TODO Is globals_asm.h needed? */
//#include "globals_asm.h"

/* vim: set ts=4: */