/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/caches_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/thread.h>
#include <arm/rtclock.h>
#include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm64/monotonic.h>
#include <arm64/instructions.h>

#include <kern/debug.h>
#include <kern/thread.h>
#include <mach/exception.h>
#include <mach/arm/traps.h>
#include <mach/vm_types.h>
#include <mach/machine/thread_status.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <pexpert/arm/protos.h>

#include <vm/vm_page.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <sys/errno.h>
#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <kern/policy_internal.h>
#include <kern/telemetry.h>

#include <prng/entropy.h>
#ifndef __arm64__
#error Should only be compiling for arm64.
#endif
#define TEST_CONTEXT32_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)

#define TEST_CONTEXT64_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)

#define ASSERT_CONTEXT_SANITY(context) \
	assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))
#define COPYIN(src, dst, size)                           \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyin_kern(src, dst, size) :                    \
	copyin(src, dst, size)

#define COPYOUT(src, dst, size)                          \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyout_kern(src, dst, size) :                   \
	copyout(src, dst, size)
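
/*
 * Note: COPYIN/COPYOUT pick the copy routine from the exception level recorded
 * in the saved CPSR of the surrounding handler's "state" variable: faults taken
 * from kernel mode use copyin_kern/copyout_kern, while faults taken from user
 * mode go through the normal copyin/copyout paths.
 */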
// Below is for concatenating a string param to a string literal
#define STR1(x) #x
#define STR(x) STR1(x)
#define ARM64_KDBG_CODE_KERNEL (0 << 8)
#define ARM64_KDBG_CODE_USER   (1 << 8)
#define ARM64_KDBG_CODE_GUEST  (2 << 8)
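
/*
 * A synchronous-exception trace code is composed as (origin | ESR class): the
 * exception class occupies the low byte and the origin (kernel/user/guest)
 * bit 8, so the resulting value must fit in a uint16_t and stay within
 * KDBG_CODE_MAX, which the static asserts below check.
 */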
_Static_assert(ARM64_KDBG_CODE_GUEST <= KDBG_CODE_MAX, "arm64 KDBG trace codes out of range");
_Static_assert(ARM64_KDBG_CODE_GUEST <= UINT16_MAX, "arm64 KDBG trace codes out of range");
void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;

void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);
void sleh_irq(arm_saved_state_t *);
void sleh_fiq(arm_saved_state_t *);
void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;

static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
static void sleh_interrupt_handler_epilogue(void);

static void handle_svc(arm_saved_state_t *);
static void handle_mach_absolute_time_trap(arm_saved_state_t *);
static void handle_mach_continuous_time_trap(arm_saved_state_t *);

static void handle_msr_trap(arm_saved_state_t *state, uint32_t esr);

extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);

static void handle_uncategorized(arm_saved_state_t *);
static void handle_kernel_breakpoint(arm_saved_state_t *, uint32_t) __dead2;
static void handle_breakpoint(arm_saved_state_t *, uint32_t) __dead2;

typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);

static int is_vm_fault(fault_status_t);
static int is_translation_fault(fault_status_t);
static int is_alignment_fault(fault_status_t);

typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);

static void handle_pc_align(arm_saved_state_t *ss) __dead2;
static void handle_sp_align(arm_saved_state_t *ss) __dead2;
static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;

static void handle_watchpoint(vm_offset_t fault_addr) __dead2;

static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t,
    abort_inspector_t, abort_handler_t, expected_fault_handler_t);

static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;

static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;

extern void mach_kauth_cred_uthread_update(void);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);

typedef uint32_t arm64_instr_t;
extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
    struct uthread * uthread, struct proc * proc);

extern void
mach_syscall(struct arm_saved_state *);
extern kern_return_t dtrace_user_probe(arm_saved_state_t * regs);
extern boolean_t dtrace_tally_fault(user_addr_t);
/*
 * Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy
 * and paste the trap instructions over from that file. Need to keep these in sync!
 */
#define FASTTRAP_ARM32_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB32_INSTR 0xdefc
#define FASTTRAP_ARM64_INSTR 0xe7eeee7e

#define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB32_RET_INSTR 0xdefb
#define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d
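
/*
 * handle_uncategorized() below compares the faulting instruction word against
 * these encodings to decide whether an undefined-instruction exception is
 * really a DTrace fasttrap probe that should be routed to dtrace_user_probe().
 */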
/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */

extern boolean_t pgtrace_enabled;
#if HAS_TWO_STAGE_SPR_LOCK
#ifdef CONFIG_XNUPOST
extern volatile vm_offset_t spr_lock_test_addr;
extern volatile uint32_t spr_lock_exception_esr;
#endif
#endif

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
#endif

extern void arm64_thread_exception_return(void) __dead2;
#if defined(APPLETYPHOON)
#define CPU_NAME "Typhoon"
#elif defined(APPLETWISTER)
#define CPU_NAME "Twister"
#elif defined(APPLEHURRICANE)
#define CPU_NAME "Hurricane"
#elif defined(APPLELIGHTNING)
#define CPU_NAME "Lightning"
#else
#define CPU_NAME "Unknown"
#endif
#if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
#define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
#define ESR_WT_REASON(esr) ((esr) & 0xff)

#define WT_REASON_NONE           0
#define WT_REASON_INTEGRITY_FAIL 1
#define WT_REASON_BAD_SYSCALL    2
#define WT_REASON_NOT_LOCKED     3
#define WT_REASON_ALREADY_LOCKED 4
#define WT_REASON_SW_REQ         5
#define WT_REASON_PT_INVALID     6
#define WT_REASON_PT_VIOLATION   7
#define WT_REASON_REG_VIOLATION  8
#endif
#if defined(HAS_IPI)
void cpu_signal_handler(void);
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */
static arm_saved_state64_t *original_faulting_state = NULL;

TUNABLE(bool, fp_exceptions_enabled, "-fp_exceptions", false);

extern vm_offset_t static_memory_end;
static inline int
is_vm_fault(fault_status_t status)
{
    switch (status) {
    case FSC_TRANSLATION_FAULT_L0:
    case FSC_TRANSLATION_FAULT_L1:
    case FSC_TRANSLATION_FAULT_L2:
    case FSC_TRANSLATION_FAULT_L3:
    case FSC_ACCESS_FLAG_FAULT_L1:
    case FSC_ACCESS_FLAG_FAULT_L2:
    case FSC_ACCESS_FLAG_FAULT_L3:
    case FSC_PERMISSION_FAULT_L1:
    case FSC_PERMISSION_FAULT_L2:
    case FSC_PERMISSION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}
static inline int
is_translation_fault(fault_status_t status)
{
    switch (status) {
    case FSC_TRANSLATION_FAULT_L0:
    case FSC_TRANSLATION_FAULT_L1:
    case FSC_TRANSLATION_FAULT_L2:
    case FSC_TRANSLATION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}
static inline int
is_permission_fault(fault_status_t status)
{
    switch (status) {
    case FSC_PERMISSION_FAULT_L1:
    case FSC_PERMISSION_FAULT_L2:
    case FSC_PERMISSION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}
static inline int
is_alignment_fault(fault_status_t status)
{
    return status == FSC_ALIGNMENT_FAULT;
}
static inline int
is_parity_error(fault_status_t status)
{
    switch (status) {
    case FSC_SYNC_PARITY:
    case FSC_ASYNC_PARITY:
    case FSC_SYNC_PARITY_TT_L1:
    case FSC_SYNC_PARITY_TT_L2:
    case FSC_SYNC_PARITY_TT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}
static inline unsigned
__ror(unsigned value, unsigned shift)
{
    return ((unsigned)(value) >> (unsigned)(shift)) |
           (unsigned)(value) << ((unsigned)(sizeof(unsigned) * CHAR_BIT) - (unsigned)(shift));
}
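
/*
 * Example: on 32-bit unsigned values __ror(0x80000001, 1) == 0xC0000000, and
 * __ror(0x80000001, 9) == 0x00C00000. This is the rotation used by sleh_irq()
 * below when folding interrupt timestamps into the entropy buffer.
 */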
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#if defined(APPLE_ARM64_ARCH_FAMILY)
    uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
#if defined(NO_ECORE)
    uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;

    mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
    l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
    l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
    l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
    lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
    fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

    panic_plain("Unhandled " CPU_NAME
        " implementation specific error. state=%p esr=%#x far=%p\n"
        "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
        "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
        state, esr, (void *)far,
        (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
        (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);

#elif defined(HAS_MIGSTS)
    uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;

    mpidr = __builtin_arm_rsr64("MPIDR_EL1");
    migsts = __builtin_arm_rsr64(STR(ARM64_REG_MIGSTS_EL1));
    mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
    l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
    l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
    l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
    lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
    fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

    panic_plain("Unhandled " CPU_NAME
        " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
        "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
        "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
        state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
        (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
        (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
#else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
    uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;
#if defined(HAS_DPC_ERR)
    uint64_t dpc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_DPC_ERR_STS));
#endif // defined(HAS_DPC_ERR)

    mpidr = __builtin_arm_rsr64("MPIDR_EL1");

    if (mpidr & MPIDR_PNE) {
        mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
        lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
        fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
    } else {
        mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_MMU_ERR_STS));
        lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_LSU_ERR_STS));
        fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_FED_ERR_STS));
    }

    llc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
    llc_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
    llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));

    panic_plain("Unhandled " CPU_NAME
        " implementation specific error. state=%p esr=%#x far=%p p-core?%d"
#if defined(HAS_DPC_ERR)
        " dpc_err_sts:%p"
#endif
        "\n"
        "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
        "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n",
        state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
#if defined(HAS_DPC_ERR)
        (void *)dpc_err_sts,
#endif
        (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
        (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
#endif
#else // !defined(APPLE_ARM64_ARCH_FAMILY)
#pragma unused (state, esr, far)
    panic_plain("Unhandled implementation specific error\n");
#endif
}
#if CONFIG_KERNEL_INTEGRITY
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
{
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
    if (ESR_WT_SERROR(esr)) {
        switch (ESR_WT_REASON(esr)) {
        case WT_REASON_INTEGRITY_FAIL:
            panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
        case WT_REASON_BAD_SYSCALL:
            panic_plain("Kernel integrity, bad syscall.");
        case WT_REASON_NOT_LOCKED:
            panic_plain("Kernel integrity, not locked.");
        case WT_REASON_ALREADY_LOCKED:
            panic_plain("Kernel integrity, already locked.");
        case WT_REASON_SW_REQ:
            panic_plain("Kernel integrity, software request.");
        case WT_REASON_PT_INVALID:
            panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
                "walking 0x%016lx.", far);
        case WT_REASON_PT_VIOLATION:
            panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
                far);
        case WT_REASON_REG_VIOLATION:
            panic_plain("Kernel integrity, violation in system register %d.",
                (unsigned)far);
        default:
            panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
        }
    }
#else
    if (ESR_WT_SERROR(esr)) {
        panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
    }
#endif
#endif
}
#pragma clang diagnostic pop
#endif /* CONFIG_KERNEL_INTEGRITY */
void
arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
    cpu_data_t *cdp = getCpuDatap();

#if CONFIG_KERNEL_INTEGRITY
    kernel_integrity_error_handler(esr, far);
#endif

    if (PE_handle_platform_error(far)) {
        /* handled by the platform expert */
    } else if (cdp->platform_error_handler != NULL) {
        cdp->platform_error_handler(cdp->cpu_id, far);
    } else {
        arm64_implementation_specific_error(state, esr, far);
    }
}
void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
    boolean_t ss_valid;

    ss_valid = is_saved_state64(ss);
    arm_saved_state64_t *state = saved_state64(ss);

    os_atomic_cmpxchg(&original_faulting_state, NULL, state, seq_cst);

    panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
        "\t x0: 0x%016llx  x1: 0x%016llx  x2:  0x%016llx  x3:  0x%016llx\n"
        "\t x4: 0x%016llx  x5: 0x%016llx  x6:  0x%016llx  x7:  0x%016llx\n"
        "\t x8: 0x%016llx  x9: 0x%016llx  x10: 0x%016llx  x11: 0x%016llx\n"
        "\t x12: 0x%016llx x13: 0x%016llx  x14: 0x%016llx  x15: 0x%016llx\n"
        "\t x16: 0x%016llx x17: 0x%016llx  x18: 0x%016llx  x19: 0x%016llx\n"
        "\t x20: 0x%016llx x21: 0x%016llx  x22: 0x%016llx  x23: 0x%016llx\n"
        "\t x24: 0x%016llx x25: 0x%016llx  x26: 0x%016llx  x27: 0x%016llx\n"
        "\t x28: 0x%016llx fp:  0x%016llx  lr:  0x%016llx  sp:  0x%016llx\n"
        "\t pc:  0x%016llx cpsr: 0x%08x         esr: 0x%08x          far: 0x%016llx\n",
        msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
        state->x[0], state->x[1], state->x[2], state->x[3],
        state->x[4], state->x[5], state->x[6], state->x[7],
        state->x[8], state->x[9], state->x[10], state->x[11],
        state->x[12], state->x[13], state->x[14], state->x[15],
        state->x[16], state->x[17], state->x[18], state->x[19],
        state->x[20], state->x[21], state->x[22], state->x[23],
        state->x[24], state->x[25], state->x[26], state->x[27],
        state->x[28], state->fp, state->lr, state->sp,
        state->pc, state->cpsr, state->esr, state->far);
}
void
sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
{
    esr_exception_class_t  class = ESR_EC(esr);
    arm_saved_state_t    * state = &context->ss;

    switch (class) {
    case ESR_EC_UNCATEGORIZED:
    {
        uint32_t instr = *((uint32_t*)get_saved_state_pc(state));

        if (IS_ARM_GDB_TRAP(instr)) {
            DebuggerCall(EXC_BREAKPOINT, state);
        }
    }
        OS_FALLTHROUGH; // panic if we return from the debugger
    default:
        panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
    }
}
#if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
static bool
handle_msr_write_from_xnupost(arm_saved_state_t *state, uint32_t esr)
{
    user_addr_t pc = get_saved_state_pc(state);
    if ((spr_lock_test_addr != 0) && (pc == spr_lock_test_addr)) {
        spr_lock_exception_esr = esr;
        set_saved_state_pc(state, pc + 4);
        return true;
    }

    return false;
}
#endif
__attribute__((noreturn))
void
thread_exception_return()
{
    thread_t thread = current_thread();
    if (thread->machine.exception_trace_code != 0) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, 0, 0, 0, 0, 0);
        thread->machine.exception_trace_code = 0;
    }

    arm64_thread_exception_return();
    __builtin_unreachable();
}
/*
 * check whether task vtimers are running and set thread and CPU BSD AST
 *
 * must be called with interrupts masked so updates of fields are atomic
 * must be emitted inline to avoid generating an FBT probe on the exception path
 */
__attribute__((__always_inline__))
static inline void
task_vtimer_check(thread_t thread)
{
    if (__improbable(thread->task->vtimers)) {
        thread->ast |= AST_BSD;
        thread->machine.CpuDatap->cpu_pending_ast |= AST_BSD;
    }
}
void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
    esr_exception_class_t  class   = ESR_EC(esr);
    arm_saved_state_t    * state   = &context->ss;
    vm_offset_t            recover = 0;
    thread_t               thread  = current_thread();

    int preemption_level = get_preemption_level();

    expected_fault_handler_t expected_fault_handler = NULL;
#ifdef CONFIG_XNUPOST
    expected_fault_handler_t saved_expected_fault_handler = NULL;
    uintptr_t saved_expected_fault_addr = 0;
#endif /* CONFIG_XNUPOST */

    ASSERT_CONTEXT_SANITY(context);

    task_vtimer_check(thread);

    /*
     * Handle kernel DTrace probes as early as possible to minimize the likelihood
     * that this path will itself trigger a DTrace probe, which would lead to infinite
     * probe recursion.
     */
    if (__improbable((class == ESR_EC_UNCATEGORIZED) && tempDTraceTrapHook &&
        (tempDTraceTrapHook(EXC_BAD_INSTRUCTION, state, 0, 0) == KERN_SUCCESS))) {
        return;
    }

    bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

    /*
     * Use KERNEL_DEBUG_CONSTANT_IST here to avoid producing tracepoints
     * that would disclose the behavior of PT_DENY_ATTACH processes.
     */
    if (is_user) {
        thread->machine.exception_trace_code = (uint16_t)(ARM64_KDBG_CODE_USER | class);
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_START,
            esr, far, get_saved_state_pc(state), 0, 0);
    } else {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_START,
            esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
    }

    if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
        /*
         * We no longer support 32-bit, which means no 2-byte
         * instructions.
         */
        if (is_user) {
            panic("Exception on 2-byte instruction, "
                "context=%p, esr=%#x, far=%p",
                context, esr, (void *)far);
        } else {
            panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
        }
    }

    /* Don't run exception handler with recover handler set in case of double fault */
    if (thread->recover) {
        recover = thread->recover;
        thread->recover = (vm_offset_t)NULL;
    }

#ifdef CONFIG_XNUPOST
    if (thread->machine.expected_fault_handler != NULL) {
        saved_expected_fault_handler = thread->machine.expected_fault_handler;
        saved_expected_fault_addr = thread->machine.expected_fault_addr;

        thread->machine.expected_fault_handler = NULL;
        thread->machine.expected_fault_addr = 0;

        if (saved_expected_fault_addr == far) {
            expected_fault_handler = saved_expected_fault_handler;
        }
    }
#endif /* CONFIG_XNUPOST */

    /* Inherit the interrupt masks from previous context */
    if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
        ml_set_interrupts_enabled(TRUE);
    }

    switch (class) {
    case ESR_EC_SVC_64:
        if (!is_saved_state64(state) || !is_user) {
            panic("Invalid SVC_64 context");
        }

        handle_svc(state);
        break;

    case ESR_EC_DABORT_EL0:
        handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort, expected_fault_handler);
        break;

    case ESR_EC_MSR_TRAP:
        handle_msr_trap(state, esr);
        break;

    case ESR_EC_IABORT_EL0:
        handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort, expected_fault_handler);
        break;

    case ESR_EC_IABORT_EL1:
#ifdef CONFIG_XNUPOST
        if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
            break;
        }
#endif /* CONFIG_XNUPOST */

        panic_with_thread_kernel_state("Kernel instruction fetch abort", state);

    case ESR_EC_PC_ALIGN:
        handle_pc_align(state);
        __builtin_unreachable();

    case ESR_EC_DABORT_EL1:
        handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort, expected_fault_handler);
        break;

    case ESR_EC_UNCATEGORIZED:
        assert(!ESR_ISS(esr));

#if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
        if (handle_msr_write_from_xnupost(state, esr)) {
            break;
        }
#endif

        handle_uncategorized(&context->ss);
        break;

    case ESR_EC_SP_ALIGN:
        handle_sp_align(state);
        __builtin_unreachable();

    case ESR_EC_BKPT_AARCH32:
        handle_breakpoint(state, esr);
        __builtin_unreachable();

    case ESR_EC_BRK_AARCH64:
        if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
            handle_kernel_breakpoint(state, esr);
        } else {
            handle_breakpoint(state, esr);
        }
        __builtin_unreachable();

    case ESR_EC_BKPT_REG_MATCH_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_breakpoint(state, esr);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_BKPT_REG_MATCH_EL1:
        panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
        __builtin_unreachable();

    case ESR_EC_SW_STEP_DEBUG_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_sw_step_debug(state);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_SW_STEP_DEBUG_EL1:
        panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
        __builtin_unreachable();

    case ESR_EC_WATCHPT_MATCH_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_watchpoint(far);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_WATCHPT_MATCH_EL1:
        /*
         * If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
         * abort. Turn off watchpoints and keep going; we'll turn them back on in return_from_exception..
         */
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            arm_debug_set(NULL);
            break; /* return to first level handler */
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_TRAP_SIMD_FP:
        handle_simd_trap(state, esr);
        __builtin_unreachable();

    case ESR_EC_ILLEGAL_INSTR_SET:
        if (EXCB_ACTION_RERUN !=
            ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
            // instruction is not re-executed
            panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
                state, class, esr, (void *)far, get_saved_state_cpsr(state));
        }
        // must clear this fault in PSR to re-run
        mask_saved_state_cpsr(state, 0, PSR64_IL);
        break;

    case ESR_EC_MCR_MRC_CP15_TRAP:
    case ESR_EC_MCRR_MRRC_CP15_TRAP:
    case ESR_EC_MCR_MRC_CP14_TRAP:
    case ESR_EC_LDC_STC_CP14_TRAP:
    case ESR_EC_MCRR_MRRC_CP14_TRAP:
        handle_user_trapped_instruction32(state, esr);
        __builtin_unreachable();

    case ESR_EC_WFI_WFE:
        // Use of WFI or WFE instruction when they have been disabled for EL0
        handle_wf_trap(state);
        __builtin_unreachable();

    case ESR_EC_FLOATING_POINT_64:
        handle_fp_trap(state, esr);
        __builtin_unreachable();

    default:
        panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p",
            state, class, esr, (void *)far);
        __builtin_unreachable();
    }

#ifdef CONFIG_XNUPOST
    if (saved_expected_fault_handler != NULL) {
        thread->machine.expected_fault_handler = saved_expected_fault_handler;
        thread->machine.expected_fault_addr = saved_expected_fault_addr;
    }
#endif /* CONFIG_XNUPOST */

    if (recover) {
        thread->recover = recover;
    }
    if (is_user) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END,
            esr, far, get_saved_state_pc(state), 0, 0);
        thread->machine.exception_trace_code = 0;
    } else {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_END,
            esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
    }

    if (preemption_level != get_preemption_level()) {
        panic("synchronous exception changed preemption level from %d to %d", preemption_level, get_preemption_level());
    }
}
/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 */
static void
handle_uncategorized(arm_saved_state_t *state)
{
    exception_type_t           exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t     numcodes  = 2;
    uint32_t                   instr     = 0;

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

#if CONFIG_DTRACE

    if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
        /*
         * For a 64bit user process, we care about all 4 bytes of the
         * instr.
         */
        if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
            if (dtrace_user_probe(state) == KERN_SUCCESS) {
                return;
            }
        }
    } else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
        /*
         * For a 32bit user process, we check for thumb mode, in
         * which case we only care about a 2 byte instruction length.
         * For non-thumb mode, we care about all 4 bytes of the instruction.
         */
        if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
            if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
                ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
                if (dtrace_user_probe(state) == KERN_SUCCESS) {
                    return;
                }
            }
        } else {
            if ((instr == FASTTRAP_ARM32_INSTR) ||
                (instr == FASTTRAP_ARM32_RET_INSTR)) {
                if (dtrace_user_probe(state) == KERN_SUCCESS) {
                    return;
                }
            }
        }
    }

#endif /* CONFIG_DTRACE */

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        if (IS_ARM_GDB_TRAP(instr)) {
            boolean_t interrupt_state;
            exception = EXC_BREAKPOINT;

            interrupt_state = ml_set_interrupts_enabled(FALSE);

            /* Save off the context here (so that the debug logic
             * can see the original state of this thread).
             */
            current_thread()->machine.kpcb = state;

            /* Hop into the debugger (typically either due to a
             * fatal exception, an explicit panic, or a stackshot
             * request).
             */
            DebuggerCall(exception, state);

            current_thread()->machine.kpcb = NULL;
            (void) ml_set_interrupts_enabled(interrupt_state);
            return;
        } else {
            panic("Undefined kernel instruction: pc=%p instr=%x\n", (void*)get_saved_state_pc(state), instr);
        }
    }

    /*
     * Check for GDB breakpoint via illegal opcode.
     */
    if (IS_ARM_GDB_TRAP(instr)) {
        exception = EXC_BREAKPOINT;
        codes[0] = EXC_ARM_BREAKPOINT;
        codes[1] = instr;
    } else {
        codes[1] = instr;
    }

    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}
#if __has_feature(ptrauth_calls)
static const uint16_t ptrauth_brk_comment_base = 0xc470;

static inline bool
brk_comment_is_ptrauth(uint16_t comment)
{
    return comment >= ptrauth_brk_comment_base &&
           comment <= ptrauth_brk_comment_base + ptrauth_key_asdb;
}

static inline const char *
brk_comment_to_ptrauth_key(uint16_t comment)
{
    switch (comment - ptrauth_brk_comment_base) {
    case ptrauth_key_asia:
        return "IA";
    case ptrauth_key_asib:
        return "IB";
    case ptrauth_key_asda:
        return "DA";
    case ptrauth_key_asdb:
        return "DB";
    default:
        __builtin_unreachable();
    }
}
#endif /* __has_feature(ptrauth_calls) */
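
/*
 * The BRK comment encodes which ptrauth key failed: 0xc470 + ptrauth_key_*,
 * so (assuming the usual 0..3 key numbering from <ptrauth.h>) a comment of
 * 0xc470 reports an IA failure and 0xc473 a DB failure.
 */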
static void
handle_kernel_breakpoint(arm_saved_state_t *state, uint32_t esr)
{
    uint16_t comment = ISS_BRK_COMMENT(esr);

#if __has_feature(ptrauth_calls)
    if (brk_comment_is_ptrauth(comment)) {
        const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Ptrauth failure with %s key resulted in 0x%016llx";
        char msg[strlen(msg_fmt)
        - strlen("0x%04X") + strlen("0xFFFF")
        - strlen("%s") + strlen("IA")
        - strlen("0x%016llx") + strlen("0xFFFFFFFFFFFFFFFF")
        + 1];
        const char *key = brk_comment_to_ptrauth_key(comment);
        snprintf(msg, sizeof(msg), msg_fmt, comment, key, saved_state64(state)->x[16]);

        panic_with_thread_kernel_state(msg, state);
    }
#endif /* __has_feature(ptrauth_calls) */

    const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Panic (by design)";
    char msg[strlen(msg_fmt) - strlen("0x%04X") + strlen("0xFFFF") + 1];
    snprintf(msg, sizeof(msg), msg_fmt, comment);

    panic_with_thread_kernel_state(msg, state);
}
static void
handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused)
{
    exception_type_t           exception = EXC_BREAKPOINT;
    mach_exception_data_type_t codes[2]  = {EXC_ARM_BREAKPOINT};
    mach_msg_type_number_t     numcodes  = 2;

#if __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__
    if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 &&
        brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) {
        exception |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__ */

    codes[1] = get_saved_state_pc(state);
    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}
static void
handle_watchpoint(vm_offset_t fault_addr)
{
    exception_type_t           exception = EXC_BREAKPOINT;
    mach_exception_data_type_t codes[2]  = {EXC_ARM_DA_DEBUG};
    mach_msg_type_number_t     numcodes  = 2;

    codes[1] = fault_addr;
    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}
static void
handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
    abort_inspector_t inspect_abort, abort_handler_t handler, expected_fault_handler_t expected_fault_handler)
{
    fault_status_t fault_code;
    vm_prot_t      fault_type;

    inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
    handler(state, esr, fault_addr, fault_code, fault_type, recover, expected_fault_handler);
}
static void
inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
    getCpuDatap()->cpu_stat.instr_ex_cnt++;
    *fault_code = ISS_IA_FSC(iss);
    *fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
}
static void
inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
    getCpuDatap()->cpu_stat.data_ex_cnt++;
    *fault_code = ISS_DA_FSC(iss);

    /*
     * Cache maintenance operations always report faults as write access.
     * Change these to read access, unless they report a permission fault.
     * Only certain cache maintenance operations (e.g. 'dc ivac') require write
     * access to the mapping, but if a cache maintenance operation that only requires
     * read access generates a permission fault, then we will not be able to handle
     * the fault regardless of whether we treat it as a read or write fault.
     */
    if ((iss & ISS_DA_WNR) && (!(iss & ISS_DA_CM) || is_permission_fault(*fault_code))) {
        *fault_type = (VM_PROT_READ | VM_PROT_WRITE);
    } else {
        *fault_type = (VM_PROT_READ);
    }
}
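
/*
 * Illustration of the rule above: a translation fault raised by a cache
 * maintenance instruction on a read-only mapping arrives with both WnR and CM
 * set; because it is not a permission fault it is downgraded to VM_PROT_READ
 * so the VM layer can still satisfy it.
 */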
#if __has_feature(ptrauth_calls)
static inline bool
fault_addr_bit(vm_offset_t fault_addr, unsigned int bit)
{
    return (bool)((fault_addr >> bit) & 1);
}

/*
 * Determines whether a fault address taken at EL0 contains a PAC error code
 * corresponding to the specified kind of ptrauth key.
 */
static bool
user_fault_addr_matches_pac_error_code(vm_offset_t fault_addr, bool data_key)
{
    bool instruction_tbi = !(get_tcr() & TCR_TBID0_TBI_DATA_ONLY);
    bool tbi = data_key || __improbable(instruction_tbi);
    unsigned int poison_shift;
    if (tbi) {
        poison_shift = 53;
    } else {
        poison_shift = 61;
    }

    /* PAC error codes are always in the form key_number:NOT(key_number) */
    bool poison_bit_1 = fault_addr_bit(fault_addr, poison_shift);
    bool poison_bit_2 = fault_addr_bit(fault_addr, poison_shift + 1);
    return poison_bit_1 != poison_bit_2;
}
#endif /* __has_feature(ptrauth_calls) */
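
/*
 * Illustration (assuming the shifts above): a poisoned address has the two
 * adjacent poison bits unequal (01 or 10), while a genuine bad address has
 * them equal (00 or 11), which is exactly the inequality tested above.
 */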
static void
handle_pc_align(arm_saved_state_t *ss)
{
    exception_type_t           exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t     numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
        panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
    }

    exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
    if (user_fault_addr_matches_pac_error_code(get_saved_state_pc(ss), false)) {
        exc |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */

    codes[0] = EXC_ARM_DA_ALIGN;
    codes[1] = get_saved_state_pc(ss);

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}
static void
handle_sp_align(arm_saved_state_t *ss)
{
    exception_type_t           exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t     numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
        panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
    }

    exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
    if (user_fault_addr_matches_pac_error_code(get_saved_state_sp(ss), true)) {
        exc |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */

    codes[0] = EXC_ARM_SP_ALIGN;
    codes[1] = get_saved_state_sp(ss);

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}
static void
handle_wf_trap(arm_saved_state_t *state)
{
    exception_type_t           exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t     numcodes = 2;
    uint32_t                   instr    = 0;

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

    exc = EXC_BAD_INSTRUCTION;
    codes[0] = EXC_ARM_UNDEFINED;
    codes[1] = instr;

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}
static void
handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
{
    exception_type_t           exc = EXC_ARITHMETIC;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t     numcodes = 2;
    uint32_t                   instr    = 0;

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        panic_with_thread_kernel_state("Floating point exception from kernel", state);
    }

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
    codes[1] = instr;

    /* The floating point trap flags are only valid if TFV is set. */
    if (!fp_exceptions_enabled) {
        exc = EXC_BAD_INSTRUCTION;
        codes[0] = EXC_ARM_UNDEFINED;
    } else if (!(esr & ISS_FP_TFV)) {
        codes[0] = EXC_ARM_FP_UNDEFINED;
    } else if (esr & ISS_FP_UFF) {
        codes[0] = EXC_ARM_FP_UF;
    } else if (esr & ISS_FP_OFF) {
        codes[0] = EXC_ARM_FP_OF;
    } else if (esr & ISS_FP_IOF) {
        codes[0] = EXC_ARM_FP_IO;
    } else if (esr & ISS_FP_DZF) {
        codes[0] = EXC_ARM_FP_DZ;
    } else if (esr & ISS_FP_IDF) {
        codes[0] = EXC_ARM_FP_ID;
    } else if (esr & ISS_FP_IXF) {
        codes[0] = EXC_ARM_FP_IX;
    } else {
        panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
    }

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}
/*
 * handle_alignment_fault_from_user:
 *   state: Saved state
 *
 * Attempts to deal with an alignment fault from userspace (possibly by
 * emulating the faulting instruction). If emulation failed due to an
 * unservicable fault, the ESR for that fault will be stored in the
 * recovery_esr field of the thread by the exception code.
 *
 * Returns:
 *   -1:     Emulation failed (emulation of state/instr not supported)
 *   0:      Successfully emulated the instruction
 *   EFAULT: Emulation failed (probably due to permissions)
 *   EINVAL: Emulation failed (probably due to a bad address)
 */
static int
handle_alignment_fault_from_user(arm_saved_state_t *state, kern_return_t *vmfr)
{
#pragma unused (state)
#pragma unused (vmfr)

    return -1; /* emulation of the faulting state/instr not supported */
}
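
/*
 * In this configuration the handler above always reports -1, so callers such
 * as handle_user_abort() fall through to delivering EXC_BAD_ACCESS with
 * EXC_ARM_DA_ALIGN rather than silently retrying the instruction.
 */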
static void
handle_sw_step_debug(arm_saved_state_t *state)
{
    thread_t                   thread = current_thread();
    exception_type_t           exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t     numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
        panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
    }

    // Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
    if (thread->machine.DebugData != NULL) {
        thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
    } else {
        panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
    }

    mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_IRQF | DAIF_FIQF);

    // Special encoding for gdb single step event on ARM
    exc = EXC_BREAKPOINT;
    codes[0] = 1;
    codes[1] = 0;

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}
static void
set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover)
{
#if defined(HAS_APPLE_PAC)
    thread_t thread = current_thread();
    const uintptr_t disc = ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER);
    const char *panic_msg = "Illegal thread->recover value %p";

    MANIPULATE_SIGNED_THREAD_STATE(iss,
        // recover = (vm_offset_t)ptrauth_auth_data((void *)recover, ptrauth_key_function_pointer,
        //     ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
        "mov    x1, %[recover]          \n"
        "mov    x6, %[disc]             \n"
        "autia  x1, x6                  \n"
        // if (recover != (vm_offset_t)ptrauth_strip((void *)recover, ptrauth_key_function_pointer)) {
        "mov    x6, x1                  \n"
        "xpaci  x6                      \n"
        "cmp    x1, x6                  \n"
        "beq    1f                      \n"
        // panic("Illegal thread->recover value %p", (void *)recover);
        "mov    x0, %[panic_msg]        \n"
        "bl     EXT(panic)              \n"
        // }
        "1:                             \n"
        "str    x1, [x0, %[SS64_PC]]    \n",
        [recover]   "r"(recover),
        [disc]      "r"(disc),
        [panic_msg] "r"(panic_msg)
        );
#else
    set_saved_state_pc(iss, recover);
#endif
}
static void
handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
{
    exception_type_t           exc      = EXC_BAD_ACCESS;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t     numcodes = 2;
    thread_t                   thread   = current_thread();

    (void)expected_fault_handler;

    if (ml_at_interrupt_context()) {
        panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
    }

    thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */

    if (is_vm_fault(fault_code)) {
        kern_return_t result = KERN_FAILURE;
        vm_map_t      map = thread->map;
        vm_offset_t   vm_fault_addr = fault_addr;

        assert(map != kernel_map);

        if (!(fault_type & VM_PROT_EXECUTE)) {
            vm_fault_addr = tbi_clear(fault_addr);
        }

#if CONFIG_DTRACE
        if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
            if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
                if (recover) {
                    thread->machine.recover_esr = esr;
                    thread->machine.recover_far = vm_fault_addr;
                    set_saved_state_pc_to_recovery_handler(state, recover);
                } else {
                    panic_with_thread_kernel_state("copyin/out has no recovery point", state);
                }
                thread_exception_return();
            } else {
                panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
            }
        }
#endif

        if (pgtrace_enabled) {
            /* Check to see if trace bit is set */
            result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
            if (result == KERN_SUCCESS) {
                thread_exception_return();
            }
        }

        /* check to see if it is just a pmap ref/modify fault */
        if ((result != KERN_SUCCESS) && !is_translation_fault(fault_code)) {
            result = arm_fast_fault(map->pmap,
                vm_fault_addr,
                fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
        }

        if (result != KERN_SUCCESS) {
            /* We have to fault the page in */
            result = vm_fault(map, vm_fault_addr, fault_type,
                /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
                /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
        }

        if (result == KERN_SUCCESS || result == KERN_ABORTED) {
            thread_exception_return();
        }

        /*
         * vm_fault() should never return KERN_FAILURE for page faults from user space.
         * If it does, we're leaking preemption disables somewhere in the kernel.
         */
        if (__improbable(result == KERN_FAILURE)) {
            panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
        }

        codes[0] = result;
    } else if (is_alignment_fault(fault_code)) {
        kern_return_t vmfkr = KERN_SUCCESS;
        thread->machine.recover_esr = 0;
        thread->machine.recover_far = 0;
        int result = handle_alignment_fault_from_user(state, &vmfkr);
        if (result == 0) {
            /* Successfully emulated, or instruction
             * copyin() for decode/emulation failed.
             * Continue, or redrive instruction.
             */
            thread_exception_return();
        } else if (((result == EFAULT) || (result == EINVAL)) &&
            (thread->machine.recover_esr == 0)) {
            /*
             * If we didn't actually take a fault, but got one of
             * these errors, then we failed basic sanity checks of
             * the fault address. Treat this as an invalid
             * address.
             */
            codes[0] = KERN_INVALID_ADDRESS;
        } else if ((result == EFAULT) &&
            (thread->machine.recover_esr)) {
            /*
             * Since alignment aborts are prioritized
             * ahead of translation aborts, the misaligned
             * atomic emulation flow may have triggered a
             * VM pagefault, which the VM could not resolve.
             * Report the VM fault error in codes[]
             */

            assertf(vmfkr != KERN_SUCCESS, "Unexpected vmfkr 0x%x", vmfkr);
            /* Cause ESR_EC to reflect an EL0 abort */
            thread->machine.recover_esr &= ~ESR_EC_MASK;
            thread->machine.recover_esr |= (ESR_EC_DABORT_EL0 << ESR_EC_SHIFT);
            set_saved_state_esr(thread->machine.upcb, thread->machine.recover_esr);
            set_saved_state_far(thread->machine.upcb, thread->machine.recover_far);
            fault_addr = thread->machine.recover_far;
            codes[0] = vmfkr;
        } else {
            /* This was just an unsupported alignment
             * exception. Misaligned atomic emulation
             * timeouts fall in this category.
             */
            codes[0] = EXC_ARM_DA_ALIGN;
        }
    } else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
        if (fault_code == FSC_SYNC_PARITY) {
            arm64_platform_error(state, esr, fault_addr);
            thread_exception_return();
        }
#else
        panic("User parity error.");
#endif
    } else {
        codes[0] = KERN_FAILURE;
    }

    codes[1] = fault_addr;
#if __has_feature(ptrauth_calls)
    bool is_data_abort = (ESR_EC(esr) == ESR_EC_DABORT_EL0);
    if (user_fault_addr_matches_pac_error_code(fault_addr, is_data_abort)) {
        exc |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */
    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}
#if __ARM_PAN_AVAILABLE__
static boolean_t
is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
{
    // PAN (Privileged Access Never) fault occurs for data read/write in EL1 to
    // virtual address that is readable/writeable from both EL1 and EL0

    // To check for PAN fault, we evaluate if the following conditions are true:
    // 1. This is a permission fault
    // 2. PAN is enabled
    // 3. AT instruction (on which PAN has no effect) on the same faulting address
    //    succeeds

    vm_offset_t pa;

    if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
        return FALSE;
    }

    if (esr & ISS_DA_WNR) {
        pa = mmu_kvtop_wpreflight(fault_addr);
    } else {
        pa = mmu_kvtop(fault_addr);
    }
    return (pa)? TRUE : FALSE;
}
#endif
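
/*
 * In other words, the check above distinguishes a genuine permission problem
 * from a PAN violation by re-probing the translation (mmu_kvtop /
 * mmu_kvtop_wpreflight, which PAN does not affect): if the address still
 * translates, the only remaining reason for the fault is PAN.
 */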
static void
handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
{
    thread_t thread = current_thread();

#ifndef CONFIG_XNUPOST
    (void)expected_fault_handler;
#endif /* CONFIG_XNUPOST */

#if CONFIG_DTRACE
    if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
        if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
            /*
             * Point to next instruction, or recovery handler if set.
             */
            if (recover) {
                thread->machine.recover_esr = esr;
                thread->machine.recover_far = fault_addr;
                set_saved_state_pc_to_recovery_handler(state, recover);
            } else {
                add_saved_state_pc(state, 4);
            }
            return;
        } else {
            panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
        }
    }
#endif

#if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */
    if (ml_at_interrupt_context()) {
        panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
    }
#endif

    if (is_vm_fault(fault_code)) {
        kern_return_t result = KERN_FAILURE;
        vm_map_t      map;
        int           interruptible;

        /*
         * Ensure no faults in the physical aperture. This could happen if
         * a page table is incorrectly allocated from the read only region
         * when running with KTRR.
         */

#ifdef CONFIG_XNUPOST
        if (expected_fault_handler && expected_fault_handler(state)) {
            return;
        }
#endif /* CONFIG_XNUPOST */

        if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
            panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
        }

        if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
            map = kernel_map;
            interruptible = THREAD_UNINT;
        } else {
            map = thread->map;
            interruptible = THREAD_ABORTSAFE;
        }

#if CONFIG_PGTRACE
        if (pgtrace_enabled) {
            /* Check to see if trace bit is set */
            result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
            if (result == KERN_SUCCESS) {
                return;
            }
        }

        if (ml_at_interrupt_context()) {
            panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
        }
#endif

        /* check to see if it is just a pmap ref/modify fault */
        if (!is_translation_fault(fault_code)) {
            result = arm_fast_fault(map->pmap,
                fault_addr,
                fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
            if (result == KERN_SUCCESS) {
                return;
            }
        }

        if (result != KERN_PROTECTION_FAILURE) {
            /*
             * We have to "fault" the page in.
             */
            result = vm_fault(map, fault_addr, fault_type,
                /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
                /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
        }

        if (result == KERN_SUCCESS) {
            return;
        }

        /*
         * If we have a recover handler, invoke it now.
         */
        if (recover) {
            thread->machine.recover_esr = esr;
            thread->machine.recover_far = fault_addr;
            set_saved_state_pc_to_recovery_handler(state, recover);
            return;
        }

#if __ARM_PAN_AVAILABLE__
        if (is_pan_fault(state, esr, fault_addr, fault_code)) {
            panic_with_thread_kernel_state("Privileged access never abort.", state);
        }
#endif
    } else if (ml_at_interrupt_context()) {
        panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
    } else if (is_alignment_fault(fault_code)) {
        if (recover) {
            thread->machine.recover_esr = esr;
            thread->machine.recover_far = fault_addr;
            set_saved_state_pc_to_recovery_handler(state, recover);
            return;
        }
        panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
    } else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
        if (fault_code == FSC_SYNC_PARITY) {
            arm64_platform_error(state, esr, fault_addr);
            return;
        }
#else
        panic_with_thread_kernel_state("Kernel parity error.", state);
#endif
    } else {
        kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
    }

    panic_with_thread_kernel_state("Kernel data abort.", state);
}
extern void syscall_trace(struct arm_saved_state * regs);

static void
handle_svc(arm_saved_state_t *state)
{
    int          trap_no = get_saved_state_svc_number(state);
    thread_t     thread  = current_thread();
    struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
    syscall_trace(state);
#endif

    thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

    if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
        platform_syscall(state);
        panic("Returned from platform_syscall()?");
    }

    mach_kauth_cred_uthread_update();

    if (trap_no < 0) {
        if (trap_no == MACH_ARM_TRAP_ABSTIME) {
            handle_mach_absolute_time_trap(state);
            return;
        } else if (trap_no == MACH_ARM_TRAP_CONTTIME) {
            handle_mach_continuous_time_trap(state);
            return;
        }

        /* Counting perhaps better in the handler, but this is how it's been done */
        thread->syscalls_mach++;
        mach_syscall(state);
    } else {
        /* Counting perhaps better in the handler, but this is how it's been done */
        thread->syscalls_unix++;
        p = get_bsdthreadtask_info(thread);

        assert(p);

        unix_syscall(state, thread, (struct uthread *)thread->uthread, p);
    }
}
static void
handle_mach_absolute_time_trap(arm_saved_state_t *state)
{
    uint64_t now = mach_absolute_time();
    saved_state64(state)->x[0] = now;
}

static void
handle_mach_continuous_time_trap(arm_saved_state_t *state)
{
    uint64_t now = mach_continuous_time();
    saved_state64(state)->x[0] = now;
}
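
/*
 * These two handlers service the MACH_ARM_TRAP_ABSTIME and MACH_ARM_TRAP_CONTTIME
 * fast traps dispatched from handle_svc(): the result is written straight into
 * x0 of the saved user state and the thread resumes without entering the full
 * Mach syscall path.
 */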
__attribute__((noreturn))
static void
handle_msr_trap(arm_saved_state_t *state, uint32_t esr)
{
    exception_type_t           exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t     numcodes  = 2;
    uint32_t                   instr     = 0;

    if (!is_saved_state64(state)) {
        panic("MSR/MRS trap (ESR 0x%x) from 32-bit state\n", esr);
    }

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        panic("MSR/MRS trap (ESR 0x%x) from kernel\n", esr);
    }

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
    codes[1] = instr;

    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}
static void
handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
{
    exception_type_t           exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t     numcodes  = 2;
    uint32_t                   instr     = 0;

    if (is_saved_state64(state)) {
        panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
    }

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
    }

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
    codes[1] = instr;

    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}
static void
handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
{
    exception_type_t           exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t     numcodes  = 2;
    uint32_t                   instr     = 0;

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
    }

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
    codes[1] = instr;

    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}
void
sleh_irq(arm_saved_state_t *state)
{
    uint64_t     timestamp                = 0;
    uint32_t     old_entropy_data         = 0;
    uint32_t     old_entropy_sample_count = 0;
    size_t       entropy_index            = 0;
    uint32_t *   entropy_data_ptr         = NULL;
    cpu_data_t * cdp __unused             = getCpuDatap();

    int preemption_level = get_preemption_level();

    sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

#if USE_APPLEARMSMP
    PE_handle_ext_interrupt();
#else
    /* Run the registered interrupt handler. */
    cdp->interrupt_handler(cdp->interrupt_target,
        cdp->interrupt_refCon,
        cdp->interrupt_nub,
        cdp->interrupt_source);
#endif

    /* We use interrupt timing as an entropy source. */
    timestamp = ml_get_timebase();

    /*
     * The buffer index is subject to races, but as these races should only
     * result in multiple CPUs updating the same location, the end result
     * should be that noise gets written into the entropy buffer. As this
     * is the entire point of the entropy buffer, we will not worry about
     * these races for now.
     */
    old_entropy_sample_count = EntropyData.sample_count;
    EntropyData.sample_count += 1;

    entropy_index = old_entropy_sample_count & EntropyData.buffer_index_mask;
    entropy_data_ptr = EntropyData.buffer + entropy_index;

    /* Mix the timestamp data and the old data together. */
    old_entropy_data = *entropy_data_ptr;
    *entropy_data_ptr = (uint32_t)timestamp ^ (__ror(old_entropy_data, 9) & EntropyData.ror_mask);
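
    /*
     * Example of the mixing step: if the slot previously held 0x80000001 and
     * the low word of the timebase is 0x12345678, then (with an all-ones
     * ror_mask) the rotation contributes __ror(0x80000001, 9) == 0x00C00000
     * and the new slot value is 0x12345678 ^ 0x00C00000 == 0x12F45678.
     */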
    sleh_interrupt_handler_epilogue();

    if (preemption_level != get_preemption_level()) {
        panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, get_preemption_level());
    }
}
void
sleh_fiq(arm_saved_state_t *state)
{
    unsigned int type = DBG_INTR_TYPE_UNKNOWN;

    int preemption_level = get_preemption_level();

#if MONOTONIC_FIQ
    uint64_t pmcr0 = 0, upmsr = 0;
#endif /* MONOTONIC_FIQ */

#if defined(HAS_IPI)
    boolean_t is_ipi = FALSE;
    uint64_t  ipi_sr = 0;

    if (gFastIPI) {
        MRS(ipi_sr, ARM64_REG_IPI_SR);

        if (ipi_sr & 1) {
            is_ipi = TRUE;
        }
    }

    if (is_ipi) {
        type = DBG_INTR_TYPE_IPI;
    } else
#endif /* defined(HAS_IPI) */
#if MONOTONIC_FIQ
    if (mt_pmi_pending(&pmcr0, &upmsr)) {
        type = DBG_INTR_TYPE_PMI;
    } else
#endif /* MONOTONIC_FIQ */
    if (ml_get_timer_pending()) {
        type = DBG_INTR_TYPE_TIMER;
    }

    sleh_interrupt_handler_prologue(state, type);

#if defined(HAS_IPI)
    if (is_ipi) {
        /*
         * Order is important here: we must ack the IPI by writing IPI_SR
         * before we call cpu_signal_handler(). Otherwise, there will be
         * a window between the completion of pending-signal processing in
         * cpu_signal_handler() and the ack during which a newly-issued
         * IPI to this CPU may be lost. ISB is required to ensure the msr
         * is retired before execution of cpu_signal_handler().
         */
        MSR(ARM64_REG_IPI_SR, ipi_sr);
        __builtin_arm_isb(ISB_SY);
        cpu_signal_handler();
    } else
#endif /* defined(HAS_IPI) */
#if MONOTONIC_FIQ
    if (type == DBG_INTR_TYPE_PMI) {
        INTERRUPT_MASKED_DEBUG_START(mt_fiq, DBG_INTR_TYPE_PMI);
        mt_fiq(getCpuDatap(), pmcr0, upmsr);
        INTERRUPT_MASKED_DEBUG_END();
    } else
#endif /* MONOTONIC_FIQ */
    {
        /*
         * We don't know that this is a timer, but we don't have insight into
         * the other interrupts that go down this path.
         */

        cpu_data_t *cdp = getCpuDatap();

        cdp->cpu_decrementer = -1; /* Large */

        /*
         * ARM64_TODO: whether we're coming from userland is ignored right now.
         * We can easily thread it through, but not bothering for the
         * moment (AArch32 doesn't either).
         */
        INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_TIMER);
        rtclock_intr(TRUE);
        INTERRUPT_MASKED_DEBUG_END();
    }

    sleh_interrupt_handler_epilogue();

    if (preemption_level != get_preemption_level()) {
        panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, get_preemption_level());
    }
}
void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
    task_vtimer_check(current_thread());

    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_START,
        esr, VM_KERNEL_ADDRHIDE(far));
    arm_saved_state_t *state = &context->ss;

    int preemption_level = get_preemption_level();

    ASSERT_CONTEXT_SANITY(context);
    arm64_platform_error(state, esr, far);

    if (preemption_level != get_preemption_level()) {
        panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level());
    }

    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_END,
        esr, VM_KERNEL_ADDRHIDE(far));
}
void
mach_syscall_trace_exit(unsigned int retval,
    unsigned int call_number)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
        DBG_FUNC_END, retval, 0, 0, 0, 0);
}
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
    thread_t thread;
    struct arm_saved_state *state;

    thread = current_thread();
    state = get_user_regs(thread);

    assert(is_saved_state64(state));
    saved_state64(state)->x[0] = error;

#if MACH_ASSERT
    kern_allocation_name_t
    prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
    assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* MACH_ASSERT */

    if (kdebug_enable) {
        /* Invert syscall number (negative for a mach syscall) */
        mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
    }

    thread_exception_return();
}
void
syscall_trace(
    struct arm_saved_state * regs __unused)
{
    /* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}
static void
sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
{
    bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

    task_vtimer_check(current_thread());

    uint64_t pc = is_user ? get_saved_state_pc(state) :
        VM_KERNEL_UNSLIDE(get_saved_state_pc(state));

    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
        0, pc, is_user, type);

#if CONFIG_TELEMETRY
    if (telemetry_needs_record) {
        telemetry_mark_curthread((boolean_t)is_user, FALSE);
    }
#endif /* CONFIG_TELEMETRY */
}
static void
sleh_interrupt_handler_epilogue(void)
{
#if KPERF
    kperf_interrupt();
#endif /* KPERF */
    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}
void
sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
{
    thread_t thread = current_thread();
    vm_offset_t kernel_stack_bottom, sp;

    sp = get_saved_state_sp(&context->ss);
    kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;

    if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
        panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
    }

    panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
}
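
/*
 * The heuristic above: a stack pointer that landed within one page below
 * kernel_stack_bottom (i.e. in the region guarding the stack) is almost
 * certainly an overflow, while anything farther away points at a corrupted
 * stack pointer.
 */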