/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/caches_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/thread.h>
#include <arm/rtclock.h>
#include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm64/monotonic.h>

#include <kern/debug.h>
#include <kern/thread.h>
#include <mach/exception.h>
#include <mach/vm_types.h>
#include <mach/machine/thread_status.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <pexpert/arm/protos.h>

#include <vm/vm_page.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <kern/policy_internal.h>
#include <kern/telemetry.h>

#include <prng/random.h>
#ifndef __arm64__
#error Should only be compiling for arm64.
#endif
#define TEST_CONTEXT32_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)

#define TEST_CONTEXT64_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)

#define ASSERT_CONTEXT_SANITY(context) \
	assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))


#define COPYIN(src, dst, size)                           \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyin_kern(src, dst, size) :                    \
	copyin(src, dst, size)

#define COPYOUT(src, dst, size)                          \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyout_kern(src, dst, size) :                   \
	copyout(src, dst, size)
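/*
 * Note: COPYIN/COPYOUT select the copy routine from the exception level recorded
 * in the saved CPSR. Faults taken from EL1 use the kernel-to-kernel copy paths,
 * while faults taken from EL0 go through the ordinary user copyin/copyout.
 * Both macros assume a local `state` saved-state pointer is in scope.
 */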
// Below is for concatenating a string param to a string literal
#define STR1(x) #x
#define STR(x) STR1(x)
void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;

void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);
void sleh_irq(arm_saved_state_t *);
void sleh_fiq(arm_saved_state_t *);
void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;

static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
static void sleh_interrupt_handler_epilogue(void);

static void handle_svc(arm_saved_state_t *);
static void handle_mach_absolute_time_trap(arm_saved_state_t *);
static void handle_mach_continuous_time_trap(arm_saved_state_t *);

static void handle_msr_trap(arm_saved_state_t *state, uint32_t iss);

extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);

static void handle_uncategorized(arm_saved_state_t *);
static void handle_breakpoint(arm_saved_state_t *) __dead2;

typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);

static int is_vm_fault(fault_status_t);
static int is_translation_fault(fault_status_t);
static int is_alignment_fault(fault_status_t);

typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);

static void handle_pc_align(arm_saved_state_t *ss) __dead2;
static void handle_sp_align(arm_saved_state_t *ss) __dead2;
static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;

static void handle_watchpoint(vm_offset_t fault_addr) __dead2;

static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t);

static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;

static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;

extern void mach_kauth_cred_uthread_update(void);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
    struct uthread * uthread, struct proc * proc);

extern void
mach_syscall(struct arm_saved_state *);
#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t * regs);
extern boolean_t dtrace_tally_fault(user_addr_t);

/*
 * Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy
 * and paste the trap instructions over from that file. Need to keep these in sync!
 */
#define FASTTRAP_ARM32_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB32_INSTR 0xdefc
#define FASTTRAP_ARM64_INSTR 0xe7eeee7e

#define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB32_RET_INSTR 0xdefb
#define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif /* CONFIG_DTRACE */
extern boolean_t pgtrace_enabled;
#if __ARM_PAN_AVAILABLE__
#ifdef CONFIG_XNUPOST
extern vm_offset_t pan_test_addr;
extern vm_offset_t pan_ro_addr;
extern volatile int pan_exception_level;
extern volatile char pan_fault_value;
#endif
#endif

#if HAS_TWO_STAGE_SPR_LOCK
#ifdef CONFIG_XNUPOST
extern volatile vm_offset_t spr_lock_test_addr;
extern volatile uint32_t spr_lock_exception_esr;
#endif
#endif
#if defined(APPLETYPHOON)
#define CPU_NAME "Typhoon"
#elif defined(APPLETWISTER)
#define CPU_NAME "Twister"
#elif defined(APPLEHURRICANE)
#define CPU_NAME "Hurricane"
#else
#define CPU_NAME "Unknown"
#endif
#if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
#define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
#define ESR_WT_REASON(esr) ((esr) & 0xff)

#define WT_REASON_NONE           0
#define WT_REASON_INTEGRITY_FAIL 1
#define WT_REASON_BAD_SYSCALL    2
#define WT_REASON_NOT_LOCKED     3
#define WT_REASON_ALREADY_LOCKED 4
#define WT_REASON_SW_REQ         5
#define WT_REASON_PT_INVALID     6
#define WT_REASON_PT_VIOLATION   7
#define WT_REASON_REG_VIOLATION  8
#endif

extern vm_offset_t static_memory_end;
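/*
 * Rotate-right helper used by sleh_irq() below to fold interrupt timestamps
 * into the entropy buffer without discarding the bits already stored there.
 */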
static inline unsigned
__ror(unsigned value, unsigned shift)
{
	return ((unsigned)(value) >> (unsigned)(shift)) |
	       (unsigned)(value) << ((unsigned)(sizeof(unsigned) * CHAR_BIT) - (unsigned)(shift));
}
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#if defined(APPLE_ARM64_ARCH_FAMILY)
	uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
#if defined(NO_ECORE)
	uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;

	mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
	l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
	lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
	fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

	panic_plain("Unhandled " CPU_NAME
	    " implementation specific error. state=%p esr=%#x far=%p\n"
	    "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
	    "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
	    state, esr, (void *)far,
	    (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
	    (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
#elif defined(HAS_MIGSTS)
	uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;

	mpidr = __builtin_arm_rsr64("MPIDR_EL1");
	migsts = __builtin_arm_rsr64(STR(ARM64_REG_MIGSTS_EL1));
	mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
	l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
	lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
	fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

	panic_plain("Unhandled " CPU_NAME
	    " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
	    "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
	    "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
	    state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
	    (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
	    (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
#else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
	uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;
#if defined(HAS_DPC_ERR)
	uint64_t dpc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_DPC_ERR_STS));
#endif // defined(HAS_DPC_ERR)

	mpidr = __builtin_arm_rsr64("MPIDR_EL1");

	if (mpidr & MPIDR_PNE) {
		mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
		lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
		fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
	} else {
		mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_MMU_ERR_STS));
		lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_LSU_ERR_STS));
		fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_FED_ERR_STS));
	}

	llc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	llc_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));

	panic_plain("Unhandled " CPU_NAME
	    " implementation specific error. state=%p esr=%#x far=%p p-core?%d"
#if defined(HAS_DPC_ERR)
	    " dpc_err_sts:%p"
#endif
	    "\n"
	    "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
	    "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n",
	    state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
#if defined(HAS_DPC_ERR)
	    (void *)dpc_err_sts,
#endif
	    (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
	    (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
#endif
#else // !defined(APPLE_ARM64_ARCH_FAMILY)
#pragma unused (state, esr, far)
	panic_plain("Unhandled implementation specific error\n");
#endif
}
#if CONFIG_KERNEL_INTEGRITY
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
{
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
	if (ESR_WT_SERROR(esr)) {
		switch (ESR_WT_REASON(esr)) {
		case WT_REASON_INTEGRITY_FAIL:
			panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
		case WT_REASON_BAD_SYSCALL:
			panic_plain("Kernel integrity, bad syscall.");
		case WT_REASON_NOT_LOCKED:
			panic_plain("Kernel integrity, not locked.");
		case WT_REASON_ALREADY_LOCKED:
			panic_plain("Kernel integrity, already locked.");
		case WT_REASON_SW_REQ:
			panic_plain("Kernel integrity, software request.");
		case WT_REASON_PT_INVALID:
			panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
			    "walking 0x%016lx.", far);
		case WT_REASON_PT_VIOLATION:
			panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
			    far);
		case WT_REASON_REG_VIOLATION:
			panic_plain("Kernel integrity, violation in system register %d.",
			    (unsigned)far);
		default:
			panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
		}
	}
#else
	if (ESR_WT_SERROR(esr)) {
		panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
	}
#endif
#endif
}
#pragma clang diagnostic pop
#endif
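/*
 * SErrors are routed through arm64_platform_error(): if the platform registered
 * its own error handler (cdp->platform_error_handler) it is given first crack;
 * otherwise we fall back to the implementation-specific register dump and panic.
 */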
void
arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
	cpu_data_t *cdp = getCpuDatap();

#if CONFIG_KERNEL_INTEGRITY
	kernel_integrity_error_handler(esr, far);
#endif

	if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
		(*(platform_error_handler_t)cdp->platform_error_handler)(cdp->cpu_id, far);
	} else {
		arm64_implementation_specific_error(state, esr, far);
	}
}
void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
	boolean_t ss_valid;

	ss_valid = is_saved_state64(ss);
	arm_saved_state64_t *state = saved_state64(ss);

	panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
	    "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n"
	    "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n"
	    "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
	    "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
	    "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
	    "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
	    "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
	    "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n"
	    "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
	    msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
	    state->x[0], state->x[1], state->x[2], state->x[3],
	    state->x[4], state->x[5], state->x[6], state->x[7],
	    state->x[8], state->x[9], state->x[10], state->x[11],
	    state->x[12], state->x[13], state->x[14], state->x[15],
	    state->x[16], state->x[17], state->x[18], state->x[19],
	    state->x[20], state->x[21], state->x[22], state->x[23],
	    state->x[24], state->x[25], state->x[26], state->x[27],
	    state->x[28], state->fp, state->lr, state->sp,
	    state->pc, state->cpsr, state->esr, state->far);
}
void
sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
{
	esr_exception_class_t class  = ESR_EC(esr);
	arm_saved_state_t    *state  = &context->ss;

	switch (class) {
	case ESR_EC_UNCATEGORIZED:
	{
		uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
		if (IS_ARM_GDB_TRAP(instr)) {
			DebuggerCall(EXC_BREAKPOINT, state);
		}
	}
	// Intentionally fall through to panic if we return from the debugger
	default:
		panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
	}
}
#if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
static bool
handle_msr_write_from_xnupost(arm_saved_state_t *state, uint32_t esr)
{
	user_addr_t pc = get_saved_state_pc(state);
	if ((spr_lock_test_addr != 0) && (pc == spr_lock_test_addr)) {
		spr_lock_exception_esr = esr;
		set_saved_state_pc(state, pc + 4);
		return true;
	}

	return false;
}
#endif
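/*
 * Second-level handler for synchronous exceptions taken on the regular kernel
 * stack (SP0): decode the exception class out of ESR and dispatch to the
 * matching handler. Aborts from EL0 return to user via thread_exception_return();
 * most unexpected classes panic.
 */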
void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	esr_exception_class_t class   = ESR_EC(esr);
	arm_saved_state_t    *state   = &context->ss;
	vm_offset_t           recover = 0;
	thread_t              thread  = current_thread();
#if MACH_ASSERT
	int                   preemption_level = get_preemption_level();
#endif

	ASSERT_CONTEXT_SANITY(context);

	if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
		/*
		 * We no longer support 32-bit, which means no 2-byte
		 * instructions.
		 */
		if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
			panic("Exception on 2-byte instruction, "
			    "context=%p, esr=%#x, far=%p",
			    context, esr, (void *)far);
		} else {
			panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
		}
	}

	/* Don't run exception handler with recover handler set in case of double fault */
	if (thread->recover) {
		recover = thread->recover;
		thread->recover = (vm_offset_t)NULL;
	}

	/* Inherit the interrupt masks from previous context */
	if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
		ml_set_interrupts_enabled(TRUE);
	}

	switch (class) {
	case ESR_EC_SVC_64:
		if (!is_saved_state64(state) || !PSR64_IS_USER(get_saved_state_cpsr(state))) {
			panic("Invalid SVC_64 context");
		}

		handle_svc(state);
		break;

	case ESR_EC_DABORT_EL0:
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort);
		thread_exception_return();

	case ESR_EC_MSR_TRAP:
		handle_msr_trap(state, ESR_ISS(esr));
		break;

	case ESR_EC_IABORT_EL0:
		handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort);
		thread_exception_return();

	case ESR_EC_IABORT_EL1:

		panic_with_thread_kernel_state("Kernel instruction fetch abort", state);

	case ESR_EC_PC_ALIGN:
		handle_pc_align(state);
		__builtin_unreachable();

	case ESR_EC_DABORT_EL1:
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort);
		break;

	case ESR_EC_UNCATEGORIZED:
		assert(!ESR_ISS(esr));

#if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
		if (handle_msr_write_from_xnupost(state, esr)) {
			break;
		}
#endif
		handle_uncategorized(&context->ss);
		break;

	case ESR_EC_SP_ALIGN:
		handle_sp_align(state);
		__builtin_unreachable();

	case ESR_EC_BKPT_AARCH32:
		handle_breakpoint(state);
		__builtin_unreachable();

	case ESR_EC_BRK_AARCH64:
		if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
			panic_with_thread_kernel_state("Break instruction exception from kernel. Panic (by design)", state);
		} else {
			handle_breakpoint(state);
		}
		__builtin_unreachable();

	case ESR_EC_BKPT_REG_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_breakpoint(state);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_BKPT_REG_MATCH_EL1:
		panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
		__builtin_unreachable();

	case ESR_EC_SW_STEP_DEBUG_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_sw_step_debug(state);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_SW_STEP_DEBUG_EL1:
		panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
		__builtin_unreachable();

	case ESR_EC_WATCHPT_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_watchpoint(far);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_WATCHPT_MATCH_EL1:
		/*
		 * If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
		 * abort. Turn off watchpoints and keep going; we'll turn them back on in return_from_exception.
		 */
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			arm_debug_set(NULL);
			break; /* return to first level handler */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_TRAP_SIMD_FP:
		handle_simd_trap(state, esr);
		__builtin_unreachable();

	case ESR_EC_ILLEGAL_INSTR_SET:
		if (EXCB_ACTION_RERUN !=
		    ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
			// instruction is not re-executed
			panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
			    state, class, esr, (void *)far, get_saved_state_cpsr(state));
		}
		// must clear this fault in PSR to re-run
		mask_saved_state_cpsr(state, 0, PSR64_IL);
		break;

	case ESR_EC_MCR_MRC_CP15_TRAP:
	case ESR_EC_MCRR_MRRC_CP15_TRAP:
	case ESR_EC_MCR_MRC_CP14_TRAP:
	case ESR_EC_LDC_STC_CP14_TRAP:
	case ESR_EC_MCRR_MRRC_CP14_TRAP:
		handle_user_trapped_instruction32(state, esr);
		__builtin_unreachable();

	case ESR_EC_WFI_WFE:
		// Use of WFI or WFE instruction when they have been disabled for EL0
		handle_wf_trap(state);
		__builtin_unreachable();

	case ESR_EC_FLOATING_POINT_64:
		handle_fp_trap(state, esr);
		__builtin_unreachable();

	default:
		panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p",
		    state, class, esr, (void *)far);
		__builtin_unreachable();
	}

	if (recover) {
		thread->recover = recover;
	}
#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("synchronous exception changed preemption level from %d to %d", preemption_level, get_preemption_level());
	}
#endif
}
/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 */
static void
handle_uncategorized(arm_saved_state_t *state)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr     = 0;

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

#if CONFIG_DTRACE
	if (tempDTraceTrapHook && (tempDTraceTrapHook(exception, state, 0, 0) == KERN_SUCCESS)) {
		return;
	}

	if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
		/*
		 * For a 64bit user process, we care about all 4 bytes of the
		 * instr.
		 */
		if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
			if (dtrace_user_probe(state) == KERN_SUCCESS) {
				return;
			}
		}
	} else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
		/*
		 * For a 32bit user process, we check for thumb mode, in
		 * which case we only care about a 2 byte instruction length.
		 * For non-thumb mode, we care about all 4 bytes of the instruction.
		 */
		if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
			if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
			    ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		} else {
			if ((instr == FASTTRAP_ARM32_INSTR) ||
			    (instr == FASTTRAP_ARM32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		}
	}
#endif /* CONFIG_DTRACE */

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		if (IS_ARM_GDB_TRAP(instr)) {
			boolean_t interrupt_state;
			vm_offset_t kstackptr;
			exception = EXC_BREAKPOINT;

			interrupt_state = ml_set_interrupts_enabled(FALSE);

			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			kstackptr = (vm_offset_t) current_thread()->machine.kstackptr;
			copy_signed_thread_state(&((thread_kernel_state_t) kstackptr)->machine.ss, state);

			/* Hop into the debugger (typically either due to a
			 * fatal exception, an explicit panic, or a stackshot
			 * request).
			 */
			DebuggerCall(exception, state);

			(void) ml_set_interrupts_enabled(interrupt_state);
			return;
		} else {
			panic("Undefined kernel instruction: pc=%p instr=%x\n", (void*)get_saved_state_pc(state), instr);
		}
	}

	/*
	 * Check for GDB breakpoint via illegal opcode.
	 */
	if (IS_ARM_GDB_TRAP(instr)) {
		exception = EXC_BREAKPOINT;
		codes[0] = EXC_ARM_BREAKPOINT;
		codes[1] = instr;
	} else {
		codes[1] = instr;
	}

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}
static void
handle_breakpoint(arm_saved_state_t *state)
{
	exception_type_t           exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_BREAKPOINT};
	mach_msg_type_number_t     numcodes  = 2;

	codes[1] = get_saved_state_pc(state);
	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}
static void
handle_watchpoint(vm_offset_t fault_addr)
{
	exception_type_t           exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_DA_DEBUG};
	mach_msg_type_number_t     numcodes  = 2;

	codes[1] = fault_addr;
	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}
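/*
 * handle_abort() splits abort processing into two callbacks: an inspector
 * (instruction vs. data) that decodes the ISS bits into a fault code and
 * access type, and a handler (user vs. kernel) that resolves the fault.
 */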
static void
handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
    abort_inspector_t inspect_abort, abort_handler_t handler)
{
	fault_status_t fault_code;
	vm_prot_t      fault_type;

	inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
	handler(state, esr, fault_addr, fault_code, fault_type, recover);
}
static void
inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.instr_ex_cnt++;
	*fault_code = ISS_IA_FSC(iss);
	*fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
}

static void
inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.data_ex_cnt++;
	*fault_code = ISS_DA_FSC(iss);

	/* Cache operations report faults as write access. Change these to read access. */
	if ((iss & ISS_DA_WNR) && !(iss & ISS_DA_CM)) {
		*fault_type = (VM_PROT_READ | VM_PROT_WRITE);
	} else {
		*fault_type = (VM_PROT_READ);
	}
}
static void
handle_pc_align(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
	codes[0] = EXC_ARM_DA_ALIGN;
	codes[1] = get_saved_state_pc(ss);

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}
static void
handle_sp_align(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
	codes[0] = EXC_ARM_SP_ALIGN;
	codes[1] = get_saved_state_sp(ss);

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}
static void
handle_wf_trap(arm_saved_state_t *state)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

	exc = EXC_BAD_INSTRUCTION;
	codes[0] = EXC_ARM_UNDEFINED;
	codes[1] = instr;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}
static void
handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exc = EXC_ARITHMETIC;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	/* The floating point trap flags are only valid if TFV is set. */
	if (!(esr & ISS_FP_TFV)) {
		codes[0] = EXC_ARM_FP_UNDEFINED;
	} else if (esr & ISS_FP_UFF) {
		codes[0] = EXC_ARM_FP_UF;
	} else if (esr & ISS_FP_OFF) {
		codes[0] = EXC_ARM_FP_OF;
	} else if (esr & ISS_FP_IOF) {
		codes[0] = EXC_ARM_FP_IO;
	} else if (esr & ISS_FP_DZF) {
		codes[0] = EXC_ARM_FP_DZ;
	} else if (esr & ISS_FP_IDF) {
		codes[0] = EXC_ARM_FP_ID;
	} else if (esr & ISS_FP_IXF) {
		codes[0] = EXC_ARM_FP_IX;
	} else {
		panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}
static void
handle_sw_step_debug(arm_saved_state_t *state)
{
	thread_t thread = current_thread();
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
	}

	// Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
	if (thread->machine.DebugData != NULL) {
		thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
	} else {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
	}

	mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_IRQF | DAIF_FIQF);

	// Special encoding for gdb single step event on ARM
	exc = EXC_BREAKPOINT;
	codes[0] = 1;
	codes[1] = 0;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}
static int
is_vm_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
	case FSC_ACCESS_FLAG_FAULT_L1:
	case FSC_ACCESS_FLAG_FAULT_L2:
	case FSC_ACCESS_FLAG_FAULT_L3:
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static int
is_translation_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

#if __ARM_PAN_AVAILABLE__
static int
is_permission_fault(fault_status_t status)
{
	switch (status) {
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}
#endif

static int
is_alignment_fault(fault_status_t status)
{
	return status == FSC_ALIGNMENT_FAULT;
}

static int
is_parity_error(fault_status_t status)
{
	switch (status) {
	case FSC_SYNC_PARITY:
	case FSC_ASYNC_PARITY:
	case FSC_SYNC_PARITY_TT_L1:
	case FSC_SYNC_PARITY_TT_L2:
	case FSC_SYNC_PARITY_TT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}
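/*
 * When a copyio recovery point is armed, fault handling redirects the saved PC
 * to it. With pointer authentication the saved thread state is signed, so the
 * update has to go through MANIPULATE_SIGNED_THREAD_STATE rather than a plain
 * store to the saved PC.
 */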
static void
set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover)
{
#if defined(HAS_APPLE_PAC)
	thread_t thread = current_thread();
	const uintptr_t disc = ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER);
	const char *panic_msg = "Illegal thread->recover value %p";

	MANIPULATE_SIGNED_THREAD_STATE(iss,
	    // recover = (vm_offset_t)ptrauth_auth_data((void *)recover, ptrauth_key_function_pointer,
	    //	ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
	    "mov	x1, %[recover]		\n"
	    "mov	x6, %[disc]		\n"
	    "autia	x1, x6			\n"
	    // if (recover != (vm_offset_t)ptrauth_strip((void *)recover, ptrauth_key_function_pointer)) {
	    "mov	x6, x1			\n"
	    "xpaci	x6			\n"
	    "cmp	x1, x6			\n"
	    "beq	1f			\n"
	    //	panic("Illegal thread->recover value %p", (void *)recover);
	    "mov	x0, %[panic_msg]	\n"
	    "bl		EXT(panic)		\n"
	    // }
	    "1:					\n"
	    "str	x1, [x0, %[SS64_PC]]	\n",
	    [recover]	"r"(recover),
	    [disc]	"r"(disc),
	    [panic_msg]	"r"(panic_msg)
	    );
#else
	set_saved_state_pc(iss, recover);
#endif
}
static void
handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
{
	exception_type_t           exc      = EXC_BAD_ACCESS;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t     numcodes = 2;
	thread_t                   thread   = current_thread();

	if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
	}

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */

	if (is_vm_fault(fault_code)) {
		kern_return_t result = KERN_FAILURE;
		vm_map_t      map = thread->map;
		vm_offset_t   vm_fault_addr = fault_addr;

		assert(map != kernel_map);

		if (!(fault_type & VM_PROT_EXECUTE) && user_tbi_enabled()) {
			vm_fault_addr = tbi_clear(fault_addr);
		}

#if CONFIG_DTRACE
		if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
			if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
				if (recover) {
					set_saved_state_pc_to_recovery_handler(state, recover);
				} else {
					ml_set_interrupts_enabled(FALSE);
					panic_with_thread_kernel_state("copyin/out has no recovery point", state);
				}
				return;
			} else {
				ml_set_interrupts_enabled(FALSE);
				panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
			}
		}
#else
		(void)recover;
#endif

#if CONFIG_PGTRACE
		if (pgtrace_enabled) {
			/* Check to see if trace bit is set */
			result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
			if (result == KERN_SUCCESS) {
				return;
			}
		}
#endif

		/* check to see if it is just a pmap ref/modify fault */

		if ((result != KERN_SUCCESS) && !is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap, trunc_page(vm_fault_addr), fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
		}
		if (result != KERN_SUCCESS) {
			/* We have to fault the page in */
			result = vm_fault(map, vm_fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}
		if (result == KERN_SUCCESS || result == KERN_ABORTED) {
			return;
		}

		/*
		 * vm_fault() should never return KERN_FAILURE for page faults from user space.
		 * If it does, we're leaking preemption disables somewhere in the kernel.
		 */
		if (__improbable(result == KERN_FAILURE)) {
			panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
		}

		codes[0] = result;
	} else if (is_alignment_fault(fault_code)) {
		codes[0] = EXC_ARM_DA_ALIGN;
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic("User parity error.");
#endif
	} else {
		codes[0] = KERN_FAILURE;
	}

	codes[1] = fault_addr;
	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}
#if __ARM_PAN_AVAILABLE__
static int
is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
{
	// PAN (Privileged Access Never) fault occurs for data read/write in EL1 to
	// virtual address that is readable/writeable from both EL1 and EL0

	// To check for PAN fault, we evaluate if the following conditions are true:
	// 1. This is a permission fault
	// 2. PAN is enabled
	// 3. AT instruction (on which PAN has no effect) on the same faulting address
	//    succeeds
	vm_offset_t pa;

	if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
		return FALSE;
	}

	if (esr & ISS_DA_WNR) {
		pa = mmu_kvtop_wpreflight(fault_addr);
	} else {
		pa = mmu_kvtop(fault_addr);
	}
	return (pa) ? TRUE : FALSE;
}
#endif
static void
handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
{
	thread_t thread = current_thread();

#if CONFIG_DTRACE
	if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
		if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
			/*
			 * Point to next instruction, or recovery handler if set.
			 */
			if (recover) {
				set_saved_state_pc_to_recovery_handler(state, recover);
			} else {
				add_saved_state_pc(state, 4);
			}
			return;
		} else {
			ml_set_interrupts_enabled(FALSE);
			panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
		}
	}
#endif

#if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */
	if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
	}
#endif

	if (is_vm_fault(fault_code)) {
		kern_return_t result = KERN_FAILURE;
		vm_map_t      map;
		int           interruptible;

		/*
		 * Ensure no faults in the physical aperture. This could happen if
		 * a page table is incorrectly allocated from the read only region
		 * when running with KTRR.
		 */

#if __ARM_PAN_AVAILABLE__ && defined(CONFIG_XNUPOST)
		if (is_permission_fault(fault_code) && !(get_saved_state_cpsr(state) & PSR64_PAN) &&
		    (pan_ro_addr != 0) && (fault_addr == pan_ro_addr)) {
			++pan_exception_level;
			// On an exception taken from a PAN-disabled context, verify
			// that PAN is re-enabled for the exception handler and that
			// accessing the test address produces a PAN fault.
			pan_fault_value = *(char *)pan_test_addr;
			__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
			add_saved_state_pc(state, 4);
			return;
		}
#endif

		if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
			panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
		}

		if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
			map = kernel_map;
			interruptible = THREAD_UNINT;
		} else {
			map = thread->map;
			interruptible = THREAD_ABORTSAFE;
		}

#if CONFIG_PGTRACE
		if (pgtrace_enabled) {
			/* Check to see if trace bit is set */
			result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
			if (result == KERN_SUCCESS) {
				return;
			}
		}

		if (ml_at_interrupt_context()) {
			panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
		}
#endif

		/* check to see if it is just a pmap ref/modify fault */
		if (!is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
			if (result == KERN_SUCCESS) {
				return;
			}
		}

		if (result != KERN_PROTECTION_FAILURE) {
			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}

		if (result == KERN_SUCCESS) {
			return;
		}

		/*
		 * If we have a recover handler, invoke it now.
		 */
		if (recover) {
			set_saved_state_pc_to_recovery_handler(state, recover);
			return;
		}

#if __ARM_PAN_AVAILABLE__
		if (is_pan_fault(state, esr, fault_addr, fault_code)) {
#ifdef CONFIG_XNUPOST
			if ((pan_test_addr != 0) && (fault_addr == pan_test_addr)) {
				++pan_exception_level;
				// read the user-accessible value to make sure
				// pan is enabled and produces a 2nd fault from
				// the exception handler
				if (pan_exception_level == 1) {
					pan_fault_value = *(char *)pan_test_addr;
					__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
				}
				// this fault address is used for PAN test
				// disable PAN and rerun
				mask_saved_state_cpsr(state, 0, PSR64_PAN);
				return;
			}
#endif
			panic_with_thread_kernel_state("Privileged access never abort.", state);
		}
#endif

#if CONFIG_PGTRACE
	} else if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
#endif
	} else if (is_alignment_fault(fault_code)) {
		if (recover) {
			set_saved_state_pc_to_recovery_handler(state, recover);
			return;
		}
		panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic_with_thread_kernel_state("Kernel parity error.", state);
#endif
	} else {
		kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
	}

	panic_with_thread_kernel_state("Kernel data abort.", state);
}
extern void syscall_trace(struct arm_saved_state * regs);
static void
handle_svc(arm_saved_state_t *state)
{
	int      trap_no = get_saved_state_svc_number(state);
	thread_t thread  = current_thread();
	struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
	syscall_trace(state);
#endif

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

	if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
		platform_syscall(state);
		panic("Returned from platform_syscall()?");
	}

	mach_kauth_cred_uthread_update();

	if (trap_no < 0) {
		if (trap_no == -3) {
			handle_mach_absolute_time_trap(state);
			return;
		} else if (trap_no == -4) {
			handle_mach_continuous_time_trap(state);
			return;
		}

		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_mach++;
		mach_syscall(state);
	} else {
		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_unix++;
		p = get_bsdthreadtask_info(thread);

		assert(p);

		unix_syscall(state, thread, (struct uthread *)thread->uthread, p);
	}
}
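/*
 * SVC dispatch convention: negative trap numbers are Mach traps (with -3 and -4
 * reserved for the mach_absolute_time/mach_continuous_time fast traps handled
 * below), and positive numbers are BSD/Unix syscalls routed to unix_syscall().
 */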
static void
handle_mach_absolute_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_absolute_time();
	saved_state64(state)->x[0] = now;
}

static void
handle_mach_continuous_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_continuous_time();
	saved_state64(state)->x[0] = now;
}
static void
handle_msr_trap(arm_saved_state_t *state, uint32_t iss)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr     = 0;

	(void)iss;

	if (!is_saved_state64(state)) {
		panic("MSR/MRS trap (EC 0x%x) from 32-bit state\n", ESR_EC_MSR_TRAP);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("MSR/MRS trap (EC 0x%x) from kernel\n", ESR_EC_MSR_TRAP);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
}
static void
handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr     = 0;

	if (is_saved_state64(state)) {
		panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}
static void
handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr     = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}
void
sleh_irq(arm_saved_state_t *state)
{
	uint64_t     timestamp                = 0;
	uint32_t     old_entropy_data         = 0;
	uint32_t     old_entropy_sample_count = 0;
	size_t       entropy_index            = 0;
	uint32_t *   entropy_data_ptr         = NULL;
	cpu_data_t * cdp                      = getCpuDatap();
#if MACH_ASSERT
	int          preemption_level         = get_preemption_level();
#endif

	sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

	/* Run the registered interrupt handler. */
	cdp->interrupt_handler(cdp->interrupt_target,
	    cdp->interrupt_refCon,
	    cdp->interrupt_nub,
	    cdp->interrupt_source);

	/* We use interrupt timing as an entropy source. */
	timestamp = ml_get_timebase();

	/*
	 * The buffer index is subject to races, but as these races should only
	 * result in multiple CPUs updating the same location, the end result
	 * should be that noise gets written into the entropy buffer. As this
	 * is the entire point of the entropy buffer, we will not worry about
	 * these races for now.
	 */
	old_entropy_sample_count = EntropyData.sample_count;
	EntropyData.sample_count += 1;

	entropy_index = old_entropy_sample_count & ENTROPY_BUFFER_INDEX_MASK;
	entropy_data_ptr = EntropyData.buffer + entropy_index;

	/* Mix the timestamp data and the old data together. */
	old_entropy_data = *entropy_data_ptr;
	*entropy_data_ptr = (uint32_t)timestamp ^ __ror(old_entropy_data, 9);

	sleh_interrupt_handler_epilogue();

#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, get_preemption_level());
	}
#endif
}
void
sleh_fiq(arm_saved_state_t *state)
{
	unsigned int type = DBG_INTR_TYPE_UNKNOWN;
#if MACH_ASSERT
	int preemption_level = get_preemption_level();
#endif

#if MONOTONIC_FIQ
	uint64_t pmcr0 = 0, upmsr = 0;
#endif /* MONOTONIC_FIQ */

#if MONOTONIC_FIQ
	if (mt_pmi_pending(&pmcr0, &upmsr)) {
		type = DBG_INTR_TYPE_PMI;
	} else
#endif /* MONOTONIC_FIQ */
	if (ml_get_timer_pending()) {
		type = DBG_INTR_TYPE_TIMER;
	}

	sleh_interrupt_handler_prologue(state, type);

#if MONOTONIC_FIQ
	if (type == DBG_INTR_TYPE_PMI) {
		mt_fiq(getCpuDatap(), pmcr0, upmsr);
	} else
#endif /* MONOTONIC_FIQ */
	{
		/*
		 * We don't know that this is a timer, but we don't have insight into
		 * the other interrupts that go down this path.
		 */

		cpu_data_t *cdp = getCpuDatap();

		cdp->cpu_decrementer = -1; /* Large */

		/*
		 * ARM64_TODO: whether we're coming from userland is ignored right now.
		 * We can easily thread it through, but not bothering for the
		 * moment (AArch32 doesn't either).
		 */
		rtclock_intr(TRUE);
	}

	sleh_interrupt_handler_epilogue();

#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, get_preemption_level());
	}
#endif
}
void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	arm_saved_state_t *state = &context->ss;
#if MACH_ASSERT
	int preemption_level = get_preemption_level();
#endif

	ASSERT_CONTEXT_SANITY(context);
	arm64_platform_error(state, esr, far);
#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level());
	}
#endif
}
void
mach_syscall_trace_exit(unsigned int retval,
    unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
	    DBG_FUNC_END, retval, 0, 0, 0, 0);
}
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
	thread_t thread;
	struct arm_saved_state *state;

	thread = current_thread();
	state = get_user_regs(thread);

	assert(is_saved_state64(state));
	saved_state64(state)->x[0] = error;

#if MACH_ASSERT
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* MACH_ASSERT */

	if (kdebug_enable) {
		/* Invert syscall number (negative for a mach syscall) */
		mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
	}

	thread_exception_return();
}
void
syscall_trace(
	struct arm_saved_state * regs __unused)
{
	/* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}
static void
sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
{
	uint64_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	uint64_t pc = is_user ? get_saved_state_pc(state) :
	    VM_KERNEL_UNSLIDE(get_saved_state_pc(state));

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, pc, is_user, type);

#if CONFIG_TELEMETRY
	if (telemetry_needs_record) {
		telemetry_mark_curthread((boolean_t)is_user, FALSE);
	}
#endif /* CONFIG_TELEMETRY */
}
static void
sleh_interrupt_handler_epilogue(void)
{
#if KPERF
	kperf_interrupt();
#endif /* KPERF */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}
void
sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
{
	thread_t thread = current_thread();
	vm_offset_t kernel_stack_bottom, sp;

	sp = get_saved_state_sp(&context->ss);
	kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;

	if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
		panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
	}

	panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
}