/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/caches_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/thread.h>
#include <arm/rtclock.h>
#include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm64/monotonic.h>

#include <kern/debug.h>
#include <kern/thread.h>
#include <mach/exception.h>
#include <mach/vm_types.h>
#include <mach/machine/thread_status.h>

#include <machine/atomic.h>
#include <machine/machlimits.h>

#include <pexpert/arm/protos.h>

#include <vm/vm_page.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <kern/policy_internal.h>

#include <kern/telemetry.h>

#include <prng/random.h>
#ifndef __arm64__
#error Should only be compiling for arm64.
#endif
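/*
 * Context sanity checks: the macros below verify that an arm_context_t carries
 * matching 32-bit or 64-bit flavor/count headers in both its general-purpose
 * and NEON save areas before the handlers in this file trust its contents.
 */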
#define TEST_CONTEXT32_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)

#define TEST_CONTEXT64_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
	 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)

#define ASSERT_CONTEXT_SANITY(context) \
	assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))
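/*
 * The COPYIN/COPYOUT macros below select a copy routine from the saved CPSR of
 * the faulting context: an exception taken from EL1 uses the kernel-to-kernel
 * copy path, while one taken from EL0 goes through the normal user
 * copyin/copyout. (Both macros expand against a local variable named `state`.)
 */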
#define COPYIN(src, dst, size)					\
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ?	\
	    copyin_kern(src, dst, size)				\
	:							\
	    copyin(src, dst, size)

#define COPYOUT(src, dst, size)					\
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ?	\
	    copyout_kern(src, dst, size)			\
	:							\
	    copyout(src, dst, size)

// Below is for concatenating a string param to a string literal
#define STR1(x) #x
#define STR(x)  STR1(x)
void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss);

void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t);
void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);
void sleh_irq(arm_saved_state_t *);
void sleh_fiq(arm_saved_state_t *);
void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far);

static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
static void sleh_interrupt_handler_epilogue(void);

static void handle_svc(arm_saved_state_t *);
static void handle_mach_absolute_time_trap(arm_saved_state_t *);
static void handle_mach_continuous_time_trap(arm_saved_state_t *);

static void handle_msr_trap(arm_saved_state_t *state, uint32_t iss);

extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, boolean_t);

static void handle_uncategorized(arm_saved_state_t *, boolean_t);
static void handle_breakpoint(arm_saved_state_t *);

typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);

static int is_vm_fault(fault_status_t);
static int is_translation_fault(fault_status_t);
static int is_alignment_fault(fault_status_t);

typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);

static void handle_pc_align(arm_saved_state_t *ss);
static void handle_sp_align(arm_saved_state_t *ss);
static void handle_sw_step_debug(arm_saved_state_t *ss);
static void handle_wf_trap(arm_saved_state_t *ss);

static void handle_watchpoint(vm_offset_t fault_addr);

static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t);

static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr);

static void handle_simd_trap(arm_saved_state_t *, uint32_t esr);

extern void mach_kauth_cred_uthread_update(void);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
    struct uthread * uthread, struct proc * proc);

extern void
mach_syscall(struct arm_saved_state *);
extern kern_return_t dtrace_user_probe(arm_saved_state_t * regs);
extern boolean_t dtrace_tally_fault(user_addr_t);

/* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions
 * over from that file. Need to keep these in sync! */
#define FASTTRAP_ARM32_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB32_INSTR 0xdefc
#define FASTTRAP_ARM64_INSTR 0xe7eeee7e

#define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB32_RET_INSTR 0xdefb
#define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */

extern boolean_t pgtrace_enabled;
#if __ARM_PAN_AVAILABLE__
#ifdef CONFIG_XNUPOST
extern vm_offset_t pan_test_addr;
extern vm_offset_t pan_ro_addr;
extern volatile int pan_exception_level;
extern volatile char pan_fault_value;
#endif
#endif

#if defined(APPLECYCLONE)
#define CPU_NAME	"Cyclone"
#elif defined(APPLETYPHOON)
#define CPU_NAME	"Typhoon"
#elif defined(APPLETWISTER)
#define CPU_NAME	"Twister"
#elif defined(APPLEHURRICANE)
#define CPU_NAME	"Hurricane"
#else
#define CPU_NAME	"Unknown"
#endif
#if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
#define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
#define ESR_WT_REASON(esr) ((esr) & 0xff)

#define WT_REASON_NONE           0
#define WT_REASON_INTEGRITY_FAIL 1
#define WT_REASON_BAD_SYSCALL    2
#define WT_REASON_NOT_LOCKED     3
#define WT_REASON_ALREADY_LOCKED 4
#define WT_REASON_SW_REQ         5
#define WT_REASON_PT_INVALID     6
#define WT_REASON_PT_VIOLATION   7
#define WT_REASON_REG_VIOLATION  8
#endif
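/*
 * Per the ESR_WT_* macros above, a watchtower SError carries a fixed signature
 * in its upper syndrome bits (0xbf5754xx) and one of the WT_REASON_* codes in
 * the low byte; kernel_integrity_error_handler() below decodes it that way.
 */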
extern vm_offset_t static_memory_end;

static inline unsigned
__ror(unsigned value, unsigned shift)
{
	return (((unsigned)(value) >> (unsigned)(shift)) |
	    (unsigned)(value) << ((unsigned)(sizeof(unsigned) * CHAR_BIT) - (unsigned)(shift)));
}
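/*
 * Worked example: __ror(0x80000001, 9) rotates the 32-bit value right by 9
 * bits, yielding 0x00C00000. sleh_irq() uses this to fold interrupt
 * timestamps into the entropy buffer.
 */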
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#if defined(APPLE_ARM64_ARCH_FAMILY)
	uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
#if defined(NO_ECORE)
	uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;

	mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
	l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
	lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
	fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

	panic_plain("Unhandled " CPU_NAME
	    " implementation specific error. state=%p esr=%#x far=%p\n"
	    "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
	    "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
	    state, esr, (void *)far,
	    (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
	    (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);

#elif defined(HAS_MIGSTS)
	uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;

	mpidr = __builtin_arm_rsr64("MPIDR_EL1");
	migsts = __builtin_arm_rsr64(STR(ARM64_REG_MIGSTS_EL1));
	mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
	l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
	lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
	fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

	panic_plain("Unhandled " CPU_NAME
	    " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
	    "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
	    "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
	    state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
	    (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
	    (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
#else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
	uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;

	mpidr = __builtin_arm_rsr64("MPIDR_EL1");

	if (mpidr & MPIDR_PNE) {
		mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
		lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
		fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
	} else {
		mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_MMU_ERR_STS));
		lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_LSU_ERR_STS));
		fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_FED_ERR_STS));
	}

	llc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	llc_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));

	panic_plain("Unhandled " CPU_NAME
	    " implementation specific error. state=%p esr=%#x far=%p p-core?%d\n"
	    "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
	    "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n",
	    state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
	    (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
	    (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
#endif
#else // !defined(APPLE_ARM64_ARCH_FAMILY)
#pragma unused (state, esr, far)
	panic_plain("Unhandled implementation specific error\n");
#endif
}
#if CONFIG_KERNEL_INTEGRITY
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far) {
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
	if (ESR_WT_SERROR(esr)) {
		switch (ESR_WT_REASON(esr)) {
		case WT_REASON_INTEGRITY_FAIL:
			panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
		case WT_REASON_BAD_SYSCALL:
			panic_plain("Kernel integrity, bad syscall.");
		case WT_REASON_NOT_LOCKED:
			panic_plain("Kernel integrity, not locked.");
		case WT_REASON_ALREADY_LOCKED:
			panic_plain("Kernel integrity, already locked.");
		case WT_REASON_SW_REQ:
			panic_plain("Kernel integrity, software request.");
		case WT_REASON_PT_INVALID:
			panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
			    "walking 0x%016lx.", far);
		case WT_REASON_PT_VIOLATION:
			panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
			    far);
		case WT_REASON_REG_VIOLATION:
			panic_plain("Kernel integrity, violation in system register %d.",
			    (esr & 0xffff));
		default:
			panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
		}
	}
#else
	if (ESR_WT_SERROR(esr)) {
		panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
	}
#endif
#endif
}
#pragma clang diagnostic pop
#endif
void
arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
	cpu_data_t *cdp = getCpuDatap();

#if CONFIG_KERNEL_INTEGRITY
	kernel_integrity_error_handler(esr, far);
#endif

	if (cdp->platform_error_handler != (platform_error_handler_t) NULL)
		(*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, far);
	else
		arm64_implementation_specific_error(state, esr, far);
}
void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
	boolean_t ss_valid;

	ss_valid = is_saved_state64(ss);
	arm_saved_state64_t *state = saved_state64(ss);

	panic_plain("%s (saved state: %p%s)\n"
	    "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n"
	    "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n"
	    "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
	    "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
	    "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
	    "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
	    "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
	    "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n"
	    "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
	    msg, ss, (ss_valid ? "" : " INVALID"),
	    state->x[0], state->x[1], state->x[2], state->x[3],
	    state->x[4], state->x[5], state->x[6], state->x[7],
	    state->x[8], state->x[9], state->x[10], state->x[11],
	    state->x[12], state->x[13], state->x[14], state->x[15],
	    state->x[16], state->x[17], state->x[18], state->x[19],
	    state->x[20], state->x[21], state->x[22], state->x[23],
	    state->x[24], state->x[25], state->x[26], state->x[27],
	    state->x[28], state->fp, state->lr, state->sp,
	    state->pc, state->cpsr, state->esr, state->far);
}
void
sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
{
	esr_exception_class_t  class = ESR_EC(esr);
	arm_saved_state_t     *state = &context->ss;

	switch (class) {
	case ESR_EC_UNCATEGORIZED:
	{
		uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
		if (IS_ARM_GDB_TRAP(instr))
			DebuggerCall(EXC_BREAKPOINT, state);
		// Intentionally fall through to panic if we return from the debugger
	}
	default:
		panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
	}
}
void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	esr_exception_class_t  class   = ESR_EC(esr);
	arm_saved_state_t     *state   = &context->ss;
	vm_offset_t            recover = 0;
	thread_t               thread  = current_thread();

	ASSERT_CONTEXT_SANITY(context);

	/* Don't run exception handler with recover handler set in case of double fault */
	if (thread->recover) {
		recover = thread->recover;
		thread->recover = (vm_offset_t)NULL;
	}

	/* Inherit the interrupt masks from previous context */
	if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state)))
		ml_set_interrupts_enabled(TRUE);

	switch (class) {
	case ESR_EC_SVC_64:
		if (!is_saved_state64(state) || !PSR64_IS_USER(get_saved_state_cpsr(state))) {
			panic("Invalid SVC_64 context");
		}

		handle_svc(state);
		break;

	case ESR_EC_DABORT_EL0:
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort);
		assert(0); /* Unreachable */

	case ESR_EC_MSR_TRAP:
		handle_msr_trap(state, ESR_ISS(esr));
		break;

	case ESR_EC_IABORT_EL0:
		handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort);
		assert(0); /* Unreachable */

	case ESR_EC_IABORT_EL1:
		panic_with_thread_kernel_state("Kernel instruction fetch abort", state);

	case ESR_EC_PC_ALIGN:
		handle_pc_align(state);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_DABORT_EL1:
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort);
		break;

	case ESR_EC_UNCATEGORIZED:
		assert(!ESR_ISS(esr));

		handle_uncategorized(&context->ss, ESR_INSTR_IS_2BYTES(esr));
		/* TODO: Uncomment this after stackshot uses a brk instruction
		 * rather than an undefined instruction, as stackshot is the
		 * only case where we want to return to the first-level handler.
		 */
		//assert(0); /* Unreachable */
		break;

	case ESR_EC_SP_ALIGN:
		handle_sp_align(state);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_BKPT_AARCH32:
		handle_breakpoint(state);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_BRK_AARCH64:
		if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {

			kprintf("Breakpoint instruction exception from kernel. Hanging here (by design).\n");
			for (;;);

			__unreachable_ok_push
			DebuggerCall(EXC_BREAKPOINT, &context->ss);
			break;
			__unreachable_ok_pop
		}

		handle_breakpoint(state);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_BKPT_REG_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_breakpoint(state);
			assert(0); /* Unreachable */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_BKPT_REG_MATCH_EL1:
		if (!PE_i_can_has_debugger(NULL) && FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			kprintf("Hardware Breakpoint Debug exception from kernel. Hanging here (by design).\n");
			for (;;);

			__unreachable_ok_push
			DebuggerCall(EXC_BREAKPOINT, &context->ss);
			break;
			__unreachable_ok_pop
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_SW_STEP_DEBUG_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_sw_step_debug(state);
			assert(0); /* Unreachable */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_SW_STEP_DEBUG_EL1:
		if (!PE_i_can_has_debugger(NULL) && FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			kprintf("Software Step Debug exception from kernel. Hanging here (by design).\n");
			for (;;);

			__unreachable_ok_push
			DebuggerCall(EXC_BREAKPOINT, &context->ss);
			break;
			__unreachable_ok_pop
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_WATCHPT_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_watchpoint(far);
			assert(0); /* Unreachable */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_WATCHPT_MATCH_EL1:
		/*
		 * If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
		 * abort. Turn off watchpoints and keep going; we'll turn them back on in return_from_exception..
		 */
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			break; /* return to first level handler */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_TRAP_SIMD_FP:
		handle_simd_trap(state, esr);
		break;

	case ESR_EC_ILLEGAL_INSTR_SET:
		if (EXCB_ACTION_RERUN !=
		    ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
			// instruction is not re-executed
			panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
			    state, class, esr, (void *)far, get_saved_state_cpsr(state));
		}
		// must clear this fault in PSR to re-run
		set_saved_state_cpsr(state, get_saved_state_cpsr(state) & (~PSR64_IL));
		break;

	case ESR_EC_MCR_MRC_CP15_TRAP:
	case ESR_EC_MCRR_MRRC_CP15_TRAP:
	case ESR_EC_MCR_MRC_CP14_TRAP:
	case ESR_EC_LDC_STC_CP14_TRAP:
	case ESR_EC_MCRR_MRRC_CP14_TRAP:
		handle_user_trapped_instruction32(state, esr);
		break;

	case ESR_EC_WFI_WFE:
		// Use of WFI or WFE instruction when they have been disabled for EL0
		handle_wf_trap(state);
		assert(0); /* Unreachable */
		break;

	default:
		panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p",
		    state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;
	}

	if (recover)
		thread->recover = recover;
}
/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 */
static void
handle_uncategorized(arm_saved_state_t *state, boolean_t instrLen2)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr     = 0;

	if (instrLen2) {
		uint16_t instr16 = 0;
		COPYIN(get_saved_state_pc(state), (char *)&instr16, sizeof(instr16));

		instr = instr16;
	} else {
		COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	}

#if CONFIG_DTRACE
	if (tempDTraceTrapHook && (tempDTraceTrapHook(exception, state, 0, 0) == KERN_SUCCESS)) {
		return;
	}

	if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
		/*
		 * For a 64bit user process, we care about all 4 bytes of the
		 * instruction.
		 */
		if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
			if (dtrace_user_probe(state) == KERN_SUCCESS)
				return;
		}
	} else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
		/*
		 * For a 32bit user process, we check for thumb mode, in
		 * which case we only care about a 2 byte instruction length.
		 * For non-thumb mode, we care about all 4 bytes of the instruction.
		 */
		if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
			if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
			    ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		} else {
			if ((instr == FASTTRAP_ARM32_INSTR) ||
			    (instr == FASTTRAP_ARM32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		}
	}
#endif /* CONFIG_DTRACE */

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		if (IS_ARM_GDB_TRAP(instr)) {
			boolean_t interrupt_state;
			vm_offset_t kstackptr;
			exception = EXC_BREAKPOINT;

			interrupt_state = ml_set_interrupts_enabled(FALSE);

			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			kstackptr = (vm_offset_t) current_thread()->machine.kstackptr;
			if (kstackptr) {
				((thread_kernel_state_t) kstackptr)->machine.ss = *state;
			}

			/* Hop into the debugger (typically either due to a
			 * fatal exception, an explicit panic, or a stackshot
			 * request).
			 */
			DebuggerCall(exception, state);

			(void) ml_set_interrupts_enabled(interrupt_state);
			return;
		} else {
			panic("Undefined kernel instruction: pc=%p instr=%x\n", (void*)get_saved_state_pc(state), instr);
		}
	}

	/*
	 * Check for GDB breakpoint via illegal opcode.
	 */
	if (instrLen2) {
		if (IS_THUMB_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr;
		} else {
			codes[1] = instr;
		}
	} else {
		if (IS_ARM_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr;
		} else if (IS_THUMB_GDB_TRAP((instr & 0xFFFF))) {
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr & 0xFFFF;
		} else if (IS_THUMB_GDB_TRAP((instr >> 16))) {
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr >> 16;
		} else {
			codes[1] = instr;
		}
	}

	exception_triage(exception, codes, numcodes);
	assert(0); /* NOTREACHED */
}
static void
handle_breakpoint(arm_saved_state_t *state)
{
	exception_type_t           exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_BREAKPOINT};
	mach_msg_type_number_t     numcodes  = 2;

	codes[1] = get_saved_state_pc(state);
	exception_triage(exception, codes, numcodes);
	assert(0); /* NOTREACHED */
}
static void
handle_watchpoint(vm_offset_t fault_addr)
{
	exception_type_t           exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_DA_DEBUG};
	mach_msg_type_number_t     numcodes  = 2;

	codes[1] = fault_addr;
	exception_triage(exception, codes, numcodes);
	assert(0); /* NOTREACHED */
}
static void
handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
    abort_inspector_t inspect_abort, abort_handler_t handler)
{
	fault_status_t fault_code;
	vm_prot_t fault_type;

	inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
	handler(state, esr, fault_addr, fault_code, fault_type, recover);
}
static void
inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.instr_ex_cnt++;
	*fault_code = ISS_IA_FSC(iss);
	*fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
}
static void
inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.data_ex_cnt++;
	*fault_code = ISS_DA_FSC(iss);

	/* Cache operations report faults as write access. Change these to read access. */
	if ((iss & ISS_DA_WNR) && !(iss & ISS_DA_CM)) {
		*fault_type = (VM_PROT_READ | VM_PROT_WRITE);
	} else {
		*fault_type = (VM_PROT_READ);
	}
}
static void
handle_pc_align(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
	codes[0] = EXC_ARM_DA_ALIGN;
	codes[1] = get_saved_state_pc(ss);

	exception_triage(exc, codes, numcodes);
	assert(0); /* NOTREACHED */
}
static void
handle_sp_align(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
	codes[0] = EXC_ARM_SP_ALIGN;
	codes[1] = get_saved_state_sp(ss);

	exception_triage(exc, codes, numcodes);
	assert(0); /* NOTREACHED */
}
static void
handle_wf_trap(arm_saved_state_t *ss)
{
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	exc = EXC_BAD_INSTRUCTION;
	codes[0] = EXC_ARM_UNDEFINED;
	codes[1] = get_saved_state_sp(ss);

	exception_triage(exc, codes, numcodes);
	assert(0); /* NOTREACHED */
}
static void
handle_sw_step_debug(arm_saved_state_t *state)
{
	thread_t thread = current_thread();
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
	}

	// Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
	if (thread->machine.DebugData != NULL) {
		thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
	} else {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
	}

	set_saved_state_cpsr((thread->machine.upcb),
	    get_saved_state_cpsr((thread->machine.upcb)) & ~(PSR64_SS | DAIF_IRQF | DAIF_FIQF));

	// Special encoding for gdb single step event on ARM
	exc = EXC_BREAKPOINT;
	codes[0] = 1;
	codes[1] = 0;

	exception_triage(exc, codes, numcodes);
	assert(0); /* NOTREACHED */
}
static int
is_vm_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
	case FSC_ACCESS_FLAG_FAULT_L1:
	case FSC_ACCESS_FLAG_FAULT_L2:
	case FSC_ACCESS_FLAG_FAULT_L3:
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static int
is_translation_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

#if __ARM_PAN_AVAILABLE__
static int
is_permission_fault(fault_status_t status)
{
	switch (status) {
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}
#endif

static int
is_alignment_fault(fault_status_t status)
{
	return (status == FSC_ALIGNMENT_FAULT);
}

static int
is_parity_error(fault_status_t status)
{
	switch (status) {
	case FSC_SYNC_PARITY:
	case FSC_ASYNC_PARITY:
	case FSC_SYNC_PARITY_TT_L1:
	case FSC_SYNC_PARITY_TT_L2:
	case FSC_SYNC_PARITY_TT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}
static void
handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
{
	exception_type_t           exc      = EXC_BAD_ACCESS;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t     numcodes = 2;
	thread_t                   thread   = current_thread();

	if (ml_at_interrupt_context())
		panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */

	if (is_vm_fault(fault_code)) {
		kern_return_t result = KERN_FAILURE;
		vm_map_t map = thread->map;
		vm_offset_t vm_fault_addr = fault_addr;

		assert(map != kernel_map);

		if (!(fault_type & VM_PROT_EXECUTE) && user_tbi_enabled())
			vm_fault_addr = tbi_clear(fault_addr);

#if CONFIG_DTRACE
		if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
			if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
				if (recover) {
					set_saved_state_pc(state, recover);
				} else {
					boolean_t intr = ml_set_interrupts_enabled(FALSE);
					panic_with_thread_kernel_state("copyin/out has no recovery point", state);
					(void) ml_set_interrupts_enabled(intr);
				}
				return;
			} else {
				boolean_t intr = ml_set_interrupts_enabled(FALSE);
				panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
				(void) ml_set_interrupts_enabled(intr);
				return;
			}
		}
#endif

#if CONFIG_PGTRACE
		if (pgtrace_enabled) {
			/* Check to see if trace bit is set */
			result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
			if (result == KERN_SUCCESS) return;
		}
#endif

		/* check to see if it is just a pmap ref/modify fault */

		if ((result != KERN_SUCCESS) && !is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap, trunc_page(vm_fault_addr), fault_type, TRUE);
		}
		if (result != KERN_SUCCESS) {
			/* We have to fault the page in */
			result = vm_fault(map, vm_fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}
		if (result == KERN_SUCCESS || result == KERN_ABORTED) {
			thread_exception_return();
		}

		codes[0] = result;
	} else if (is_alignment_fault(fault_code)) {
		codes[0] = EXC_ARM_DA_ALIGN;
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			thread_exception_return();
		}
#else
		panic("User parity error.");
#endif
	} else {
		codes[0] = KERN_FAILURE;
	}

	codes[1] = fault_addr;
	exception_triage(exc, codes, numcodes);
	assert(0); /* NOTREACHED */
}
#if __ARM_PAN_AVAILABLE__
static int
is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
{
	// PAN (Privileged Access Never) fault occurs for data read/write in EL1 to
	// virtual address that is readable/writeable from both EL1 and EL0

	// To check for PAN fault, we evaluate if the following conditions are true:
	// 1. This is a permission fault
	// 2. PAN is enabled
	// 3. AT instruction (on which PAN has no effect) on the same faulting address
	//    resolves to a valid mapping
	vm_offset_t pa;

	if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
		return FALSE;
	}

	if (esr & ISS_DA_WNR) {
		pa = mmu_kvtop_wpreflight(fault_addr);
	} else {
		pa = mmu_kvtop(fault_addr);
	}
	return (pa) ? TRUE : FALSE;
}
#endif
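/*
 * In other words, the check above attributes a fault to PAN only when the MMU
 * can still translate the address via an AT-based probe (mmu_kvtop*); a
 * genuinely unmapped or truly protected address fails that probe and is left
 * to the normal abort path.
 */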
static void
handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
{
	thread_t thread = current_thread();

#if CONFIG_DTRACE
	if (is_vm_fault(fault_code) && thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
		if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
			/*
			 * Point to next instruction, or recovery handler if set.
			 */
			if (recover) {
				set_saved_state_pc(state, recover);
			} else {
				set_saved_state_pc(state, get_saved_state_pc(state) + 4);
			}
			return;
		} else {
			boolean_t intr = ml_set_interrupts_enabled(FALSE);
			panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
			(void) ml_set_interrupts_enabled(intr);
			return;
		}
	}
#endif

#if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */
	if (ml_at_interrupt_context())
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
#endif

	if (is_vm_fault(fault_code)) {
		kern_return_t result = KERN_FAILURE;
		vm_map_t map;
		int interruptible;

		/*
		 * Ensure no faults in the physical aperture. This could happen if
		 * a page table is incorrectly allocated from the read only region
		 * when running with KTRR.
		 */

#if __ARM_PAN_AVAILABLE__ && defined(CONFIG_XNUPOST)
		if (is_permission_fault(fault_code) && !(get_saved_state_cpsr(state) & PSR64_PAN) &&
		    (pan_ro_addr != 0) && (fault_addr == pan_ro_addr)) {
			++pan_exception_level;
			// On an exception taken from a PAN-disabled context, verify
			// that PAN is re-enabled for the exception handler and that
			// accessing the test address produces a PAN fault.
			pan_fault_value = *(char *)pan_test_addr;
			set_saved_state_pc(state, get_saved_state_pc(state) + 4);
			return;
		}
#endif

		if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
			panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
		}

		if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
			map = kernel_map;
			interruptible = THREAD_UNINT;
		} else {
			map = thread->map;
			interruptible = THREAD_ABORTSAFE;
		}

#if CONFIG_PGTRACE
		if (pgtrace_enabled) {
			/* Check to see if trace bit is set */
			result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
			if (result == KERN_SUCCESS) return;
		}

		if (ml_at_interrupt_context())
			panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
#endif

		/* check to see if it is just a pmap ref/modify fault */
		if (!is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE);
			if (result == KERN_SUCCESS) return;
		}

		if (result != KERN_PROTECTION_FAILURE) {
			/*
			 *  We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}

		if (result == KERN_SUCCESS) return;

		/*
		 *  If we have a recover handler, invoke it now.
		 */
		if (recover) {
			set_saved_state_pc(state, recover);
			return;
		}

#if __ARM_PAN_AVAILABLE__
		if (is_pan_fault(state, esr, fault_addr, fault_code)) {
#ifdef CONFIG_XNUPOST
			if ((pan_test_addr != 0) && (fault_addr == pan_test_addr)) {
				++pan_exception_level;
				// read the user-accessible value to make sure
				// pan is enabled and produces a 2nd fault from
				// the exception handler
				if (pan_exception_level == 1)
					pan_fault_value = *(char *)pan_test_addr;
				// this fault address is used for PAN test
				// disable PAN and rerun
				set_saved_state_cpsr(state,
				    get_saved_state_cpsr(state) & (~PSR64_PAN));
				return;
			}
#endif
			panic_with_thread_kernel_state("Privileged access never abort.", state);
		}
#endif

#if CONFIG_PGTRACE
	} else if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
#endif
	} else if (is_alignment_fault(fault_code)) {
		panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#endif
		panic_with_thread_kernel_state("Kernel parity error.", state);
	} else {
		kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
	}

	panic_with_thread_kernel_state("Kernel data abort.", state);
}
extern void syscall_trace(struct arm_saved_state * regs);
static void
handle_svc(arm_saved_state_t *state)
{
	int trap_no = get_saved_state_svc_number(state);
	thread_t thread = current_thread();
	struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
	syscall_trace(state);
#endif

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

	if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
		platform_syscall(state);
		panic("Returned from platform_syscall()?");
	}

	mach_kauth_cred_uthread_update();

	if (trap_no < 0) {
		if (trap_no == -3) {
			handle_mach_absolute_time_trap(state);
			return;
		} else if (trap_no == -4) {
			handle_mach_continuous_time_trap(state);
			return;
		}

		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_mach++;
		mach_syscall(state);
	} else {
		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_unix++;
		p = get_bsdthreadtask_info(thread);

		unix_syscall(state, thread, (struct uthread *)thread->uthread, p);
	}
}
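/*
 * Note on the dispatch above: negative trap numbers select the Mach trap path
 * (with -3 and -4 short-circuited to the mach_absolute_time and
 * mach_continuous_time handlers below), while non-negative numbers take the
 * BSD unix_syscall path; compare the "Invert syscall number" note in
 * thread_syscall_return().
 */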
static void
handle_mach_absolute_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_absolute_time();
	saved_state64(state)->x[0] = now;
}

static void
handle_mach_continuous_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_continuous_time();
	saved_state64(state)->x[0] = now;
}
static void
handle_msr_trap(arm_saved_state_t *state, uint32_t iss)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr     = 0;

	(void)iss;

	if (!is_saved_state64(state)) {
		panic("MSR/MRS trap (EC 0x%x) from 32-bit state\n", ESR_EC_MSR_TRAP);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("MSR/MRS trap (EC 0x%x) from kernel\n", ESR_EC_MSR_TRAP);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
}
static void
handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr     = 0;

	if (is_saved_state64(state)) {
		panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
}
static void
handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2]  = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes  = 2;
	uint32_t                   instr     = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
}
void
sleh_irq(arm_saved_state_t *state)
{
	uint64_t     timestamp            = 0;
	uint32_t     old_entropy_data     = 0;
	uint32_t *   old_entropy_data_ptr = NULL;
	uint32_t *   new_entropy_data_ptr = NULL;
	cpu_data_t * cdp                  = getCpuDatap();
#if DEVELOPMENT || DEBUG
	int preemption_level = get_preemption_level();
#endif

	sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

	/* Run the registered interrupt handler. */
	cdp->interrupt_handler(cdp->interrupt_target,
	    cdp->interrupt_refCon,
	    cdp->interrupt_nub,
	    cdp->interrupt_source);

	/* We use interrupt timing as an entropy source. */
	timestamp = ml_get_timebase();

	/*
	 * The buffer index is subject to races, but as these races should only
	 * result in multiple CPUs updating the same location, the end result
	 * should be that noise gets written into the entropy buffer. As this
	 * is the entire point of the entropy buffer, we will not worry about
	 * these races for now.
	 */
	old_entropy_data_ptr = EntropyData.index_ptr;
	new_entropy_data_ptr = old_entropy_data_ptr + 1;

	if (new_entropy_data_ptr >= &EntropyData.buffer[ENTROPY_BUFFER_SIZE]) {
		new_entropy_data_ptr = EntropyData.buffer;
	}

	EntropyData.index_ptr = new_entropy_data_ptr;

	/* Mix the timestamp data and the old data together. */
	old_entropy_data = *old_entropy_data_ptr;
	*old_entropy_data_ptr = (uint32_t)timestamp ^ __ror(old_entropy_data, 9);
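	/*
	 * Worked example of the mixing step above (hypothetical values): with
	 * *old_entropy_data_ptr == 0x80000001 and (uint32_t)timestamp == 0x0000ABCD,
	 * the slot becomes 0x0000ABCD ^ __ror(0x80000001, 9)
	 * == 0x0000ABCD ^ 0x00C00000 == 0x00C0ABCD.
	 */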
	sleh_interrupt_handler_epilogue();
#if DEVELOPMENT || DEBUG
	if (preemption_level != get_preemption_level())
		panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, get_preemption_level());
#endif
}
void
sleh_fiq(arm_saved_state_t *state)
{
	unsigned int type = DBG_INTR_TYPE_UNKNOWN;
#if DEVELOPMENT || DEBUG
	int preemption_level = get_preemption_level();
#endif
#if MONOTONIC
	uint64_t pmsr = 0, upmsr = 0;
#endif /* MONOTONIC */

#if MONOTONIC
	if (mt_pmi_pending(&pmsr, &upmsr)) {
		type = DBG_INTR_TYPE_PMI;
	} else
#endif /* MONOTONIC */
	if (ml_get_timer_pending()) {
		type = DBG_INTR_TYPE_TIMER;
	}

	sleh_interrupt_handler_prologue(state, type);

#if MONOTONIC
	if (type == DBG_INTR_TYPE_PMI) {
		mt_fiq(getCpuDatap(), pmsr, upmsr);
	} else
#endif /* MONOTONIC */
	{
		/*
		 * We don't know that this is a timer, but we don't have insight into
		 * the other interrupts that go down this path.
		 */
		cpu_data_t *cdp = getCpuDatap();

		cdp->cpu_decrementer = -1; /* Large */

		/*
		 * ARM64_TODO: whether we're coming from userland is ignored right now.
		 * We can easily thread it through, but not bothering for the
		 * moment (AArch32 doesn't either).
		 */
		rtclock_intr(TRUE);
	}

	sleh_interrupt_handler_epilogue();
#if DEVELOPMENT || DEBUG
	if (preemption_level != get_preemption_level())
		panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, get_preemption_level());
#endif
}
void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	arm_saved_state_t *state = &context->ss;
#if DEVELOPMENT || DEBUG
	int preemption_level = get_preemption_level();
#endif

	ASSERT_CONTEXT_SANITY(context);
	arm64_platform_error(state, esr, far);
#if DEVELOPMENT || DEBUG
	if (preemption_level != get_preemption_level())
		panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level());
#endif
}
void
mach_syscall_trace_exit(
	unsigned int retval,
	unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);
}
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
	thread_t thread;
	struct arm_saved_state *state;

	thread = current_thread();
	state = get_user_regs(thread);

	assert(is_saved_state64(state));
	saved_state64(state)->x[0] = error;

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	if (kdebug_enable) {
		/* Invert syscall number (negative for a mach syscall) */
		mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
	}

	thread_exception_return();
}
void
syscall_trace(
	struct arm_saved_state * regs __unused)
{
	/* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}
static void
sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
{
	uint64_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	uint64_t pc = is_user ? get_saved_state_pc(state) :
	    VM_KERNEL_UNSLIDE(get_saved_state_pc(state));

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, pc, is_user, type);

#if CONFIG_TELEMETRY
	if (telemetry_needs_record) {
		telemetry_mark_curthread((boolean_t)is_user, FALSE);
	}
#endif /* CONFIG_TELEMETRY */
}
static void
sleh_interrupt_handler_epilogue(void)
{
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}
void
sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
{
	thread_t thread = current_thread();
	vm_offset_t kernel_stack_bottom, sp;

	sp = get_saved_state_sp(&context->ss);
	kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;

	if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
		panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
	}

	panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
}
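/*
 * Note on the two panics above: the faulting SP is compared against the page
 * immediately below the thread's kernel stack, so a pointer landing in that
 * guard region is reported as probable stack overflow, while anything else is
 * treated as stack-pointer corruption.
 */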