[apple/xnu.git] / osfmk / arm64 / sleh.c  (tag: xnu-7195.81.3)
1 /*
2 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm/caches_internal.h>
30 #include <arm/cpu_data.h>
31 #include <arm/cpu_data_internal.h>
32 #include <arm/misc_protos.h>
33 #include <arm/thread.h>
34 #include <arm/rtclock.h>
35 #include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
36 #include <arm64/proc_reg.h>
37 #include <arm64/machine_machdep.h>
38 #include <arm64/monotonic.h>
39 #include <arm64/instructions.h>
40
41 #include <kern/debug.h>
42 #include <kern/thread.h>
43 #include <mach/exception.h>
44 #include <mach/arm/traps.h>
45 #include <mach/vm_types.h>
46 #include <mach/machine/thread_status.h>
47
48 #include <machine/atomic.h>
49 #include <machine/limits.h>
50
51 #include <pexpert/arm/protos.h>
52
53 #include <vm/vm_page.h>
54 #include <vm/pmap.h>
55 #include <vm/vm_fault.h>
56 #include <vm/vm_kern.h>
57
58 #include <sys/errno.h>
59 #include <sys/kdebug.h>
60 #include <kperf/kperf.h>
61
62 #include <kern/policy_internal.h>
63 #if CONFIG_TELEMETRY
64 #include <kern/telemetry.h>
65 #endif
66
67 #include <prng/entropy.h>
68
69
70
71 #ifndef __arm64__
72 #error Should only be compiling for arm64.
73 #endif
74
75 #define TEST_CONTEXT32_SANITY(context) \
76 (context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
77 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)
78
79 #define TEST_CONTEXT64_SANITY(context) \
80 (context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
81 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)
82
83 #define ASSERT_CONTEXT_SANITY(context) \
84 assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))
85
86
87 #define COPYIN(src, dst, size) \
88 (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
89 copyin_kern(src, dst, size) : \
90 copyin(src, dst, size)
91
92 #define COPYOUT(src, dst, size) \
93 (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
94 copyout_kern(src, dst, size) : \
95 copyout(src, dst, size)
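// Note (annotation): COPYIN/COPYOUT select the kernel-to-kernel copy routines when the
// saved CPSR shows the exception was taken from kernel (EL1) context (e.g. a fault hit
// during a copyio performed on the kernel's behalf), and the ordinary user copy routines
// otherwise. The expansion is an unparenthesized conditional expression, so it is used as
// a complete statement, as in the COPYIN() calls later in this file.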
96
97 // Stringize a macro parameter (after macro expansion) so it can be used as a string literal
98 #define STR1(x) #x
99 #define STR(x) STR1(x)
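// A minimal illustration of the two-level stringizing idiom above (hypothetical names,
// not from this file): given `#define REG S3_4_c15_c0_0`, STR(REG) expands to
// "S3_4_c15_c0_0" because the argument is macro-expanded before being stringized,
// whereas STR1(REG) would expand to "REG". This is what lets STR(ARM64_REG_...) be
// passed to __builtin_arm_rsr64() as a system-register name string below.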
100
101 #define ARM64_KDBG_CODE_KERNEL (0 << 8)
102 #define ARM64_KDBG_CODE_USER (1 << 8)
103 #define ARM64_KDBG_CODE_GUEST (2 << 8)
104
105 _Static_assert(ARM64_KDBG_CODE_GUEST <= KDBG_CODE_MAX, "arm64 KDBG trace codes out of range");
106 _Static_assert(ARM64_KDBG_CODE_GUEST <= UINT16_MAX, "arm64 KDBG trace codes out of range");
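// These bases are OR'ed with the ESR exception class to form the low 16 bits of the
// kdebug code emitted from sleh_synchronous() below (e.g. ARM64_KDBG_CODE_USER | class);
// the class value fits in the low 8 bits, which is why the bases are shifted left by 8.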
107
108 void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;
109
110 void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
111 void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);
112 void sleh_irq(arm_saved_state_t *);
113 void sleh_fiq(arm_saved_state_t *);
114 void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
115 void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;
116
117 static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
118 static void sleh_interrupt_handler_epilogue(void);
119
120 static void handle_svc(arm_saved_state_t *);
121 static void handle_mach_absolute_time_trap(arm_saved_state_t *);
122 static void handle_mach_continuous_time_trap(arm_saved_state_t *);
123
124 static void handle_msr_trap(arm_saved_state_t *state, uint32_t esr);
125
126 extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);
127
128 static void handle_uncategorized(arm_saved_state_t *);
129 static void handle_kernel_breakpoint(arm_saved_state_t *, uint32_t) __dead2;
130 static void handle_breakpoint(arm_saved_state_t *, uint32_t) __dead2;
131
132 typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
133 static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
134 static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);
135
136 static int is_vm_fault(fault_status_t);
137 static int is_translation_fault(fault_status_t);
138 static int is_alignment_fault(fault_status_t);
139
140 typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
141 static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
142 static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
143
144 static void handle_pc_align(arm_saved_state_t *ss) __dead2;
145 static void handle_sp_align(arm_saved_state_t *ss) __dead2;
146 static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
147 static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
148 static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;
149
150 static void handle_watchpoint(vm_offset_t fault_addr) __dead2;
151
152 static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t, expected_fault_handler_t);
153
154 static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;
155
156 static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;
157
158 extern void mach_kauth_cred_uthread_update(void);
159 void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
160
161 struct uthread;
162 struct proc;
163
164 typedef uint32_t arm64_instr_t;
165
166 extern void
167 unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
168 struct uthread * uthread, struct proc * proc);
169
170 extern void
171 mach_syscall(struct arm_saved_state*);
172
173 #if CONFIG_DTRACE
174 extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
175 extern boolean_t dtrace_tally_fault(user_addr_t);
176
177 /*
178 * Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy
179 * and paste the trap instructions over from that file.
180 * Need to keep these in sync!
181 */
182 #define FASTTRAP_ARM32_INSTR 0xe7ffdefc
183 #define FASTTRAP_THUMB32_INSTR 0xdefc
184 #define FASTTRAP_ARM64_INSTR 0xe7eeee7e
185
186 #define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
187 #define FASTTRAP_THUMB32_RET_INSTR 0xdefb
188 #define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d
189
190 /* See <rdar://problem/4613924> */
191 perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
192 #endif
193
194
195 #if CONFIG_PGTRACE
196 extern boolean_t pgtrace_enabled;
197 #endif
198
199 #if HAS_TWO_STAGE_SPR_LOCK
200 #ifdef CONFIG_XNUPOST
201 extern volatile vm_offset_t spr_lock_test_addr;
202 extern volatile uint32_t spr_lock_exception_esr;
203 #endif
204 #endif
205
206 #if INTERRUPT_MASKED_DEBUG
207 extern boolean_t interrupt_masked_debug;
208 #endif
209
210 extern void arm64_thread_exception_return(void) __dead2;
211
212 #if defined(APPLETYPHOON)
213 #define CPU_NAME "Typhoon"
214 #elif defined(APPLETWISTER)
215 #define CPU_NAME "Twister"
216 #elif defined(APPLEHURRICANE)
217 #define CPU_NAME "Hurricane"
218 #elif defined(APPLELIGHTNING)
219 #define CPU_NAME "Lightning"
220 #else
221 #define CPU_NAME "Unknown"
222 #endif
223
224 #if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
225 #define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
226 #define ESR_WT_REASON(esr) ((esr) & 0xff)
227
228 #define WT_REASON_NONE 0
229 #define WT_REASON_INTEGRITY_FAIL 1
230 #define WT_REASON_BAD_SYSCALL 2
231 #define WT_REASON_NOT_LOCKED 3
232 #define WT_REASON_ALREADY_LOCKED 4
233 #define WT_REASON_SW_REQ 5
234 #define WT_REASON_PT_INVALID 6
235 #define WT_REASON_PT_VIOLATION 7
236 #define WT_REASON_REG_VIOLATION 8
237 #endif
238
239 #if defined(HAS_IPI)
240 void cpu_signal_handler(void);
241 extern unsigned int gFastIPI;
242 #endif /* defined(HAS_IPI) */
243
244 static arm_saved_state64_t *original_faulting_state = NULL;
245
246 TUNABLE(bool, fp_exceptions_enabled, "-fp_exceptions", false);
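// Annotation: fp_exceptions_enabled defaults to false and is set via the "-fp_exceptions"
// boot-arg; handle_fp_trap() below only reports the detailed EXC_ARM_FP_* codes when it is
// enabled, otherwise user FP traps are reported as EXC_BAD_INSTRUCTION / EXC_ARM_UNDEFINED.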
247
248 extern vm_offset_t static_memory_end;
249
250 static inline int
251 is_vm_fault(fault_status_t status)
252 {
253 switch (status) {
254 case FSC_TRANSLATION_FAULT_L0:
255 case FSC_TRANSLATION_FAULT_L1:
256 case FSC_TRANSLATION_FAULT_L2:
257 case FSC_TRANSLATION_FAULT_L3:
258 case FSC_ACCESS_FLAG_FAULT_L1:
259 case FSC_ACCESS_FLAG_FAULT_L2:
260 case FSC_ACCESS_FLAG_FAULT_L3:
261 case FSC_PERMISSION_FAULT_L1:
262 case FSC_PERMISSION_FAULT_L2:
263 case FSC_PERMISSION_FAULT_L3:
264 return TRUE;
265 default:
266 return FALSE;
267 }
268 }
269
270 static inline int
271 is_translation_fault(fault_status_t status)
272 {
273 switch (status) {
274 case FSC_TRANSLATION_FAULT_L0:
275 case FSC_TRANSLATION_FAULT_L1:
276 case FSC_TRANSLATION_FAULT_L2:
277 case FSC_TRANSLATION_FAULT_L3:
278 return TRUE;
279 default:
280 return FALSE;
281 }
282 }
283
284 static inline int
285 is_permission_fault(fault_status_t status)
286 {
287 switch (status) {
288 case FSC_PERMISSION_FAULT_L1:
289 case FSC_PERMISSION_FAULT_L2:
290 case FSC_PERMISSION_FAULT_L3:
291 return TRUE;
292 default:
293 return FALSE;
294 }
295 }
296
297 static inline int
298 is_alignment_fault(fault_status_t status)
299 {
300 return status == FSC_ALIGNMENT_FAULT;
301 }
302
303 static inline int
304 is_parity_error(fault_status_t status)
305 {
306 switch (status) {
307 case FSC_SYNC_PARITY:
308 case FSC_ASYNC_PARITY:
309 case FSC_SYNC_PARITY_TT_L1:
310 case FSC_SYNC_PARITY_TT_L2:
311 case FSC_SYNC_PARITY_TT_L3:
312 return TRUE;
313 default:
314 return FALSE;
315 }
316 }
317
318 __dead2
319 static void
320 arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
321 {
322 #if defined(APPLE_ARM64_ARCH_FAMILY)
323 uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
324 #if defined(NO_ECORE)
325 uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;
326
327 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
328 l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
329 l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
330 l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
331 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
332 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
333
334 panic_plain("Unhandled " CPU_NAME
335 " implementation specific error. state=%p esr=%#x far=%p\n"
336 "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
337 "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
338 state, esr, (void *)far,
339 (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
340 (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
341
342 #elif defined(HAS_MIGSTS)
343 uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;
344
345 mpidr = __builtin_arm_rsr64("MPIDR_EL1");
346 migsts = __builtin_arm_rsr64(STR(ARM64_REG_MIGSTS_EL1));
347 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
348 l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
349 l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
350 l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
351 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
352 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
353
354 panic_plain("Unhandled " CPU_NAME
355 " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
356 "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
357 "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
358 state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
359 (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
360 (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
361 #else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
362 uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;
363 #if defined(HAS_DPC_ERR)
364 uint64_t dpc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_DPC_ERR_STS));
365 #endif // defined(HAS_DPC_ERR)
366
367 mpidr = __builtin_arm_rsr64("MPIDR_EL1");
368
369 if (mpidr & MPIDR_PNE) {
370 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
371 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
372 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
373 } else {
374 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_MMU_ERR_STS));
375 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_LSU_ERR_STS));
376 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_FED_ERR_STS));
377 }
378
379 llc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
380 llc_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
381 llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
382
383 panic_plain("Unhandled " CPU_NAME
384 " implementation specific error. state=%p esr=%#x far=%p p-core?%d"
385 #if defined(HAS_DPC_ERR)
386 " dpc_err_sts:%p"
387 #endif
388 "\n"
389 "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
390 "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n",
391 state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
392 #if defined(HAS_DPC_ERR)
393 (void *)dpc_err_sts,
394 #endif
395 (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
396 (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
397 #endif
398 #else // !defined(APPLE_ARM64_ARCH_FAMILY)
399 #pragma unused (state, esr, far)
400 panic_plain("Unhandled implementation specific error\n");
401 #endif
402 }
403
404 #if CONFIG_KERNEL_INTEGRITY
405 #pragma clang diagnostic push
406 #pragma clang diagnostic ignored "-Wunused-parameter"
407 static void
408 kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
409 {
410 #if defined(KERNEL_INTEGRITY_WT)
411 #if (DEVELOPMENT || DEBUG)
412 if (ESR_WT_SERROR(esr)) {
413 switch (ESR_WT_REASON(esr)) {
414 case WT_REASON_INTEGRITY_FAIL:
415 panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
416 case WT_REASON_BAD_SYSCALL:
417 panic_plain("Kernel integrity, bad syscall.");
418 case WT_REASON_NOT_LOCKED:
419 panic_plain("Kernel integrity, not locked.");
420 case WT_REASON_ALREADY_LOCKED:
421 panic_plain("Kernel integrity, already locked.");
422 case WT_REASON_SW_REQ:
423 panic_plain("Kernel integrity, software request.");
424 case WT_REASON_PT_INVALID:
425 panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
426 "walking 0x%016lx.", far);
427 case WT_REASON_PT_VIOLATION:
428 panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
429 far);
430 case WT_REASON_REG_VIOLATION:
431 panic_plain("Kernel integrity, violation in system register %d.",
432 (unsigned) far);
433 default:
434 panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
435 }
436 }
437 #else
438 if (ESR_WT_SERROR(esr)) {
439 panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
440 }
441 #endif
442 #endif
443 }
444 #pragma clang diagnostic pop
445 #endif
446
447 static void
448 arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
449 {
450 cpu_data_t *cdp = getCpuDatap();
451
452 #if CONFIG_KERNEL_INTEGRITY
453 kernel_integrity_error_handler(esr, far);
454 #endif
455
456 if (PE_handle_platform_error(far)) {
457 return;
458 } else if (cdp->platform_error_handler != NULL) {
459 cdp->platform_error_handler(cdp->cpu_id, far);
460 } else {
461 arm64_implementation_specific_error(state, esr, far);
462 }
463 }
464
465 void
466 panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
467 {
468 boolean_t ss_valid;
469
470 ss_valid = is_saved_state64(ss);
471 arm_saved_state64_t *state = saved_state64(ss);
472
473 os_atomic_cmpxchg(&original_faulting_state, NULL, state, seq_cst);
474
475 panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
476 "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n"
477 "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n"
478 "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
479 "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
480 "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
481 "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
482 "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
483 "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n"
484 "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
485 msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
486 state->x[0], state->x[1], state->x[2], state->x[3],
487 state->x[4], state->x[5], state->x[6], state->x[7],
488 state->x[8], state->x[9], state->x[10], state->x[11],
489 state->x[12], state->x[13], state->x[14], state->x[15],
490 state->x[16], state->x[17], state->x[18], state->x[19],
491 state->x[20], state->x[21], state->x[22], state->x[23],
492 state->x[24], state->x[25], state->x[26], state->x[27],
493 state->x[28], state->fp, state->lr, state->sp,
494 state->pc, state->cpsr, state->esr, state->far);
495 }
496
497 void
498 sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
499 {
500 esr_exception_class_t class = ESR_EC(esr);
501 arm_saved_state_t * state = &context->ss;
502
503 switch (class) {
504 case ESR_EC_UNCATEGORIZED:
505 {
506 uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
507 if (IS_ARM_GDB_TRAP(instr)) {
508 DebuggerCall(EXC_BREAKPOINT, state);
509 }
510 }
511 OS_FALLTHROUGH; // panic if we return from the debugger
512 default:
513 panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
514 }
515 }
516
517 #if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
518 static bool
519 handle_msr_write_from_xnupost(arm_saved_state_t *state, uint32_t esr)
520 {
521 user_addr_t pc = get_saved_state_pc(state);
522 if ((spr_lock_test_addr != 0) && (pc == spr_lock_test_addr)) {
523 spr_lock_exception_esr = esr;
524 set_saved_state_pc(state, pc + 4);
525 return true;
526 }
527
528 return false;
529 }
530 #endif
531
532 __attribute__((noreturn))
533 void
534 thread_exception_return()
535 {
536 thread_t thread = current_thread();
537 if (thread->machine.exception_trace_code != 0) {
538 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
539 MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, 0, 0, 0, 0, 0);
540 thread->machine.exception_trace_code = 0;
541 }
542
543 arm64_thread_exception_return();
544 __builtin_unreachable();
545 }
546
547 /*
548 * check whether task vtimers are running and set thread and CPU BSD AST
549 *
550 * must be called with interrupts masked so updates of fields are atomic
551 * must be emitted inline to avoid generating an FBT probe on the exception path
552 *
553 */
554 __attribute__((__always_inline__))
555 static inline void
556 task_vtimer_check(thread_t thread)
557 {
558 if (__improbable(thread->task->vtimers)) {
559 thread->ast |= AST_BSD;
560 thread->machine.CpuDatap->cpu_pending_ast |= AST_BSD;
561 }
562 }
563
564 void
565 sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
566 {
567 esr_exception_class_t class = ESR_EC(esr);
568 arm_saved_state_t * state = &context->ss;
569 vm_offset_t recover = 0;
570 thread_t thread = current_thread();
571 #if MACH_ASSERT
572 int preemption_level = get_preemption_level();
573 #endif
574 expected_fault_handler_t expected_fault_handler = NULL;
575 #ifdef CONFIG_XNUPOST
576 expected_fault_handler_t saved_expected_fault_handler = NULL;
577 uintptr_t saved_expected_fault_addr = 0;
578 #endif /* CONFIG_XNUPOST */
579
580 ASSERT_CONTEXT_SANITY(context);
581
582 task_vtimer_check(thread);
583
584 #if CONFIG_DTRACE
585 /*
586 * Handle kernel DTrace probes as early as possible to minimize the likelihood
587 * that this path will itself trigger a DTrace probe, which would lead to infinite
588 * probe recursion.
589 */
590 if (__improbable((class == ESR_EC_UNCATEGORIZED) && tempDTraceTrapHook &&
591 (tempDTraceTrapHook(EXC_BAD_INSTRUCTION, state, 0, 0) == KERN_SUCCESS))) {
592 return;
593 }
594 #endif
595 bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));
596
597 /*
598 * Use KERNEL_DEBUG_CONSTANT_IST here to avoid producing tracepoints
599 * that would disclose the behavior of PT_DENY_ATTACH processes.
600 */
601 if (is_user) {
602 thread->machine.exception_trace_code = (uint16_t)(ARM64_KDBG_CODE_USER | class);
603 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
604 MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_START,
605 esr, far, get_saved_state_pc(state), 0, 0);
606 } else {
607 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
608 MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_START,
609 esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
610 }
611
612 if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
613 /*
614 * We no longer support 32-bit, which means no 2-byte
615 * instructions.
616 */
617 if (is_user) {
618 panic("Exception on 2-byte instruction, "
619 "context=%p, esr=%#x, far=%p",
620 context, esr, (void *)far);
621 } else {
622 panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
623 }
624 }
625
626 /* Don't run exception handler with recover handler set in case of double fault */
627 if (thread->recover) {
628 recover = thread->recover;
629 thread->recover = (vm_offset_t)NULL;
630 }
631
632 #ifdef CONFIG_XNUPOST
633 if (thread->machine.expected_fault_handler != NULL) {
634 saved_expected_fault_handler = thread->machine.expected_fault_handler;
635 saved_expected_fault_addr = thread->machine.expected_fault_addr;
636
637 thread->machine.expected_fault_handler = NULL;
638 thread->machine.expected_fault_addr = 0;
639
640 if (saved_expected_fault_addr == far) {
641 expected_fault_handler = saved_expected_fault_handler;
642 }
643 }
644 #endif /* CONFIG_XNUPOST */
645
646 /* Inherit the interrupt masks from previous context */
647 if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
648 ml_set_interrupts_enabled(TRUE);
649 }
650
651 switch (class) {
652 case ESR_EC_SVC_64:
653 if (!is_saved_state64(state) || !is_user) {
654 panic("Invalid SVC_64 context");
655 }
656
657 handle_svc(state);
658 break;
659
660 case ESR_EC_DABORT_EL0:
661 handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort, expected_fault_handler);
662 break;
663
664 case ESR_EC_MSR_TRAP:
665 handle_msr_trap(state, esr);
666 break;
667
668
669 case ESR_EC_IABORT_EL0:
670 handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort, expected_fault_handler);
671 break;
672
673 case ESR_EC_IABORT_EL1:
674 #ifdef CONFIG_XNUPOST
675 if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
676 break;
677 }
678 #endif /* CONFIG_XNUPOST */
679
680 panic_with_thread_kernel_state("Kernel instruction fetch abort", state);
681
682 case ESR_EC_PC_ALIGN:
683 handle_pc_align(state);
684 __builtin_unreachable();
685
686 case ESR_EC_DABORT_EL1:
687 handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort, expected_fault_handler);
688 break;
689
690 case ESR_EC_UNCATEGORIZED:
691 assert(!ESR_ISS(esr));
692
693 #if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
694 if (handle_msr_write_from_xnupost(state, esr)) {
695 break;
696 }
697 #endif
698 handle_uncategorized(&context->ss);
699 break;
700
701 case ESR_EC_SP_ALIGN:
702 handle_sp_align(state);
703 __builtin_unreachable();
704
705 case ESR_EC_BKPT_AARCH32:
706 handle_breakpoint(state, esr);
707 __builtin_unreachable();
708
709 case ESR_EC_BRK_AARCH64:
710 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
711 handle_kernel_breakpoint(state, esr);
712 } else {
713 handle_breakpoint(state, esr);
714 }
715 __builtin_unreachable();
716
717 case ESR_EC_BKPT_REG_MATCH_EL0:
718 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
719 handle_breakpoint(state, esr);
720 }
721 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
722 class, state, class, esr, (void *)far);
723 __builtin_unreachable();
724
725 case ESR_EC_BKPT_REG_MATCH_EL1:
726 panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
727 __builtin_unreachable();
728
729 case ESR_EC_SW_STEP_DEBUG_EL0:
730 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
731 handle_sw_step_debug(state);
732 }
733 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
734 class, state, class, esr, (void *)far);
735 __builtin_unreachable();
736
737 case ESR_EC_SW_STEP_DEBUG_EL1:
738 panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
739 __builtin_unreachable();
740
741 case ESR_EC_WATCHPT_MATCH_EL0:
742 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
743 handle_watchpoint(far);
744 }
745 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
746 class, state, class, esr, (void *)far);
747 __builtin_unreachable();
748
749 case ESR_EC_WATCHPT_MATCH_EL1:
750 /*
751 * If we hit a watchpoint in kernel mode, it was probably in a copyin/copyout that we don't want to
752 * abort. Turn off watchpoints and keep going; we'll turn them back on in return_from_exception.
753 */
754 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
755 arm_debug_set(NULL);
756 break; /* return to first level handler */
757 }
758 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
759 class, state, class, esr, (void *)far);
760 __builtin_unreachable();
761
762 case ESR_EC_TRAP_SIMD_FP:
763 handle_simd_trap(state, esr);
764 __builtin_unreachable();
765
766 case ESR_EC_ILLEGAL_INSTR_SET:
767 if (EXCB_ACTION_RERUN !=
768 ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
769 // instruction is not re-executed
770 panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
771 state, class, esr, (void *)far, get_saved_state_cpsr(state));
772 }
773 // must clear this fault in PSR to re-run
774 mask_saved_state_cpsr(state, 0, PSR64_IL);
775 break;
776
777 case ESR_EC_MCR_MRC_CP15_TRAP:
778 case ESR_EC_MCRR_MRRC_CP15_TRAP:
779 case ESR_EC_MCR_MRC_CP14_TRAP:
780 case ESR_EC_LDC_STC_CP14_TRAP:
781 case ESR_EC_MCRR_MRRC_CP14_TRAP:
782 handle_user_trapped_instruction32(state, esr);
783 __builtin_unreachable();
784
785 case ESR_EC_WFI_WFE:
786 // Use of WFI or WFE instruction when they have been disabled for EL0
787 handle_wf_trap(state);
788 __builtin_unreachable();
789
790 case ESR_EC_FLOATING_POINT_64:
791 handle_fp_trap(state, esr);
792 __builtin_unreachable();
793
794 default:
795 panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p",
796 state, class, esr, (void *)far);
797 __builtin_unreachable();
798 }
799
800 #ifdef CONFIG_XNUPOST
801 if (saved_expected_fault_handler != NULL) {
802 thread->machine.expected_fault_handler = saved_expected_fault_handler;
803 thread->machine.expected_fault_addr = saved_expected_fault_addr;
804 }
805 #endif /* CONFIG_XNUPOST */
806
807 if (recover) {
808 thread->recover = recover;
809 }
810 if (is_user) {
811 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
812 MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END,
813 esr, far, get_saved_state_pc(state), 0, 0);
814 thread->machine.exception_trace_code = 0;
815 } else {
816 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
817 MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_END,
818 esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
819 }
820 #if MACH_ASSERT
821 if (preemption_level != get_preemption_level()) {
822 panic("synchronous exception changed preemption level from %d to %d", preemption_level, get_preemption_level());
823 }
824 #endif
825 }
826
827 /*
828 * Uncategorized exceptions are a catch-all for general execution errors.
829 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
830 */
831 static void
832 handle_uncategorized(arm_saved_state_t *state)
833 {
834 exception_type_t exception = EXC_BAD_INSTRUCTION;
835 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
836 mach_msg_type_number_t numcodes = 2;
837 uint32_t instr = 0;
838
839 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
840
841 #if CONFIG_DTRACE
842
843 if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
844 /*
845 * For a 64bit user process, we care about all 4 bytes of the
846 * instr.
847 */
848 if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
849 if (dtrace_user_probe(state) == KERN_SUCCESS) {
850 return;
851 }
852 }
853 } else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
854 /*
855 * For a 32bit user process, we check for thumb mode, in
856 * which case we only care about a 2 byte instruction length.
857 * For non-thumb mode, we care about all 4 bytes of the instruction.
858 */
859 if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
860 if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
861 ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
862 if (dtrace_user_probe(state) == KERN_SUCCESS) {
863 return;
864 }
865 }
866 } else {
867 if ((instr == FASTTRAP_ARM32_INSTR) ||
868 (instr == FASTTRAP_ARM32_RET_INSTR)) {
869 if (dtrace_user_probe(state) == KERN_SUCCESS) {
870 return;
871 }
872 }
873 }
874 }
875
876 #endif /* CONFIG_DTRACE */
877
878 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
879 if (IS_ARM_GDB_TRAP(instr)) {
880 boolean_t interrupt_state;
881 exception = EXC_BREAKPOINT;
882
883 interrupt_state = ml_set_interrupts_enabled(FALSE);
884
885 /* Save off the context here (so that the debug logic
886 * can see the original state of this thread).
887 */
888 current_thread()->machine.kpcb = state;
889
890 /* Hop into the debugger (typically either due to a
891 * fatal exception, an explicit panic, or a stackshot
892 * request).
893 */
894 DebuggerCall(exception, state);
895
896 (void) ml_set_interrupts_enabled(interrupt_state);
897 return;
898 } else {
899 panic("Undefined kernel instruction: pc=%p instr=%x\n", (void*)get_saved_state_pc(state), instr);
900 }
901 }
902
903 /*
904 * Check for GDB breakpoint via illegal opcode.
905 */
906 if (IS_ARM_GDB_TRAP(instr)) {
907 exception = EXC_BREAKPOINT;
908 codes[0] = EXC_ARM_BREAKPOINT;
909 codes[1] = instr;
910 } else {
911 codes[1] = instr;
912 }
913
914 exception_triage(exception, codes, numcodes);
915 __builtin_unreachable();
916 }
917
918 #if __has_feature(ptrauth_calls)
919 static const uint16_t ptrauth_brk_comment_base = 0xc470;
920
921 static inline bool
922 brk_comment_is_ptrauth(uint16_t comment)
923 {
924 return comment >= ptrauth_brk_comment_base &&
925 comment <= ptrauth_brk_comment_base + ptrauth_key_asdb;
926 }
927
928 static inline const char *
929 brk_comment_to_ptrauth_key(uint16_t comment)
930 {
931 switch (comment - ptrauth_brk_comment_base) {
932 case ptrauth_key_asia:
933 return "IA";
934 case ptrauth_key_asib:
935 return "IB";
936 case ptrauth_key_asda:
937 return "DA";
938 case ptrauth_key_asdb:
939 return "DB";
940 default:
941 __builtin_unreachable();
942 }
943 }
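/*
 * Worked example (assuming the standard <ptrauth.h> key numbering,
 * ptrauth_key_asia=0 ... ptrauth_key_asdb=3): a "brk 0xc472" emitted by the
 * compiler encodes 0xc472 - 0xc470 = 2 = ptrauth_key_asda, so
 * brk_comment_is_ptrauth(0xc472) is true and brk_comment_to_ptrauth_key()
 * reports the "DA" key in the panic string built below.
 */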
944 #endif /* __has_feature(ptrauth_calls) */
945
946 static void
947 handle_kernel_breakpoint(arm_saved_state_t *state, uint32_t esr)
948 {
949 uint16_t comment = ISS_BRK_COMMENT(esr);
950
951 #if __has_feature(ptrauth_calls)
952 if (brk_comment_is_ptrauth(comment)) {
953 const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Ptrauth failure with %s key resulted in 0x%016llx";
954 char msg[strlen(msg_fmt)
955 - strlen("0x%04X") + strlen("0xFFFF")
956 - strlen("%s") + strlen("IA")
957 - strlen("0x%016llx") + strlen("0xFFFFFFFFFFFFFFFF")
958 + 1];
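/* The buffer above is sized for the worst-case expansion of each conversion:
 * "0x%04X" -> at most "0xFFFF", "%s" -> a two-character key name, and
 * "0x%016llx" -> "0xFFFFFFFFFFFFFFFF", plus the trailing NUL. */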
959 const char *key = brk_comment_to_ptrauth_key(comment);
960 snprintf(msg, sizeof(msg), msg_fmt, comment, key, saved_state64(state)->x[16]);
961
962 panic_with_thread_kernel_state(msg, state);
963 }
964 #endif /* __has_feature(ptrauth_calls) */
965
966 const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Panic (by design)";
967 char msg[strlen(msg_fmt) - strlen("0x%04X") + strlen("0xFFFF") + 1];
968 snprintf(msg, sizeof(msg), msg_fmt, comment);
969
970 panic_with_thread_kernel_state(msg, state);
971 }
972
973 static void
974 handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused)
975 {
976 exception_type_t exception = EXC_BREAKPOINT;
977 mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT};
978 mach_msg_type_number_t numcodes = 2;
979
980 #if __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__
981 if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 &&
982 brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) {
983 exception |= EXC_PTRAUTH_BIT;
984 }
985 #endif /* __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__ */
986
987 codes[1] = get_saved_state_pc(state);
988 exception_triage(exception, codes, numcodes);
989 __builtin_unreachable();
990 }
991
992 static void
993 handle_watchpoint(vm_offset_t fault_addr)
994 {
995 exception_type_t exception = EXC_BREAKPOINT;
996 mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG};
997 mach_msg_type_number_t numcodes = 2;
998
999 codes[1] = fault_addr;
1000 exception_triage(exception, codes, numcodes);
1001 __builtin_unreachable();
1002 }
1003
1004 static void
1005 handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
1006 abort_inspector_t inspect_abort, abort_handler_t handler, expected_fault_handler_t expected_fault_handler)
1007 {
1008 fault_status_t fault_code;
1009 vm_prot_t fault_type;
1010
1011 inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
1012 handler(state, esr, fault_addr, fault_code, fault_type, recover, expected_fault_handler);
1013 }
1014
1015 static void
1016 inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
1017 {
1018 getCpuDatap()->cpu_stat.instr_ex_cnt++;
1019 *fault_code = ISS_IA_FSC(iss);
1020 *fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
1021 }
1022
1023 static void
1024 inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
1025 {
1026 getCpuDatap()->cpu_stat.data_ex_cnt++;
1027 *fault_code = ISS_DA_FSC(iss);
1028
1029 /*
1030 * Cache maintenance operations always report faults as write access.
1031 * Change these to read access, unless they report a permission fault.
1032 * Only certain cache maintenance operations (e.g. 'dc ivac') require write
1033 * access to the mapping, but if a cache maintenance operation that only requires
1034 * read access generates a permission fault, then we will not be able to handle
1035 * the fault regardless of whether we treat it as a read or write fault.
1036 */
1037 if ((iss & ISS_DA_WNR) && (!(iss & ISS_DA_CM) || is_permission_fault(*fault_code))) {
1038 *fault_type = (VM_PROT_READ | VM_PROT_WRITE);
1039 } else {
1040 *fault_type = (VM_PROT_READ);
1041 }
1042 }
1043
1044 #if __has_feature(ptrauth_calls)
1045 static inline bool
1046 fault_addr_bit(vm_offset_t fault_addr, unsigned int bit)
1047 {
1048 return (bool)((fault_addr >> bit) & 1);
1049 }
1050
1051 /**
1052 * Determines whether a fault address taken at EL0 contains a PAC error code
1053 * corresponding to the specified kind of ptrauth key.
1054 */
1055 static bool
1056 user_fault_addr_matches_pac_error_code(vm_offset_t fault_addr, bool data_key)
1057 {
1058 bool instruction_tbi = !(get_tcr() & TCR_TBID0_TBI_DATA_ONLY);
1059 bool tbi = data_key || __improbable(instruction_tbi);
1060 unsigned int poison_shift;
1061 if (tbi) {
1062 poison_shift = 53;
1063 } else {
1064 poison_shift = 61;
1065 }
1066
1067 /* PAC error codes are always in the form key_number:NOT(key_number) */
1068 bool poison_bit_1 = fault_addr_bit(fault_addr, poison_shift);
1069 bool poison_bit_2 = fault_addr_bit(fault_addr, poison_shift + 1);
1070 return poison_bit_1 != poison_bit_2;
1071 }
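/*
 * Example (a sketch, not tied to a particular configuration): with TBI in
 * effect, poison_shift is 53, so the check looks at FAR bits 54:53. A canonical
 * user pointer has both bits equal, while a pointer that failed PAC
 * authentication carries key:NOT(key) there (01 or 10), so comparing the two
 * bits distinguishes an ordinary bad address from a PAC failure.
 */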
1072 #endif /* __has_feature(ptrauth_calls) */
1073
1074 static void
1075 handle_pc_align(arm_saved_state_t *ss)
1076 {
1077 exception_type_t exc;
1078 mach_exception_data_type_t codes[2];
1079 mach_msg_type_number_t numcodes = 2;
1080
1081 if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
1082 panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
1083 }
1084
1085 exc = EXC_BAD_ACCESS;
1086 #if __has_feature(ptrauth_calls)
1087 if (user_fault_addr_matches_pac_error_code(get_saved_state_pc(ss), false)) {
1088 exc |= EXC_PTRAUTH_BIT;
1089 }
1090 #endif /* __has_feature(ptrauth_calls) */
1091
1092 codes[0] = EXC_ARM_DA_ALIGN;
1093 codes[1] = get_saved_state_pc(ss);
1094
1095 exception_triage(exc, codes, numcodes);
1096 __builtin_unreachable();
1097 }
1098
1099 static void
1100 handle_sp_align(arm_saved_state_t *ss)
1101 {
1102 exception_type_t exc;
1103 mach_exception_data_type_t codes[2];
1104 mach_msg_type_number_t numcodes = 2;
1105
1106 if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
1107 panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
1108 }
1109
1110 exc = EXC_BAD_ACCESS;
1111 #if __has_feature(ptrauth_calls)
1112 if (user_fault_addr_matches_pac_error_code(get_saved_state_sp(ss), true)) {
1113 exc |= EXC_PTRAUTH_BIT;
1114 }
1115 #endif /* __has_feature(ptrauth_calls) */
1116
1117 codes[0] = EXC_ARM_SP_ALIGN;
1118 codes[1] = get_saved_state_sp(ss);
1119
1120 exception_triage(exc, codes, numcodes);
1121 __builtin_unreachable();
1122 }
1123
1124 static void
1125 handle_wf_trap(arm_saved_state_t *state)
1126 {
1127 exception_type_t exc;
1128 mach_exception_data_type_t codes[2];
1129 mach_msg_type_number_t numcodes = 2;
1130 uint32_t instr = 0;
1131
1132 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1133
1134 exc = EXC_BAD_INSTRUCTION;
1135 codes[0] = EXC_ARM_UNDEFINED;
1136 codes[1] = instr;
1137
1138 exception_triage(exc, codes, numcodes);
1139 __builtin_unreachable();
1140 }
1141
1142 static void
1143 handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
1144 {
1145 exception_type_t exc = EXC_ARITHMETIC;
1146 mach_exception_data_type_t codes[2];
1147 mach_msg_type_number_t numcodes = 2;
1148 uint32_t instr = 0;
1149
1150 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1151 panic_with_thread_kernel_state("Floating point exception from kernel", state);
1152 }
1153
1154 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1155 codes[1] = instr;
1156
1157 /* The floating point trap flags are only valid if TFV is set. */
1158 if (!fp_exceptions_enabled) {
1159 exc = EXC_BAD_INSTRUCTION;
1160 codes[0] = EXC_ARM_UNDEFINED;
1161 } else if (!(esr & ISS_FP_TFV)) {
1162 codes[0] = EXC_ARM_FP_UNDEFINED;
1163 } else if (esr & ISS_FP_UFF) {
1164 codes[0] = EXC_ARM_FP_UF;
1165 } else if (esr & ISS_FP_OFF) {
1166 codes[0] = EXC_ARM_FP_OF;
1167 } else if (esr & ISS_FP_IOF) {
1168 codes[0] = EXC_ARM_FP_IO;
1169 } else if (esr & ISS_FP_DZF) {
1170 codes[0] = EXC_ARM_FP_DZ;
1171 } else if (esr & ISS_FP_IDF) {
1172 codes[0] = EXC_ARM_FP_ID;
1173 } else if (esr & ISS_FP_IXF) {
1174 codes[0] = EXC_ARM_FP_IX;
1175 } else {
1176 panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
1177 }
1178
1179 exception_triage(exc, codes, numcodes);
1180 __builtin_unreachable();
1181 }
1182
1183
1184
1185 /*
1186 * handle_alignment_fault_from_user:
1187 * state: Saved state
1188 *
1189 * Attempts to deal with an alignment fault from userspace (possibly by
1190 * emulating the faulting instruction). If emulation failed due to an
1191 * unservicable fault, the ESR for that fault will be stored in the
1192 * recovery_esr field of the thread by the exception code.
1193 *
1194 * Returns:
1195 * -1: Emulation failed (emulation of state/instr not supported)
1196 * 0: Successfully emulated the instruction
1197 * EFAULT: Emulation failed (probably due to permissions)
1198 * EINVAL: Emulation failed (probably due to a bad address)
1199 */
1200 static int
1201 handle_alignment_fault_from_user(arm_saved_state_t *state, kern_return_t *vmfr)
1202 {
1203 int ret = -1;
1204
1205 #pragma unused (state)
1206 #pragma unused (vmfr)
1207
1208 return ret;
1209 }
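/*
 * Note: in this published source the emulation body is stubbed out, so the
 * function always returns -1 ("emulation of state/instr not supported") and a
 * user alignment fault falls through to the EXC_ARM_DA_ALIGN path in
 * handle_user_abort() below.
 */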
1210
1211
1212 static void
1213 handle_sw_step_debug(arm_saved_state_t *state)
1214 {
1215 thread_t thread = current_thread();
1216 exception_type_t exc;
1217 mach_exception_data_type_t codes[2];
1218 mach_msg_type_number_t numcodes = 2;
1219
1220 if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
1221 panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
1222 }
1223
1224 // Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
1225 if (thread->machine.DebugData != NULL) {
1226 thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
1227 } else {
1228 panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
1229 }
1230
1231 mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_IRQF | DAIF_FIQF);
1232
1233 // Special encoding for gdb single step event on ARM
1234 exc = EXC_BREAKPOINT;
1235 codes[0] = 1;
1236 codes[1] = 0;
1237
1238 exception_triage(exc, codes, numcodes);
1239 __builtin_unreachable();
1240 }
1241
1242 static void
1243 set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover)
1244 {
1245 #if defined(HAS_APPLE_PAC)
1246 thread_t thread = current_thread();
1247 const uintptr_t disc = ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER);
1248 const char *panic_msg = "Illegal thread->recover value %p";
1249
1250 MANIPULATE_SIGNED_THREAD_STATE(iss,
1251 // recover = (vm_offset_t)ptrauth_auth_data((void *)recover, ptrauth_key_function_pointer,
1252 // ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
1253 "mov x1, %[recover] \n"
1254 "mov x6, %[disc] \n"
1255 "autia x1, x6 \n"
1256 // if (recover != (vm_offset_t)ptrauth_strip((void *)recover, ptrauth_key_function_pointer)) {
1257 "mov x6, x1 \n"
1258 "xpaci x6 \n"
1259 "cmp x1, x6 \n"
1260 "beq 1f \n"
1261 // panic("Illegal thread->recover value %p", (void *)recover);
1262 "mov x0, %[panic_msg] \n"
1263 "bl _panic \n"
1264 // }
1265 "1: \n"
1266 "str x1, [x0, %[SS64_PC]] \n",
1267 [recover] "r"(recover),
1268 [disc] "r"(disc),
1269 [panic_msg] "r"(panic_msg)
1270 );
1271 #else
1272 set_saved_state_pc(iss, recover);
1273 #endif
1274 }
1275
1276 static void
1277 handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
1278 fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
1279 {
1280 exception_type_t exc = EXC_BAD_ACCESS;
1281 mach_exception_data_type_t codes[2];
1282 mach_msg_type_number_t numcodes = 2;
1283 thread_t thread = current_thread();
1284
1285 (void)esr;
1286 (void)expected_fault_handler;
1287
1288 if (ml_at_interrupt_context()) {
1289 panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
1290 }
1291
1292 thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */
1293
1294 if (is_vm_fault(fault_code)) {
1295 kern_return_t result = KERN_FAILURE;
1296 vm_map_t map = thread->map;
1297 vm_offset_t vm_fault_addr = fault_addr;
1298
1299 assert(map != kernel_map);
1300
1301 if (!(fault_type & VM_PROT_EXECUTE)) {
1302 vm_fault_addr = tbi_clear(fault_addr);
1303 }
1304
1305 #if CONFIG_DTRACE
1306 if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
1307 if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
1308 if (recover) {
1309 thread->machine.recover_esr = esr;
1310 thread->machine.recover_far = vm_fault_addr;
1311 set_saved_state_pc_to_recovery_handler(state, recover);
1312 } else {
1313 panic_with_thread_kernel_state("copyin/out has no recovery point", state);
1314 }
1315 return;
1316 } else {
1317 panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
1318 }
1319 }
1320 #else
1321 (void)recover;
1322 #endif
1323
1324 #if CONFIG_PGTRACE
1325 if (pgtrace_enabled) {
1326 /* Check to see if trace bit is set */
1327 result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
1328 if (result == KERN_SUCCESS) {
1329 return;
1330 }
1331 }
1332 #endif
1333
1334 /* check to see if it is just a pmap ref/modify fault */
1335
1336 if ((result != KERN_SUCCESS) && !is_translation_fault(fault_code)) {
1337 result = arm_fast_fault(map->pmap,
1338 vm_fault_addr,
1339 fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
1340 }
1341 if (result != KERN_SUCCESS) {
1342 {
1343 /* We have to fault the page in */
1344 result = vm_fault(map, vm_fault_addr, fault_type,
1345 /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
1346 /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
1347 }
1348 }
1349 if (result == KERN_SUCCESS || result == KERN_ABORTED) {
1350 return;
1351 }
1352
1353 /*
1354 * vm_fault() should never return KERN_FAILURE for page faults from user space.
1355 * If it does, we're leaking preemption disables somewhere in the kernel.
1356 */
1357 if (__improbable(result == KERN_FAILURE)) {
1358 panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
1359 }
1360
1361 codes[0] = result;
1362 } else if (is_alignment_fault(fault_code)) {
1363 kern_return_t vmfkr = KERN_SUCCESS;
1364 thread->machine.recover_esr = 0;
1365 thread->machine.recover_far = 0;
1366 int result = handle_alignment_fault_from_user(state, &vmfkr);
1367 if (result == 0) {
1368 /* Successfully emulated, or instruction
1369 * copyin() for decode/emulation failed.
1370 * Continue, or redrive instruction.
1371 */
1372 thread_exception_return();
1373 } else if (((result == EFAULT) || (result == EINVAL)) &&
1374 (thread->machine.recover_esr == 0)) {
1375 /*
1376 * If we didn't actually take a fault, but got one of
1377 * these errors, then we failed basic sanity checks of
1378 * the fault address. Treat this as an invalid
1379 * address.
1380 */
1381 codes[0] = KERN_INVALID_ADDRESS;
1382 } else if ((result == EFAULT) &&
1383 (thread->machine.recover_esr)) {
1384 /*
1385 * Since alignment aborts are prioritized
1386 * ahead of translation aborts, the misaligned
1387 * atomic emulation flow may have triggered a
1388 * VM pagefault, which the VM could not resolve.
1389 * Report the VM fault error in codes[]
1390 */
1391
1392 codes[0] = vmfkr;
1393 assertf(vmfkr != KERN_SUCCESS, "Unexpected vmfkr 0x%x", vmfkr);
1394 /* Cause ESR_EC to reflect an EL0 abort */
1395 thread->machine.recover_esr &= ~ESR_EC_MASK;
1396 thread->machine.recover_esr |= (ESR_EC_DABORT_EL0 << ESR_EC_SHIFT);
1397 set_saved_state_esr(thread->machine.upcb, thread->machine.recover_esr);
1398 set_saved_state_far(thread->machine.upcb, thread->machine.recover_far);
1399 fault_addr = thread->machine.recover_far;
1400 } else {
1401 /* This was just an unsupported alignment
1402 * exception. Misaligned atomic emulation
1403 * timeouts fall in this category.
1404 */
1405 codes[0] = EXC_ARM_DA_ALIGN;
1406 }
1407 } else if (is_parity_error(fault_code)) {
1408 #if defined(APPLE_ARM64_ARCH_FAMILY)
1409 if (fault_code == FSC_SYNC_PARITY) {
1410 arm64_platform_error(state, esr, fault_addr);
1411 return;
1412 }
1413 #else
1414 panic("User parity error.");
1415 #endif
1416 } else {
1417 codes[0] = KERN_FAILURE;
1418 }
1419
1420 codes[1] = fault_addr;
1421 #if __has_feature(ptrauth_calls)
1422 bool is_data_abort = (ESR_EC(esr) == ESR_EC_DABORT_EL0);
1423 if (user_fault_addr_matches_pac_error_code(fault_addr, is_data_abort)) {
1424 exc |= EXC_PTRAUTH_BIT;
1425 }
1426 #endif /* __has_feature(ptrauth_calls) */
1427 exception_triage(exc, codes, numcodes);
1428 __builtin_unreachable();
1429 }
1430
1431 #if __ARM_PAN_AVAILABLE__
1432 static int
1433 is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
1434 {
1435 // A PAN (Privileged Access Never) fault occurs when EL1 makes a data read/write to a
1436 // virtual address that is readable/writeable from both EL1 and EL0.
1437
1438 // To check for PAN fault, we evaluate if the following conditions are true:
1439 // 1. This is a permission fault
1440 // 2. PAN is enabled
1441 // 3. AT instruction (on which PAN has no effect) on the same faulting address
1442 // succeeds
1443
1444 vm_offset_t pa;
1445
1446 if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
1447 return FALSE;
1448 }
1449
1450 if (esr & ISS_DA_WNR) {
1451 pa = mmu_kvtop_wpreflight(fault_addr);
1452 } else {
1453 pa = mmu_kvtop(fault_addr);
1454 }
1455 return (pa)? TRUE: FALSE;
1456 }
1457 #endif
1458
1459 static void
1460 handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
1461 fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
1462 {
1463 thread_t thread = current_thread();
1464 (void)esr;
1465
1466 #ifndef CONFIG_XNUPOST
1467 (void)expected_fault_handler;
1468 #endif /* CONFIG_XNUPOST */
1469
1470 #if CONFIG_DTRACE
1471 if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
1472 if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
1473 /*
1474 * Point to next instruction, or recovery handler if set.
1475 */
1476 if (recover) {
1477 thread->machine.recover_esr = esr;
1478 thread->machine.recover_far = fault_addr;
1479 set_saved_state_pc_to_recovery_handler(state, recover);
1480 } else {
1481 add_saved_state_pc(state, 4);
1482 }
1483 return;
1484 } else {
1485 panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
1486 }
1487 }
1488 #endif
1489
1490 #if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */
1491 if (ml_at_interrupt_context()) {
1492 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
1493 }
1494 #endif
1495
1496 if (is_vm_fault(fault_code)) {
1497 kern_return_t result = KERN_FAILURE;
1498 vm_map_t map;
1499 int interruptible;
1500
1501 /*
1502 * Ensure no faults in the physical aperture. This could happen if
1503 * a page table is incorrectly allocated from the read only region
1504 * when running with KTRR.
1505 */
1506
1507 #ifdef CONFIG_XNUPOST
1508 if (expected_fault_handler && expected_fault_handler(state)) {
1509 return;
1510 }
1511 #endif /* CONFIG_XNUPOST */
1512
1513 if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
1514 panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
1515 }
1516
1517 if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
1518 map = kernel_map;
1519 interruptible = THREAD_UNINT;
1520 } else {
1521 map = thread->map;
1522 interruptible = THREAD_ABORTSAFE;
1523 }
1524
1525 #if CONFIG_PGTRACE
1526 if (pgtrace_enabled) {
1527 /* Check to see if trace bit is set */
1528 result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
1529 if (result == KERN_SUCCESS) {
1530 return;
1531 }
1532 }
1533
1534 if (ml_at_interrupt_context()) {
1535 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
1536 }
1537 #endif
1538
1539 /* check to see if it is just a pmap ref/modify fault */
1540 if (!is_translation_fault(fault_code)) {
1541 result = arm_fast_fault(map->pmap,
1542 fault_addr,
1543 fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
1544 if (result == KERN_SUCCESS) {
1545 return;
1546 }
1547 }
1548
1549 if (result != KERN_PROTECTION_FAILURE) {
1550 /*
1551 * We have to "fault" the page in.
1552 */
1553 result = vm_fault(map, fault_addr, fault_type,
1554 /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
1555 /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
1556 }
1557
1558 if (result == KERN_SUCCESS) {
1559 return;
1560 }
1561
1562 /*
1563 * If we have a recover handler, invoke it now.
1564 */
1565 if (recover) {
1566 thread->machine.recover_esr = esr;
1567 thread->machine.recover_far = fault_addr;
1568 set_saved_state_pc_to_recovery_handler(state, recover);
1569 return;
1570 }
1571
1572 #if __ARM_PAN_AVAILABLE__
1573 if (is_pan_fault(state, esr, fault_addr, fault_code)) {
1574 panic_with_thread_kernel_state("Privileged access never abort.", state);
1575 }
1576 #endif
1577
1578 #if CONFIG_PGTRACE
1579 } else if (ml_at_interrupt_context()) {
1580 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
1581 #endif
1582 } else if (is_alignment_fault(fault_code)) {
1583 if (recover) {
1584 thread->machine.recover_esr = esr;
1585 thread->machine.recover_far = fault_addr;
1586 set_saved_state_pc_to_recovery_handler(state, recover);
1587 return;
1588 }
1589 panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
1590 } else if (is_parity_error(fault_code)) {
1591 #if defined(APPLE_ARM64_ARCH_FAMILY)
1592 if (fault_code == FSC_SYNC_PARITY) {
1593 arm64_platform_error(state, esr, fault_addr);
1594 return;
1595 }
1596 #else
1597 panic_with_thread_kernel_state("Kernel parity error.", state);
1598 #endif
1599 } else {
1600 kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
1601 }
1602
1603 panic_with_thread_kernel_state("Kernel data abort.", state);
1604 }
1605
1606 extern void syscall_trace(struct arm_saved_state * regs);
1607
1608 static void
1609 handle_svc(arm_saved_state_t *state)
1610 {
1611 int trap_no = get_saved_state_svc_number(state);
1612 thread_t thread = current_thread();
1613 struct proc *p;
1614
1615 #define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */
1616
1617 #define TRACE_SYSCALL 1
1618 #if TRACE_SYSCALL
1619 syscall_trace(state);
1620 #endif
1621
1622 thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */
1623
1624 if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
1625 platform_syscall(state);
1626 panic("Returned from platform_syscall()?");
1627 }
1628
1629 mach_kauth_cred_uthread_update();
1630
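	/*
	 * SVC dispatch convention: negative trap numbers select Mach traps (with the
	 * MACH_ARM_TRAP_ABSTIME / MACH_ARM_TRAP_CONTTIME fast paths handled inline
	 * below), while non-negative numbers go to the BSD/unix syscall handler.
	 */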
1631 if (trap_no < 0) {
1632 if (trap_no == MACH_ARM_TRAP_ABSTIME) {
1633 handle_mach_absolute_time_trap(state);
1634 return;
1635 } else if (trap_no == MACH_ARM_TRAP_CONTTIME) {
1636 handle_mach_continuous_time_trap(state);
1637 return;
1638 }
1639
1640 /* Counting perhaps better in the handler, but this is how it's been done */
1641 thread->syscalls_mach++;
1642 mach_syscall(state);
1643 } else {
1644 /* Counting perhaps better in the handler, but this is how it's been done */
1645 thread->syscalls_unix++;
1646 p = get_bsdthreadtask_info(thread);
1647
1648 assert(p);
1649
1650 unix_syscall(state, thread, (struct uthread*)thread->uthread, p);
1651 }
1652 }
1653
1654 static void
1655 handle_mach_absolute_time_trap(arm_saved_state_t *state)
1656 {
1657 uint64_t now = mach_absolute_time();
1658 saved_state64(state)->x[0] = now;
1659 }
1660
1661 static void
1662 handle_mach_continuous_time_trap(arm_saved_state_t *state)
1663 {
1664 uint64_t now = mach_continuous_time();
1665 saved_state64(state)->x[0] = now;
1666 }
1667
1668 __attribute__((noreturn))
1669 static void
1670 handle_msr_trap(arm_saved_state_t *state, uint32_t esr)
1671 {
1672 exception_type_t exception = EXC_BAD_INSTRUCTION;
1673 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
1674 mach_msg_type_number_t numcodes = 2;
1675 uint32_t instr = 0;
1676
1677 if (!is_saved_state64(state)) {
1678 panic("MSR/MRS trap (ESR 0x%x) from 32-bit state\n", esr);
1679 }
1680
1681 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1682 panic("MSR/MRS trap (ESR 0x%x) from kernel\n", esr);
1683 }
1684
1685 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1686 codes[1] = instr;
1687
1688 exception_triage(exception, codes, numcodes);
1689 __builtin_unreachable();
1690 }
1691
1692
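/*
 * handle_user_trapped_instruction32:
 * Same reporting as handle_msr_trap(), but for instructions trapped from
 * 32-bit (AArch32) userspace; a 64-bit saved state or a kernel-mode trap
 * here is fatal.
 */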
1693 static void
1694 handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
1695 {
1696 exception_type_t exception = EXC_BAD_INSTRUCTION;
1697 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
1698 mach_msg_type_number_t numcodes = 2;
1699 uint32_t instr;
1700
1701 if (is_saved_state64(state)) {
1702 panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
1703 }
1704
1705 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1706 panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
1707 }
1708
1709 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1710 codes[1] = instr;
1711
1712 exception_triage(exception, codes, numcodes);
1713 __builtin_unreachable();
1714 }
1715
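/*
 * handle_simd_trap:
 * A SIMD/FP trap from userspace is reported as EXC_BAD_INSTRUCTION /
 * EXC_ARM_UNDEFINED, with the faulting instruction word as the second
 * exception code; a kernel-mode trap here is fatal.
 */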
1716 static void
1717 handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
1718 {
1719 exception_type_t exception = EXC_BAD_INSTRUCTION;
1720 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
1721 mach_msg_type_number_t numcodes = 2;
1722 uint32_t instr = 0;
1723
1724 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1725 panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
1726 }
1727
1728 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1729 codes[1] = instr;
1730
1731 exception_triage(exception, codes, numcodes);
1732 __builtin_unreachable();
1733 }
1734
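/*
 * sleh_irq:
 * Second-level IRQ handler.  Emits the interrupt tracepoints, dispatches to
 * the platform expert (USE_APPLEARMSMP) or to the registered per-CPU
 * interrupt handler, collects entropy via entropy_collect(), and asserts
 * that the handler did not change the preemption level.
 */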
1735 void
1736 sleh_irq(arm_saved_state_t *state)
1737 {
1738 cpu_data_t * cdp __unused = getCpuDatap();
1739 #if MACH_ASSERT
1740 int preemption_level = get_preemption_level();
1741 #endif
1742
1743
1744 sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);
1745
1746 #if USE_APPLEARMSMP
1747 PE_handle_ext_interrupt();
1748 #else
1749 /* Run the registered interrupt handler. */
1750 cdp->interrupt_handler(cdp->interrupt_target,
1751 cdp->interrupt_refCon,
1752 cdp->interrupt_nub,
1753 cdp->interrupt_source);
1754 #endif
1755
1756 entropy_collect();
1757
1758 sleh_interrupt_handler_epilogue();
1759 #if MACH_ASSERT
1760 if (preemption_level != get_preemption_level()) {
1761 panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, get_preemption_level());
1762 }
1763 #endif
1764 }
1765
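/*
 * sleh_fiq:
 * Second-level FIQ handler.  The FIQ is classified as a fast IPI (HAS_IPI),
 * a PMI (MONOTONIC_FIQ), or a timer, and dispatched to cpu_signal_handler(),
 * mt_fiq(), or rtclock_intr() respectively.
 */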
1766 void
1767 sleh_fiq(arm_saved_state_t *state)
1768 {
1769 unsigned int type = DBG_INTR_TYPE_UNKNOWN;
1770 #if MACH_ASSERT
1771 int preemption_level = get_preemption_level();
1772 #endif
1773
1774 #if MONOTONIC_FIQ
1775 uint64_t pmcr0 = 0, upmsr = 0;
1776 #endif /* MONOTONIC_FIQ */
1777
1778 #if defined(HAS_IPI)
1779 boolean_t is_ipi = FALSE;
1780 uint64_t ipi_sr = 0;
1781
1782 if (gFastIPI) {
1783 MRS(ipi_sr, ARM64_REG_IPI_SR);
1784
1785 if (ipi_sr & 1) {
1786 is_ipi = TRUE;
1787 }
1788 }
1789
1790 if (is_ipi) {
1791 type = DBG_INTR_TYPE_IPI;
1792 } else
1793 #endif /* defined(HAS_IPI) */
1794 #if MONOTONIC_FIQ
1795 if (mt_pmi_pending(&pmcr0, &upmsr)) {
1796 type = DBG_INTR_TYPE_PMI;
1797 } else
1798 #endif /* MONOTONIC_FIQ */
1799 if (ml_get_timer_pending()) {
1800 type = DBG_INTR_TYPE_TIMER;
1801 }
1802
1803 sleh_interrupt_handler_prologue(state, type);
1804
1805 #if defined(HAS_IPI)
1806 if (is_ipi) {
1807 /*
1808 * Order is important here: we must ack the IPI by writing IPI_SR
1809 * before we call cpu_signal_handler(). Otherwise, there will be
1810 * a window between the completion of pending-signal processing in
1811 * cpu_signal_handler() and the ack during which a newly-issued
1812 * IPI to this CPU may be lost. ISB is required to ensure the msr
1813 * is retired before execution of cpu_signal_handler().
1814 */
1815 MSR(ARM64_REG_IPI_SR, ipi_sr);
1816 __builtin_arm_isb(ISB_SY);
1817 cpu_signal_handler();
1818 } else
1819 #endif /* defined(HAS_IPI) */
1820 #if MONOTONIC_FIQ
1821 if (type == DBG_INTR_TYPE_PMI) {
1822 INTERRUPT_MASKED_DEBUG_START(mt_fiq, DBG_INTR_TYPE_PMI);
1823 mt_fiq(getCpuDatap(), pmcr0, upmsr);
1824 INTERRUPT_MASKED_DEBUG_END();
1825 } else
1826 #endif /* MONOTONIC_FIQ */
1827 {
1828 /*
1829 * We don't know that this is a timer, but we don't have insight into
1830 * the other interrupts that go down this path.
1831 */
1832
1833 cpu_data_t *cdp = getCpuDatap();
1834
1835 cdp->cpu_decrementer = -1; /* Large */
1836
1837 /*
1838 * ARM64_TODO: whether we're coming from userland is ignored right now.
1839 * We can easily thread it through, but not bothering for the
1840 * moment (AArch32 doesn't either).
1841 */
1842 INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_TIMER);
1843 rtclock_intr(TRUE);
1844 INTERRUPT_MASKED_DEBUG_END();
1845 }
1846
1847 sleh_interrupt_handler_epilogue();
1848 #if MACH_ASSERT
1849 if (preemption_level != get_preemption_level()) {
1850 panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, get_preemption_level());
1851 }
1852 #endif
1853 }
1854
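/*
 * sleh_serror:
 * Second-level SError (asynchronous external abort) handler.  The saved
 * context is sanity-checked and handed to arm64_platform_error(), bracketed
 * by kdebug tracepoints.
 */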
1855 void
1856 sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
1857 {
1858 task_vtimer_check(current_thread());
1859
1860 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_START,
1861 esr, VM_KERNEL_ADDRHIDE(far));
1862 arm_saved_state_t *state = &context->ss;
1863 #if MACH_ASSERT
1864 int preemption_level = get_preemption_level();
1865 #endif
1866
1867 ASSERT_CONTEXT_SANITY(context);
1868 arm64_platform_error(state, esr, far);
1869 #if MACH_ASSERT
1870 if (preemption_level != get_preemption_level()) {
1871 panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level());
1872 }
1873 #endif
1874 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_END,
1875 esr, VM_KERNEL_ADDRHIDE(far));
1876 }
1877
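/*
 * mach_syscall_trace_exit:
 * Emit the DBG_FUNC_END kdebug tracepoint for a Mach syscall, recording its
 * return value.
 */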
1878 void
1879 mach_syscall_trace_exit(unsigned int retval,
1880 unsigned int call_number)
1881 {
1882 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1883 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
1884 DBG_FUNC_END, retval, 0, 0, 0, 0);
1885 }
1886
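/*
 * thread_syscall_return:
 * Complete a Mach syscall: store the return code in x0 of the user saved
 * state, emit the syscall-exit tracepoint if kdebug is enabled, and return
 * to userspace via thread_exception_return().
 */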
1887 __attribute__((noreturn))
1888 void
1889 thread_syscall_return(kern_return_t error)
1890 {
1891 thread_t thread;
1892 struct arm_saved_state *state;
1893
1894 thread = current_thread();
1895 state = get_user_regs(thread);
1896
1897 assert(is_saved_state64(state));
1898 saved_state64(state)->x[0] = error;
1899
1900 #if MACH_ASSERT
1901 kern_allocation_name_t
1902 prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
1903 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
1904 #endif /* MACH_ASSERT */
1905
1906 if (kdebug_enable) {
1907 /* Invert syscall number (negative for a mach syscall) */
1908 mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
1909 }
1910
1911 thread_exception_return();
1912 }
1913
1914 void
1915 syscall_trace(
1916 struct arm_saved_state * regs __unused)
1917 {
1918 /* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
1919 }
1920
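/*
 * Common interrupt bookkeeping shared by sleh_irq() and sleh_fiq(): the
 * prologue checks vtimers, emits the DBG_FUNC_START tracepoint (with the
 * interrupted PC, unslid for kernel addresses), and marks the current thread
 * for telemetry if a record is pending; the epilogue notifies kperf and
 * emits the matching DBG_FUNC_END tracepoint.
 */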
1921 static void
1922 sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
1923 {
1924 bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));
1925
1926 task_vtimer_check(current_thread());
1927
1928 uint64_t pc = is_user ? get_saved_state_pc(state) :
1929 VM_KERNEL_UNSLIDE(get_saved_state_pc(state));
1930
1931 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
1932 0, pc, is_user, type);
1933
1934 #if CONFIG_TELEMETRY
1935 if (telemetry_needs_record) {
1936 telemetry_mark_curthread((boolean_t)is_user, FALSE);
1937 }
1938 #endif /* CONFIG_TELEMETRY */
1939 }
1940
1941 static void
1942 sleh_interrupt_handler_epilogue(void)
1943 {
1944 #if KPERF
1945 kperf_interrupt();
1946 #endif /* KPERF */
1947 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
1948 }
1949
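/*
 * sleh_invalid_stack:
 * Called when an exception is taken with an invalid kernel stack pointer.
 * If SP lies within one page below the base of the kernel stack, the failure
 * is reported as a probable stack overflow; otherwise it is reported as
 * probable stack corruption.
 */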
1950 void
1951 sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
1952 {
1953 thread_t thread = current_thread();
1954 vm_offset_t kernel_stack_bottom, sp;
1955
1956 sp = get_saved_state_sp(&context->ss);
1957 kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;
1958
1959 if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
1960 panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
1961 }
1962
1963 panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
1964 }
1965