1 /*
2 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm/caches_internal.h>
30 #include <arm/cpu_data.h>
31 #include <arm/cpu_data_internal.h>
32 #include <arm/misc_protos.h>
33 #include <arm/thread.h>
34 #include <arm/rtclock.h>
35 #include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
36 #include <arm64/proc_reg.h>
37 #include <arm64/machine_machdep.h>
38 #include <arm64/monotonic.h>
39 #include <arm64/instructions.h>
40
41 #include <kern/debug.h>
42 #include <kern/thread.h>
43 #include <mach/exception.h>
44 #include <mach/arm/traps.h>
45 #include <mach/vm_types.h>
46 #include <mach/machine/thread_status.h>
47
48 #include <machine/atomic.h>
49 #include <machine/limits.h>
50
51 #include <pexpert/arm/protos.h>
52
53 #include <vm/vm_page.h>
54 #include <vm/pmap.h>
55 #include <vm/vm_fault.h>
56 #include <vm/vm_kern.h>
57
58 #include <sys/errno.h>
59 #include <sys/kdebug.h>
60 #include <kperf/kperf.h>
61
62 #include <kern/policy_internal.h>
63 #if CONFIG_TELEMETRY
64 #include <kern/telemetry.h>
65 #endif
66
67 #include <prng/entropy.h>
68
69
70
71 #ifndef __arm64__
72 #error Should only be compiling for arm64.
73 #endif
74
75 #define TEST_CONTEXT32_SANITY(context) \
76 (context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
77 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)
78
79 #define TEST_CONTEXT64_SANITY(context) \
80 (context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
81 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)
82
83 #define ASSERT_CONTEXT_SANITY(context) \
84 assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))
85
86
87 #define COPYIN(src, dst, size) \
88 (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
89 copyin_kern(src, dst, size) : \
90 copyin(src, dst, size)
91
92 #define COPYOUT(src, dst, size) \
93 (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
94 copyout_kern(src, dst, size) : \
95 copyout(src, dst, size)
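/*
 * Note on COPYIN/COPYOUT: both macros implicitly capture the local variable
 * `state` from the enclosing handler and use its saved CPSR to decide whether
 * the faulting context was EL1 or EL0. A rough sketch of what a use such as
 * COPYIN(pc, (char *)&instr, sizeof(instr)) expands to:
 *
 *     (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ?
 *         copyin_kern(pc, (char *)&instr, sizeof(instr)) :   // source was kernel mode
 *         copyin(pc, (char *)&instr, sizeof(instr));         // source was user mode
 *
 * so they may only be used in functions that have an arm_saved_state_t *state
 * in scope.
 */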
96
 97 // Below stringifies a macro argument (after expansion) so it can be used as, or concatenated with, a string literal
98 #define STR1(x) #x
99 #define STR(x) STR1(x)
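/*
 * STR() is the usual two-level expand-then-stringify idiom: the argument is
 * macro-expanded first, and the result is then turned into a string literal.
 * A minimal sketch, assuming ARM64_REG_MMU_ERR_STS were defined as the
 * hypothetical encoding S3_6_C15_C0_0:
 *
 *     STR1(ARM64_REG_MMU_ERR_STS)   // -> "ARM64_REG_MMU_ERR_STS"
 *     STR(ARM64_REG_MMU_ERR_STS)    // -> "S3_6_C15_C0_0"
 *
 * which is the form __builtin_arm_rsr64() expects for the implementation-defined
 * registers read below.
 */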
100
101 #define ARM64_KDBG_CODE_KERNEL (0 << 8)
102 #define ARM64_KDBG_CODE_USER (1 << 8)
103 #define ARM64_KDBG_CODE_GUEST (2 << 8)
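/*
 * The kdebug code for a synchronous exception packs the ESR exception class
 * into the low byte and the origin (kernel/user/guest) into bits 8-9, so a
 * user-mode SVC, for example, is traced with code
 * (ARM64_KDBG_CODE_USER | ESR_EC_SVC_64).
 */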
104
105 _Static_assert(ARM64_KDBG_CODE_GUEST <= KDBG_CODE_MAX, "arm64 KDBG trace codes out of range");
106 _Static_assert(ARM64_KDBG_CODE_GUEST <= UINT16_MAX, "arm64 KDBG trace codes out of range");
107
108 void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;
109
110 void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
111 void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);
112 void sleh_irq(arm_saved_state_t *);
113 void sleh_fiq(arm_saved_state_t *);
114 void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
115 void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;
116
117 static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
118 static void sleh_interrupt_handler_epilogue(void);
119
120 static void handle_svc(arm_saved_state_t *);
121 static void handle_mach_absolute_time_trap(arm_saved_state_t *);
122 static void handle_mach_continuous_time_trap(arm_saved_state_t *);
123
124 static void handle_msr_trap(arm_saved_state_t *state, uint32_t esr);
125
126 extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);
127
128 static void handle_uncategorized(arm_saved_state_t *);
129 static void handle_kernel_breakpoint(arm_saved_state_t *, uint32_t) __dead2;
130 static void handle_breakpoint(arm_saved_state_t *, uint32_t) __dead2;
131
132 typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
133 static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
134 static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);
135
136 static int is_vm_fault(fault_status_t);
137 static int is_translation_fault(fault_status_t);
138 static int is_alignment_fault(fault_status_t);
139
140 typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
141 static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
142 static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
143
144 static void handle_pc_align(arm_saved_state_t *ss) __dead2;
145 static void handle_sp_align(arm_saved_state_t *ss) __dead2;
146 static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
147 static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
148 static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;
149
150 static void handle_watchpoint(vm_offset_t fault_addr) __dead2;
151
152 static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t, expected_fault_handler_t);
153
154 static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;
155
156 static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;
157
158 extern void mach_kauth_cred_uthread_update(void);
159 void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
160
161 struct uthread;
162 struct proc;
163
164 typedef uint32_t arm64_instr_t;
165
166 extern void
167 unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
168 struct uthread * uthread, struct proc * proc);
169
170 extern void
171 mach_syscall(struct arm_saved_state*);
172
173 #if CONFIG_DTRACE
174 extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
175 extern boolean_t dtrace_tally_fault(user_addr_t);
176
177 /*
 178 * Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so the
 179 * trap instructions are copied over from that file.
 180 * Need to keep these in sync!
181 */
182 #define FASTTRAP_ARM32_INSTR 0xe7ffdefc
183 #define FASTTRAP_THUMB32_INSTR 0xdefc
184 #define FASTTRAP_ARM64_INSTR 0xe7eeee7e
185
186 #define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
187 #define FASTTRAP_THUMB32_RET_INSTR 0xdefb
188 #define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d
189
190 /* See <rdar://problem/4613924> */
191 perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
192 #endif
193
194
195 #if CONFIG_PGTRACE
196 extern boolean_t pgtrace_enabled;
197 #endif
198
199 #if HAS_TWO_STAGE_SPR_LOCK
200 #ifdef CONFIG_XNUPOST
201 extern volatile vm_offset_t spr_lock_test_addr;
202 extern volatile uint32_t spr_lock_exception_esr;
203 #endif
204 #endif
205
206 #if INTERRUPT_MASKED_DEBUG
207 extern boolean_t interrupt_masked_debug;
208 #endif
209
210 extern void arm64_thread_exception_return(void) __dead2;
211
212 #if defined(APPLETYPHOON)
213 #define CPU_NAME "Typhoon"
214 #elif defined(APPLETWISTER)
215 #define CPU_NAME "Twister"
216 #elif defined(APPLEHURRICANE)
217 #define CPU_NAME "Hurricane"
218 #elif defined(APPLELIGHTNING)
219 #define CPU_NAME "Lightning"
220 #else
221 #define CPU_NAME "Unknown"
222 #endif
223
224 #if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
225 #define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
226 #define ESR_WT_REASON(esr) ((esr) & 0xff)
227
228 #define WT_REASON_NONE 0
229 #define WT_REASON_INTEGRITY_FAIL 1
230 #define WT_REASON_BAD_SYSCALL 2
231 #define WT_REASON_NOT_LOCKED 3
232 #define WT_REASON_ALREADY_LOCKED 4
233 #define WT_REASON_SW_REQ 5
234 #define WT_REASON_PT_INVALID 6
235 #define WT_REASON_PT_VIOLATION 7
236 #define WT_REASON_REG_VIOLATION 8
237 #endif
238
239 #if defined(HAS_IPI)
240 void cpu_signal_handler(void);
241 extern unsigned int gFastIPI;
242 #endif /* defined(HAS_IPI) */
243
244 static arm_saved_state64_t *original_faulting_state = NULL;
245
246 TUNABLE(bool, fp_exceptions_enabled, "-fp_exceptions", false);
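/*
 * fp_exceptions_enabled defaults to false and is flipped by the "-fp_exceptions"
 * boot-arg. When it is left off, handle_fp_trap() below reports trapped
 * floating-point exceptions as EXC_BAD_INSTRUCTION/EXC_ARM_UNDEFINED rather
 * than decoding the individual IEEE exception flags from the ESR.
 */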
247
248 extern vm_offset_t static_memory_end;
249
250 static inline int
251 is_vm_fault(fault_status_t status)
252 {
253 switch (status) {
254 case FSC_TRANSLATION_FAULT_L0:
255 case FSC_TRANSLATION_FAULT_L1:
256 case FSC_TRANSLATION_FAULT_L2:
257 case FSC_TRANSLATION_FAULT_L3:
258 case FSC_ACCESS_FLAG_FAULT_L1:
259 case FSC_ACCESS_FLAG_FAULT_L2:
260 case FSC_ACCESS_FLAG_FAULT_L3:
261 case FSC_PERMISSION_FAULT_L1:
262 case FSC_PERMISSION_FAULT_L2:
263 case FSC_PERMISSION_FAULT_L3:
264 return TRUE;
265 default:
266 return FALSE;
267 }
268 }
269
270 static inline int
271 is_translation_fault(fault_status_t status)
272 {
273 switch (status) {
274 case FSC_TRANSLATION_FAULT_L0:
275 case FSC_TRANSLATION_FAULT_L1:
276 case FSC_TRANSLATION_FAULT_L2:
277 case FSC_TRANSLATION_FAULT_L3:
278 return TRUE;
279 default:
280 return FALSE;
281 }
282 }
283
284 static inline int
285 is_permission_fault(fault_status_t status)
286 {
287 switch (status) {
288 case FSC_PERMISSION_FAULT_L1:
289 case FSC_PERMISSION_FAULT_L2:
290 case FSC_PERMISSION_FAULT_L3:
291 return TRUE;
292 default:
293 return FALSE;
294 }
295 }
296
297 static inline int
298 is_alignment_fault(fault_status_t status)
299 {
300 return status == FSC_ALIGNMENT_FAULT;
301 }
302
303 static inline int
304 is_parity_error(fault_status_t status)
305 {
306 switch (status) {
307 case FSC_SYNC_PARITY:
308 case FSC_ASYNC_PARITY:
309 case FSC_SYNC_PARITY_TT_L1:
310 case FSC_SYNC_PARITY_TT_L2:
311 case FSC_SYNC_PARITY_TT_L3:
312 return TRUE;
313 default:
314 return FALSE;
315 }
316 }
317
318 static inline unsigned
319 __ror(unsigned value, unsigned shift)
320 {
321 return ((unsigned)(value) >> (unsigned)(shift)) |
322 (unsigned)(value) << ((unsigned)(sizeof(unsigned) * CHAR_BIT) - (unsigned)(shift));
323 }
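/*
 * __ror() is a plain 32-bit rotate-right, used below to mix interrupt
 * timestamps into the entropy buffer. A quick worked example with a 32-bit
 * unsigned int:
 *
 *     __ror(0x80000001u, 1) == 0xC0000000u
 *
 * (the low bit wraps around into bit 31). Note the expression would be
 * undefined for shift == 0 or shift == 32; the single caller uses a fixed
 * shift of 9.
 */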
324
325 __dead2
326 static void
327 arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
328 {
329 #if defined(APPLE_ARM64_ARCH_FAMILY)
330 uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
331 #if defined(NO_ECORE)
332 uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;
333
334 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
335 l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
336 l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
337 l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
338 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
339 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
340
341 panic_plain("Unhandled " CPU_NAME
342 " implementation specific error. state=%p esr=%#x far=%p\n"
343 "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
344 "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
345 state, esr, (void *)far,
346 (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
347 (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
348
349 #elif defined(HAS_MIGSTS)
350 uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;
351
352 mpidr = __builtin_arm_rsr64("MPIDR_EL1");
353 migsts = __builtin_arm_rsr64(STR(ARM64_REG_MIGSTS_EL1));
354 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
355 l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
356 l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
357 l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
358 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
359 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
360
361 panic_plain("Unhandled " CPU_NAME
362 " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
363 "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
364 "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
365 state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
366 (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
367 (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
368 #else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
369 uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;
370 #if defined(HAS_DPC_ERR)
371 uint64_t dpc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_DPC_ERR_STS));
372 #endif // defined(HAS_DPC_ERR)
373
374 mpidr = __builtin_arm_rsr64("MPIDR_EL1");
375
376 if (mpidr & MPIDR_PNE) {
377 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
378 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
379 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
380 } else {
381 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_MMU_ERR_STS));
382 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_LSU_ERR_STS));
383 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_FED_ERR_STS));
384 }
385
386 llc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
387 llc_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
388 llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
389
390 panic_plain("Unhandled " CPU_NAME
391 " implementation specific error. state=%p esr=%#x far=%p p-core?%d"
392 #if defined(HAS_DPC_ERR)
393 " dpc_err_sts:%p"
394 #endif
395 "\n"
396 "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
397 "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n",
398 state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
399 #if defined(HAS_DPC_ERR)
400 (void *)dpc_err_sts,
401 #endif
402 (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
403 (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
404 #endif
405 #else // !defined(APPLE_ARM64_ARCH_FAMILY)
406 #pragma unused (state, esr, far)
407 panic_plain("Unhandled implementation specific error\n");
408 #endif
409 }
410
411 #if CONFIG_KERNEL_INTEGRITY
412 #pragma clang diagnostic push
413 #pragma clang diagnostic ignored "-Wunused-parameter"
414 static void
415 kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
416 {
417 #if defined(KERNEL_INTEGRITY_WT)
418 #if (DEVELOPMENT || DEBUG)
419 if (ESR_WT_SERROR(esr)) {
420 switch (ESR_WT_REASON(esr)) {
421 case WT_REASON_INTEGRITY_FAIL:
422 panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
423 case WT_REASON_BAD_SYSCALL:
424 panic_plain("Kernel integrity, bad syscall.");
425 case WT_REASON_NOT_LOCKED:
426 panic_plain("Kernel integrity, not locked.");
427 case WT_REASON_ALREADY_LOCKED:
428 panic_plain("Kernel integrity, already locked.");
429 case WT_REASON_SW_REQ:
430 panic_plain("Kernel integrity, software request.");
431 case WT_REASON_PT_INVALID:
432 panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
433 "walking 0x%016lx.", far);
434 case WT_REASON_PT_VIOLATION:
435 panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
436 far);
437 case WT_REASON_REG_VIOLATION:
438 panic_plain("Kernel integrity, violation in system register %d.",
439 (unsigned) far);
440 default:
441 panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
442 }
443 }
444 #else
445 if (ESR_WT_SERROR(esr)) {
446 panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
447 }
448 #endif
449 #endif
450 }
451 #pragma clang diagnostic pop
452 #endif
453
454 static void
455 arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
456 {
457 cpu_data_t *cdp = getCpuDatap();
458
459 #if CONFIG_KERNEL_INTEGRITY
460 kernel_integrity_error_handler(esr, far);
461 #endif
462
463 if (PE_handle_platform_error(far)) {
464 return;
465 } else if (cdp->platform_error_handler != NULL) {
466 cdp->platform_error_handler(cdp->cpu_id, far);
467 } else {
468 arm64_implementation_specific_error(state, esr, far);
469 }
470 }
471
472 void
473 panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
474 {
475 boolean_t ss_valid;
476
477 ss_valid = is_saved_state64(ss);
478 arm_saved_state64_t *state = saved_state64(ss);
479
480 os_atomic_cmpxchg(&original_faulting_state, NULL, state, seq_cst);
481
482 panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
483 "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n"
484 "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n"
485 "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
486 "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
487 "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
488 "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
489 "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
490 "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n"
491 "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
492 msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
493 state->x[0], state->x[1], state->x[2], state->x[3],
494 state->x[4], state->x[5], state->x[6], state->x[7],
495 state->x[8], state->x[9], state->x[10], state->x[11],
496 state->x[12], state->x[13], state->x[14], state->x[15],
497 state->x[16], state->x[17], state->x[18], state->x[19],
498 state->x[20], state->x[21], state->x[22], state->x[23],
499 state->x[24], state->x[25], state->x[26], state->x[27],
500 state->x[28], state->fp, state->lr, state->sp,
501 state->pc, state->cpsr, state->esr, state->far);
502 }
503
504 void
505 sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
506 {
507 esr_exception_class_t class = ESR_EC(esr);
508 arm_saved_state_t * state = &context->ss;
509
510 switch (class) {
511 case ESR_EC_UNCATEGORIZED:
512 {
513 uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
514 if (IS_ARM_GDB_TRAP(instr)) {
515 DebuggerCall(EXC_BREAKPOINT, state);
516 }
517 }
518 OS_FALLTHROUGH; // panic if we return from the debugger
519 default:
520 panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
521 }
522 }
523
524 #if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
525 static bool
526 handle_msr_write_from_xnupost(arm_saved_state_t *state, uint32_t esr)
527 {
528 user_addr_t pc = get_saved_state_pc(state);
529 if ((spr_lock_test_addr != 0) && (pc == spr_lock_test_addr)) {
530 spr_lock_exception_esr = esr;
531 set_saved_state_pc(state, pc + 4);
532 return true;
533 }
534
535 return false;
536 }
537 #endif
538
539 __attribute__((noreturn))
540 void
541 thread_exception_return()
542 {
543 thread_t thread = current_thread();
544 if (thread->machine.exception_trace_code != 0) {
545 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
546 MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, 0, 0, 0, 0, 0);
547 thread->machine.exception_trace_code = 0;
548 }
549
550 arm64_thread_exception_return();
551 __builtin_unreachable();
552 }
553
554 /*
555 * check whether task vtimers are running and set thread and CPU BSD AST
556 *
557 * must be called with interrupts masked so updates of fields are atomic
558 * must be emitted inline to avoid generating an FBT probe on the exception path
559 *
560 */
561 __attribute__((__always_inline__))
562 static inline void
563 task_vtimer_check(thread_t thread)
564 {
565 if (__improbable(thread->task->vtimers)) {
566 thread->ast |= AST_BSD;
567 thread->machine.CpuDatap->cpu_pending_ast |= AST_BSD;
568 }
569 }
570
571 void
572 sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
573 {
574 esr_exception_class_t class = ESR_EC(esr);
575 arm_saved_state_t * state = &context->ss;
576 vm_offset_t recover = 0;
577 thread_t thread = current_thread();
578 #if MACH_ASSERT
579 int preemption_level = get_preemption_level();
580 #endif
581 expected_fault_handler_t expected_fault_handler = NULL;
582 #ifdef CONFIG_XNUPOST
583 expected_fault_handler_t saved_expected_fault_handler = NULL;
584 uintptr_t saved_expected_fault_addr = 0;
585 #endif /* CONFIG_XNUPOST */
586
587 ASSERT_CONTEXT_SANITY(context);
588
589 task_vtimer_check(thread);
590
591 #if CONFIG_DTRACE
592 /*
593 * Handle kernel DTrace probes as early as possible to minimize the likelihood
594 * that this path will itself trigger a DTrace probe, which would lead to infinite
595 * probe recursion.
596 */
597 if (__improbable((class == ESR_EC_UNCATEGORIZED) && tempDTraceTrapHook &&
598 (tempDTraceTrapHook(EXC_BAD_INSTRUCTION, state, 0, 0) == KERN_SUCCESS))) {
599 return;
600 }
601 #endif
602 bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));
603
604 /*
605 * Use KERNEL_DEBUG_CONSTANT_IST here to avoid producing tracepoints
606 * that would disclose the behavior of PT_DENY_ATTACH processes.
607 */
608 if (is_user) {
609 thread->machine.exception_trace_code = (uint16_t)(ARM64_KDBG_CODE_USER | class);
610 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
611 MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_START,
612 esr, far, get_saved_state_pc(state), 0, 0);
613 } else {
614 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
615 MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_START,
616 esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
617 }
618
619 if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
620 /*
621 * We no longer support 32-bit, which means no 2-byte
622 * instructions.
623 */
624 if (is_user) {
625 panic("Exception on 2-byte instruction, "
626 "context=%p, esr=%#x, far=%p",
627 context, esr, (void *)far);
628 } else {
629 panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
630 }
631 }
632
633 /* Don't run exception handler with recover handler set in case of double fault */
634 if (thread->recover) {
635 recover = thread->recover;
636 thread->recover = (vm_offset_t)NULL;
637 }
638
639 #ifdef CONFIG_XNUPOST
640 if (thread->machine.expected_fault_handler != NULL) {
641 saved_expected_fault_handler = thread->machine.expected_fault_handler;
642 saved_expected_fault_addr = thread->machine.expected_fault_addr;
643
644 thread->machine.expected_fault_handler = NULL;
645 thread->machine.expected_fault_addr = 0;
646
647 if (saved_expected_fault_addr == far) {
648 expected_fault_handler = saved_expected_fault_handler;
649 }
650 }
651 #endif /* CONFIG_XNUPOST */
652
653 /* Inherit the interrupt masks from previous context */
654 if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
655 ml_set_interrupts_enabled(TRUE);
656 }
657
658 switch (class) {
659 case ESR_EC_SVC_64:
660 if (!is_saved_state64(state) || !is_user) {
661 panic("Invalid SVC_64 context");
662 }
663
664 handle_svc(state);
665 break;
666
667 case ESR_EC_DABORT_EL0:
668 handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort, expected_fault_handler);
669 break;
670
671 case ESR_EC_MSR_TRAP:
672 handle_msr_trap(state, esr);
673 break;
674
675
676 case ESR_EC_IABORT_EL0:
677 handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort, expected_fault_handler);
678 break;
679
680 case ESR_EC_IABORT_EL1:
681 #ifdef CONFIG_XNUPOST
682 if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
683 break;
684 }
685 #endif /* CONFIG_XNUPOST */
686
687 panic_with_thread_kernel_state("Kernel instruction fetch abort", state);
688
689 case ESR_EC_PC_ALIGN:
690 handle_pc_align(state);
691 __builtin_unreachable();
692
693 case ESR_EC_DABORT_EL1:
694 handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort, expected_fault_handler);
695 break;
696
697 case ESR_EC_UNCATEGORIZED:
698 assert(!ESR_ISS(esr));
699
700 #if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
701 if (handle_msr_write_from_xnupost(state, esr)) {
702 break;
703 }
704 #endif
705 handle_uncategorized(&context->ss);
706 break;
707
708 case ESR_EC_SP_ALIGN:
709 handle_sp_align(state);
710 __builtin_unreachable();
711
712 case ESR_EC_BKPT_AARCH32:
713 handle_breakpoint(state, esr);
714 __builtin_unreachable();
715
716 case ESR_EC_BRK_AARCH64:
717 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
718 handle_kernel_breakpoint(state, esr);
719 } else {
720 handle_breakpoint(state, esr);
721 }
722 __builtin_unreachable();
723
724 case ESR_EC_BKPT_REG_MATCH_EL0:
725 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
726 handle_breakpoint(state, esr);
727 }
728 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
729 class, state, class, esr, (void *)far);
730 __builtin_unreachable();
731
732 case ESR_EC_BKPT_REG_MATCH_EL1:
733 panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
734 __builtin_unreachable();
735
736 case ESR_EC_SW_STEP_DEBUG_EL0:
737 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
738 handle_sw_step_debug(state);
739 }
740 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
741 class, state, class, esr, (void *)far);
742 __builtin_unreachable();
743
744 case ESR_EC_SW_STEP_DEBUG_EL1:
745 panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
746 __builtin_unreachable();
747
748 case ESR_EC_WATCHPT_MATCH_EL0:
749 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
750 handle_watchpoint(far);
751 }
752 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
753 class, state, class, esr, (void *)far);
754 __builtin_unreachable();
755
756 case ESR_EC_WATCHPT_MATCH_EL1:
757 /*
 758 * If we hit a watchpoint in kernel mode, it was probably in a copyin/copyout path that we don't want to
 759 * abort. Turn off watchpoints and keep going; we'll turn them back on in return_from_exception.
760 */
761 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
762 arm_debug_set(NULL);
763 break; /* return to first level handler */
764 }
765 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
766 class, state, class, esr, (void *)far);
767 __builtin_unreachable();
768
769 case ESR_EC_TRAP_SIMD_FP:
770 handle_simd_trap(state, esr);
771 __builtin_unreachable();
772
773 case ESR_EC_ILLEGAL_INSTR_SET:
774 if (EXCB_ACTION_RERUN !=
775 ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
776 // instruction is not re-executed
777 panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
778 state, class, esr, (void *)far, get_saved_state_cpsr(state));
779 }
780 // must clear this fault in PSR to re-run
781 mask_saved_state_cpsr(state, 0, PSR64_IL);
782 break;
783
784 case ESR_EC_MCR_MRC_CP15_TRAP:
785 case ESR_EC_MCRR_MRRC_CP15_TRAP:
786 case ESR_EC_MCR_MRC_CP14_TRAP:
787 case ESR_EC_LDC_STC_CP14_TRAP:
788 case ESR_EC_MCRR_MRRC_CP14_TRAP:
789 handle_user_trapped_instruction32(state, esr);
790 __builtin_unreachable();
791
792 case ESR_EC_WFI_WFE:
793 // Use of WFI or WFE instruction when they have been disabled for EL0
794 handle_wf_trap(state);
795 __builtin_unreachable();
796
797 case ESR_EC_FLOATING_POINT_64:
798 handle_fp_trap(state, esr);
799 __builtin_unreachable();
800
801 default:
802 panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p",
803 state, class, esr, (void *)far);
804 __builtin_unreachable();
805 }
806
807 #ifdef CONFIG_XNUPOST
808 if (saved_expected_fault_handler != NULL) {
809 thread->machine.expected_fault_handler = saved_expected_fault_handler;
810 thread->machine.expected_fault_addr = saved_expected_fault_addr;
811 }
812 #endif /* CONFIG_XNUPOST */
813
814 if (recover) {
815 thread->recover = recover;
816 }
817 if (is_user) {
818 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
819 MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END,
820 esr, far, get_saved_state_pc(state), 0, 0);
821 thread->machine.exception_trace_code = 0;
822 } else {
823 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
824 MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_END,
825 esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
826 }
827 #if MACH_ASSERT
828 if (preemption_level != get_preemption_level()) {
829 panic("synchronous exception changed preemption level from %d to %d", preemption_level, get_preemption_level());
830 }
831 #endif
832 }
833
834 /*
835 * Uncategorized exceptions are a catch-all for general execution errors.
836 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
837 */
838 static void
839 handle_uncategorized(arm_saved_state_t *state)
840 {
841 exception_type_t exception = EXC_BAD_INSTRUCTION;
842 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
843 mach_msg_type_number_t numcodes = 2;
844 uint32_t instr = 0;
845
846 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
847
848 #if CONFIG_DTRACE
849
850 if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
851 /*
 852 * For a 64-bit user process, we care about all 4 bytes of the
 853 * instruction.
854 */
855 if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
856 if (dtrace_user_probe(state) == KERN_SUCCESS) {
857 return;
858 }
859 }
860 } else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
861 /*
 862 * For a 32-bit user process, we check for Thumb mode, in
 863 * which case we only care about a 2-byte instruction length.
 864 * For non-Thumb mode, we care about all 4 bytes of the instruction.
865 */
866 if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
867 if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
868 ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
869 if (dtrace_user_probe(state) == KERN_SUCCESS) {
870 return;
871 }
872 }
873 } else {
874 if ((instr == FASTTRAP_ARM32_INSTR) ||
875 (instr == FASTTRAP_ARM32_RET_INSTR)) {
876 if (dtrace_user_probe(state) == KERN_SUCCESS) {
877 return;
878 }
879 }
880 }
881 }
882
883 #endif /* CONFIG_DTRACE */
884
885 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
886 if (IS_ARM_GDB_TRAP(instr)) {
887 boolean_t interrupt_state;
888 exception = EXC_BREAKPOINT;
889
890 interrupt_state = ml_set_interrupts_enabled(FALSE);
891
892 /* Save off the context here (so that the debug logic
893 * can see the original state of this thread).
894 */
895 current_thread()->machine.kpcb = state;
896
897 /* Hop into the debugger (typically either due to a
898 * fatal exception, an explicit panic, or a stackshot
 899 * request).
900 */
901 DebuggerCall(exception, state);
902
903 (void) ml_set_interrupts_enabled(interrupt_state);
904 return;
905 } else {
906 panic("Undefined kernel instruction: pc=%p instr=%x\n", (void*)get_saved_state_pc(state), instr);
907 }
908 }
909
910 /*
911 * Check for GDB breakpoint via illegal opcode.
912 */
913 if (IS_ARM_GDB_TRAP(instr)) {
914 exception = EXC_BREAKPOINT;
915 codes[0] = EXC_ARM_BREAKPOINT;
916 codes[1] = instr;
917 } else {
918 codes[1] = instr;
919 }
920
921 exception_triage(exception, codes, numcodes);
922 __builtin_unreachable();
923 }
924
925 #if __has_feature(ptrauth_calls)
926 static const uint16_t ptrauth_brk_comment_base = 0xc470;
927
928 static inline bool
929 brk_comment_is_ptrauth(uint16_t comment)
930 {
931 return comment >= ptrauth_brk_comment_base &&
932 comment <= ptrauth_brk_comment_base + ptrauth_key_asdb;
933 }
934
935 static inline const char *
936 brk_comment_to_ptrauth_key(uint16_t comment)
937 {
938 switch (comment - ptrauth_brk_comment_base) {
939 case ptrauth_key_asia:
940 return "IA";
941 case ptrauth_key_asib:
942 return "IB";
943 case ptrauth_key_asda:
944 return "DA";
945 case ptrauth_key_asdb:
946 return "DB";
947 default:
948 __builtin_unreachable();
949 }
950 }
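/*
 * Example of the encoding handled above, assuming the <ptrauth.h> key
 * numbering (ptrauth_key_asia == 0 ... ptrauth_key_asdb == 3): a trap
 * emitted as "brk #0xc472" satisfies brk_comment_is_ptrauth(), and
 * brk_comment_to_ptrauth_key(0xc472) reports the "DA" key, i.e. a failed
 * data-key authentication.
 */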
951 #endif /* __has_feature(ptrauth_calls) */
952
953 static void
954 handle_kernel_breakpoint(arm_saved_state_t *state, uint32_t esr)
955 {
956 uint16_t comment = ISS_BRK_COMMENT(esr);
957
958 #if __has_feature(ptrauth_calls)
959 if (brk_comment_is_ptrauth(comment)) {
960 const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Ptrauth failure with %s key resulted in 0x%016llx";
961 char msg[strlen(msg_fmt)
962 - strlen("0x%04X") + strlen("0xFFFF")
963 - strlen("%s") + strlen("IA")
964 - strlen("0x%016llx") + strlen("0xFFFFFFFFFFFFFFFF")
965 + 1];
966 const char *key = brk_comment_to_ptrauth_key(comment);
967 snprintf(msg, sizeof(msg), msg_fmt, comment, key, saved_state64(state)->x[16]);
968
969 panic_with_thread_kernel_state(msg, state);
970 }
971 #endif /* __has_feature(ptrauth_calls) */
972
973 const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Panic (by design)";
974 char msg[strlen(msg_fmt) - strlen("0x%04X") + strlen("0xFFFF") + 1];
975 snprintf(msg, sizeof(msg), msg_fmt, comment);
976
977 panic_with_thread_kernel_state(msg, state);
978 }
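/*
 * The message buffers above are sized by arithmetic on the format string:
 * each conversion specification's length is subtracted and replaced with the
 * length of its worst-case expansion (e.g. "0x%04X" -> "0xFFFF",
 * "0x%016llx" -> "0xFFFFFFFFFFFFFFFF"), plus one byte for the terminating
 * NUL, so the snprintf() can never truncate the panic string.
 */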
979
980 static void
981 handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused)
982 {
983 exception_type_t exception = EXC_BREAKPOINT;
984 mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT};
985 mach_msg_type_number_t numcodes = 2;
986
987 #if __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__
988 if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 &&
989 brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) {
990 exception |= EXC_PTRAUTH_BIT;
991 }
992 #endif /* __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__ */
993
994 codes[1] = get_saved_state_pc(state);
995 exception_triage(exception, codes, numcodes);
996 __builtin_unreachable();
997 }
998
999 static void
1000 handle_watchpoint(vm_offset_t fault_addr)
1001 {
1002 exception_type_t exception = EXC_BREAKPOINT;
1003 mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG};
1004 mach_msg_type_number_t numcodes = 2;
1005
1006 codes[1] = fault_addr;
1007 exception_triage(exception, codes, numcodes);
1008 __builtin_unreachable();
1009 }
1010
1011 static void
1012 handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
1013 abort_inspector_t inspect_abort, abort_handler_t handler, expected_fault_handler_t expected_fault_handler)
1014 {
1015 fault_status_t fault_code;
1016 vm_prot_t fault_type;
1017
1018 inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
1019 handler(state, esr, fault_addr, fault_code, fault_type, recover, expected_fault_handler);
1020 }
1021
1022 static void
1023 inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
1024 {
1025 getCpuDatap()->cpu_stat.instr_ex_cnt++;
1026 *fault_code = ISS_IA_FSC(iss);
1027 *fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
1028 }
1029
1030 static void
1031 inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
1032 {
1033 getCpuDatap()->cpu_stat.data_ex_cnt++;
1034 *fault_code = ISS_DA_FSC(iss);
1035
1036 /*
1037 * Cache maintenance operations always report faults as write access.
1038 * Change these to read access, unless they report a permission fault.
1039 * Only certain cache maintenance operations (e.g. 'dc ivac') require write
1040 * access to the mapping, but if a cache maintenance operation that only requires
1041 * read access generates a permission fault, then we will not be able to handle
1042 * the fault regardless of whether we treat it as a read or write fault.
1043 */
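/*
 * The resulting classification, for reference:
 *
 *     WnR == 0 (read)                         -> VM_PROT_READ
 *     WnR == 1, CM == 0 (plain write)         -> VM_PROT_READ | VM_PROT_WRITE
 *     WnR == 1, CM == 1, permission fault     -> VM_PROT_READ | VM_PROT_WRITE
 *     WnR == 1, CM == 1, other fault types    -> VM_PROT_READ
 */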
1044 if ((iss & ISS_DA_WNR) && (!(iss & ISS_DA_CM) || is_permission_fault(*fault_code))) {
1045 *fault_type = (VM_PROT_READ | VM_PROT_WRITE);
1046 } else {
1047 *fault_type = (VM_PROT_READ);
1048 }
1049 }
1050
1051 #if __has_feature(ptrauth_calls)
1052 static inline bool
1053 fault_addr_bit(vm_offset_t fault_addr, unsigned int bit)
1054 {
1055 return (bool)((fault_addr >> bit) & 1);
1056 }
1057
1058 /**
1059 * Determines whether a fault address taken at EL0 contains a PAC error code
1060 * corresponding to the specified kind of ptrauth key.
1061 */
1062 static bool
1063 user_fault_addr_matches_pac_error_code(vm_offset_t fault_addr, bool data_key)
1064 {
1065 bool instruction_tbi = !(get_tcr() & TCR_TBID0_TBI_DATA_ONLY);
1066 bool tbi = data_key || __improbable(instruction_tbi);
1067 unsigned int poison_shift;
1068 if (tbi) {
1069 poison_shift = 53;
1070 } else {
1071 poison_shift = 61;
1072 }
1073
1074 /* PAC error codes are always in the form key_number:NOT(key_number) */
1075 bool poison_bit_1 = fault_addr_bit(fault_addr, poison_shift);
1076 bool poison_bit_2 = fault_addr_bit(fault_addr, poison_shift + 1);
1077 return poison_bit_1 != poison_bit_2;
1078 }
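/*
 * Worked example of the check above, assuming TBI applies (data key, or
 * instruction key with TBID not restricting TBI to data): the poison bits are
 * then bits 54:53. A canonical user pointer has both bits clear, so the bits
 * match and no PAC error is reported, whereas an auth failure poisons the
 * address so that the two bits differ, e.g. 0x0020000000001000 has bit 53 set
 * and bit 54 clear, and the function returns true.
 */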
1079 #endif /* __has_feature(ptrauth_calls) */
1080
1081 static void
1082 handle_pc_align(arm_saved_state_t *ss)
1083 {
1084 exception_type_t exc;
1085 mach_exception_data_type_t codes[2];
1086 mach_msg_type_number_t numcodes = 2;
1087
1088 if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
1089 panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
1090 }
1091
1092 exc = EXC_BAD_ACCESS;
1093 #if __has_feature(ptrauth_calls)
1094 if (user_fault_addr_matches_pac_error_code(get_saved_state_pc(ss), false)) {
1095 exc |= EXC_PTRAUTH_BIT;
1096 }
1097 #endif /* __has_feature(ptrauth_calls) */
1098
1099 codes[0] = EXC_ARM_DA_ALIGN;
1100 codes[1] = get_saved_state_pc(ss);
1101
1102 exception_triage(exc, codes, numcodes);
1103 __builtin_unreachable();
1104 }
1105
1106 static void
1107 handle_sp_align(arm_saved_state_t *ss)
1108 {
1109 exception_type_t exc;
1110 mach_exception_data_type_t codes[2];
1111 mach_msg_type_number_t numcodes = 2;
1112
1113 if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
1114 panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
1115 }
1116
1117 exc = EXC_BAD_ACCESS;
1118 #if __has_feature(ptrauth_calls)
1119 if (user_fault_addr_matches_pac_error_code(get_saved_state_sp(ss), true)) {
1120 exc |= EXC_PTRAUTH_BIT;
1121 }
1122 #endif /* __has_feature(ptrauth_calls) */
1123
1124 codes[0] = EXC_ARM_SP_ALIGN;
1125 codes[1] = get_saved_state_sp(ss);
1126
1127 exception_triage(exc, codes, numcodes);
1128 __builtin_unreachable();
1129 }
1130
1131 static void
1132 handle_wf_trap(arm_saved_state_t *state)
1133 {
1134 exception_type_t exc;
1135 mach_exception_data_type_t codes[2];
1136 mach_msg_type_number_t numcodes = 2;
1137 uint32_t instr = 0;
1138
1139 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1140
1141 exc = EXC_BAD_INSTRUCTION;
1142 codes[0] = EXC_ARM_UNDEFINED;
1143 codes[1] = instr;
1144
1145 exception_triage(exc, codes, numcodes);
1146 __builtin_unreachable();
1147 }
1148
1149 static void
1150 handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
1151 {
1152 exception_type_t exc = EXC_ARITHMETIC;
1153 mach_exception_data_type_t codes[2];
1154 mach_msg_type_number_t numcodes = 2;
1155 uint32_t instr = 0;
1156
1157 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1158 panic_with_thread_kernel_state("Floating point exception from kernel", state);
1159 }
1160
1161 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1162 codes[1] = instr;
1163
1164 /* The floating point trap flags are only valid if TFV is set. */
1165 if (!fp_exceptions_enabled) {
1166 exc = EXC_BAD_INSTRUCTION;
1167 codes[0] = EXC_ARM_UNDEFINED;
1168 } else if (!(esr & ISS_FP_TFV)) {
1169 codes[0] = EXC_ARM_FP_UNDEFINED;
1170 } else if (esr & ISS_FP_UFF) {
1171 codes[0] = EXC_ARM_FP_UF;
1172 } else if (esr & ISS_FP_OFF) {
1173 codes[0] = EXC_ARM_FP_OF;
1174 } else if (esr & ISS_FP_IOF) {
1175 codes[0] = EXC_ARM_FP_IO;
1176 } else if (esr & ISS_FP_DZF) {
1177 codes[0] = EXC_ARM_FP_DZ;
1178 } else if (esr & ISS_FP_IDF) {
1179 codes[0] = EXC_ARM_FP_ID;
1180 } else if (esr & ISS_FP_IXF) {
1181 codes[0] = EXC_ARM_FP_IX;
1182 } else {
1183 panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
1184 }
1185
1186 exception_triage(exc, codes, numcodes);
1187 __builtin_unreachable();
1188 }
1189
1190
1191
1192 /*
1193 * handle_alignment_fault_from_user:
1194 * state: Saved state
1195 *
1196 * Attempts to deal with an alignment fault from userspace (possibly by
 1197 * emulating the faulting instruction). If emulation fails due to an
 1198 * unserviceable fault, the ESR for that fault will be stored in the
1199 * recovery_esr field of the thread by the exception code.
1200 *
1201 * Returns:
1202 * -1: Emulation failed (emulation of state/instr not supported)
1203 * 0: Successfully emulated the instruction
1204 * EFAULT: Emulation failed (probably due to permissions)
1205 * EINVAL: Emulation failed (probably due to a bad address)
1206 */
1207 static int
1208 handle_alignment_fault_from_user(arm_saved_state_t *state, kern_return_t *vmfr)
1209 {
1210 int ret = -1;
1211
1212 #pragma unused (state)
1213 #pragma unused (vmfr)
1214
1215 return ret;
1216 }
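/*
 * In this configuration the emulation body is compiled out, so the function
 * always returns -1; handle_user_abort() then falls through to its final
 * branch and reports the fault to userspace as EXC_ARM_DA_ALIGN.
 */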
1217
1218
1219 static void
1220 handle_sw_step_debug(arm_saved_state_t *state)
1221 {
1222 thread_t thread = current_thread();
1223 exception_type_t exc;
1224 mach_exception_data_type_t codes[2];
1225 mach_msg_type_number_t numcodes = 2;
1226
1227 if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
1228 panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
1229 }
1230
1231 // Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
1232 if (thread->machine.DebugData != NULL) {
1233 thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
1234 } else {
1235 panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
1236 }
1237
1238 mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_IRQF | DAIF_FIQF);
1239
1240 // Special encoding for gdb single step event on ARM
1241 exc = EXC_BREAKPOINT;
1242 codes[0] = 1;
1243 codes[1] = 0;
1244
1245 exception_triage(exc, codes, numcodes);
1246 __builtin_unreachable();
1247 }
1248
1249 static void
1250 set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover)
1251 {
1252 #if defined(HAS_APPLE_PAC)
1253 thread_t thread = current_thread();
1254 const uintptr_t disc = ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER);
1255 const char *panic_msg = "Illegal thread->recover value %p";
1256
1257 MANIPULATE_SIGNED_THREAD_STATE(iss,
1258 // recover = (vm_offset_t)ptrauth_auth_data((void *)recover, ptrauth_key_function_pointer,
1259 // ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
1260 "mov x1, %[recover] \n"
1261 "mov x6, %[disc] \n"
1262 "autia x1, x6 \n"
1263 // if (recover != (vm_offset_t)ptrauth_strip((void *)recover, ptrauth_key_function_pointer)) {
1264 "mov x6, x1 \n"
1265 "xpaci x6 \n"
1266 "cmp x1, x6 \n"
1267 "beq 1f \n"
1268 // panic("Illegal thread->recover value %p", (void *)recover);
1269 "mov x0, %[panic_msg] \n"
1270 "bl _panic \n"
1271 // }
1272 "1: \n"
1273 "str x1, [x0, %[SS64_PC]] \n",
1274 [recover] "r"(recover),
1275 [disc] "r"(disc),
1276 [panic_msg] "r"(panic_msg)
1277 );
1278 #else
1279 set_saved_state_pc(iss, recover);
1280 #endif
1281 }
1282
1283 static void
1284 handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
1285 fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
1286 {
1287 exception_type_t exc = EXC_BAD_ACCESS;
1288 mach_exception_data_type_t codes[2];
1289 mach_msg_type_number_t numcodes = 2;
1290 thread_t thread = current_thread();
1291
1292 (void)esr;
1293 (void)expected_fault_handler;
1294
1295 if (ml_at_interrupt_context()) {
1296 panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
1297 }
1298
1299 thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */
1300
1301 if (is_vm_fault(fault_code)) {
1302 kern_return_t result = KERN_FAILURE;
1303 vm_map_t map = thread->map;
1304 vm_offset_t vm_fault_addr = fault_addr;
1305
1306 assert(map != kernel_map);
1307
1308 if (!(fault_type & VM_PROT_EXECUTE)) {
1309 vm_fault_addr = tbi_clear(fault_addr);
1310 }
1311
1312 #if CONFIG_DTRACE
1313 if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
1314 if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
1315 if (recover) {
1316 thread->machine.recover_esr = esr;
1317 thread->machine.recover_far = vm_fault_addr;
1318 set_saved_state_pc_to_recovery_handler(state, recover);
1319 } else {
1320 panic_with_thread_kernel_state("copyin/out has no recovery point", state);
1321 }
1322 return;
1323 } else {
1324 panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
1325 }
1326 }
1327 #else
1328 (void)recover;
1329 #endif
1330
1331 #if CONFIG_PGTRACE
1332 if (pgtrace_enabled) {
1333 /* Check to see if trace bit is set */
1334 result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
1335 if (result == KERN_SUCCESS) {
1336 return;
1337 }
1338 }
1339 #endif
1340
1341 /* check to see if it is just a pmap ref/modify fault */
1342
1343 if ((result != KERN_SUCCESS) && !is_translation_fault(fault_code)) {
1344 result = arm_fast_fault(map->pmap,
1345 vm_fault_addr,
1346 fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
1347 }
1348 if (result != KERN_SUCCESS) {
1349 {
1350 /* We have to fault the page in */
1351 result = vm_fault(map, vm_fault_addr, fault_type,
1352 /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
1353 /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
1354 }
1355 }
1356 if (result == KERN_SUCCESS || result == KERN_ABORTED) {
1357 return;
1358 }
1359
1360 /*
1361 * vm_fault() should never return KERN_FAILURE for page faults from user space.
1362 * If it does, we're leaking preemption disables somewhere in the kernel.
1363 */
1364 if (__improbable(result == KERN_FAILURE)) {
1365 panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
1366 }
1367
1368 codes[0] = result;
1369 } else if (is_alignment_fault(fault_code)) {
1370 kern_return_t vmfkr = KERN_SUCCESS;
1371 thread->machine.recover_esr = 0;
1372 thread->machine.recover_far = 0;
1373 int result = handle_alignment_fault_from_user(state, &vmfkr);
1374 if (result == 0) {
1375 /* Successfully emulated, or instruction
1376 * copyin() for decode/emulation failed.
1377 * Continue, or redrive instruction.
1378 */
1379 thread_exception_return();
1380 } else if (((result == EFAULT) || (result == EINVAL)) &&
1381 (thread->machine.recover_esr == 0)) {
1382 /*
1383 * If we didn't actually take a fault, but got one of
1384 * these errors, then we failed basic sanity checks of
1385 * the fault address. Treat this as an invalid
1386 * address.
1387 */
1388 codes[0] = KERN_INVALID_ADDRESS;
1389 } else if ((result == EFAULT) &&
1390 (thread->machine.recover_esr)) {
1391 /*
1392 * Since alignment aborts are prioritized
1393 * ahead of translation aborts, the misaligned
1394 * atomic emulation flow may have triggered a
1395 * VM pagefault, which the VM could not resolve.
1396 * Report the VM fault error in codes[]
1397 */
1398
1399 codes[0] = vmfkr;
1400 assertf(vmfkr != KERN_SUCCESS, "Unexpected vmfkr 0x%x", vmfkr);
1401 /* Cause ESR_EC to reflect an EL0 abort */
1402 thread->machine.recover_esr &= ~ESR_EC_MASK;
1403 thread->machine.recover_esr |= (ESR_EC_DABORT_EL0 << ESR_EC_SHIFT);
1404 set_saved_state_esr(thread->machine.upcb, thread->machine.recover_esr);
1405 set_saved_state_far(thread->machine.upcb, thread->machine.recover_far);
1406 fault_addr = thread->machine.recover_far;
1407 } else {
1408 /* This was just an unsupported alignment
1409 * exception. Misaligned atomic emulation
1410 * timeouts fall in this category.
1411 */
1412 codes[0] = EXC_ARM_DA_ALIGN;
1413 }
1414 } else if (is_parity_error(fault_code)) {
1415 #if defined(APPLE_ARM64_ARCH_FAMILY)
1416 if (fault_code == FSC_SYNC_PARITY) {
1417 arm64_platform_error(state, esr, fault_addr);
1418 return;
1419 }
1420 #else
1421 panic("User parity error.");
1422 #endif
1423 } else {
1424 codes[0] = KERN_FAILURE;
1425 }
1426
1427 codes[1] = fault_addr;
1428 #if __has_feature(ptrauth_calls)
1429 bool is_data_abort = (ESR_EC(esr) == ESR_EC_DABORT_EL0);
1430 if (user_fault_addr_matches_pac_error_code(fault_addr, is_data_abort)) {
1431 exc |= EXC_PTRAUTH_BIT;
1432 }
1433 #endif /* __has_feature(ptrauth_calls) */
1434 exception_triage(exc, codes, numcodes);
1435 __builtin_unreachable();
1436 }
1437
1438 #if __ARM_PAN_AVAILABLE__
1439 static int
1440 is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
1441 {
 1442 // A PAN (Privileged Access Never) fault occurs for a data read/write in EL1 to a
 1443 // virtual address that is readable/writeable from both EL1 and EL0.
 1444
 1445 // To check for a PAN fault, we evaluate whether the following conditions are true:
1446 // 1. This is a permission fault
1447 // 2. PAN is enabled
1448 // 3. AT instruction (on which PAN has no effect) on the same faulting address
1449 // succeeds
1450
1451 vm_offset_t pa;
1452
1453 if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
1454 return FALSE;
1455 }
1456
1457 if (esr & ISS_DA_WNR) {
1458 pa = mmu_kvtop_wpreflight(fault_addr);
1459 } else {
1460 pa = mmu_kvtop(fault_addr);
1461 }
1462 return (pa)? TRUE: FALSE;
1463 }
1464 #endif
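/*
 * Typical scenario caught by the check above: kernel code dereferences a
 * user-space pointer directly, PSTATE.PAN turns that access into a permission
 * fault even though the translation is valid, and the AT-based probe
 * (mmu_kvtop / mmu_kvtop_wpreflight) still resolves the address, so
 * handle_kernel_abort() panics with "Privileged access never abort." instead
 * of treating it as an ordinary kernel data abort.
 */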
1465
1466 static void
1467 handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
1468 fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
1469 {
1470 thread_t thread = current_thread();
1471 (void)esr;
1472
1473 #ifndef CONFIG_XNUPOST
1474 (void)expected_fault_handler;
1475 #endif /* CONFIG_XNUPOST */
1476
1477 #if CONFIG_DTRACE
1478 if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
1479 if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
1480 /*
1481 * Point to next instruction, or recovery handler if set.
1482 */
1483 if (recover) {
1484 thread->machine.recover_esr = esr;
1485 thread->machine.recover_far = fault_addr;
1486 set_saved_state_pc_to_recovery_handler(state, recover);
1487 } else {
1488 add_saved_state_pc(state, 4);
1489 }
1490 return;
1491 } else {
1492 panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
1493 }
1494 }
1495 #endif
1496
1497 #if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */
1498 if (ml_at_interrupt_context()) {
1499 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
1500 }
1501 #endif
1502
1503 if (is_vm_fault(fault_code)) {
1504 kern_return_t result = KERN_FAILURE;
1505 vm_map_t map;
1506 int interruptible;
1507
1508 /*
1509 * Ensure no faults in the physical aperture. This could happen if
1510 * a page table is incorrectly allocated from the read only region
1511 * when running with KTRR.
1512 */
1513
1514 #ifdef CONFIG_XNUPOST
1515 if (expected_fault_handler && expected_fault_handler(state)) {
1516 return;
1517 }
1518 #endif /* CONFIG_XNUPOST */
1519
1520 if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
1521 panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
1522 }
1523
1524 if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
1525 map = kernel_map;
1526 interruptible = THREAD_UNINT;
1527 } else {
1528 map = thread->map;
1529 interruptible = THREAD_ABORTSAFE;
1530 }
1531
1532 #if CONFIG_PGTRACE
1533 if (pgtrace_enabled) {
1534 /* Check to see if trace bit is set */
1535 result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
1536 if (result == KERN_SUCCESS) {
1537 return;
1538 }
1539 }
1540
1541 if (ml_at_interrupt_context()) {
1542 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
1543 }
1544 #endif
1545
1546 /* check to see if it is just a pmap ref/modify fault */
1547 if (!is_translation_fault(fault_code)) {
1548 result = arm_fast_fault(map->pmap,
1549 fault_addr,
1550 fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
1551 if (result == KERN_SUCCESS) {
1552 return;
1553 }
1554 }
1555
1556 if (result != KERN_PROTECTION_FAILURE) {
1557 /*
1558 * We have to "fault" the page in.
1559 */
1560 result = vm_fault(map, fault_addr, fault_type,
1561 /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
1562 /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
1563 }
1564
1565 if (result == KERN_SUCCESS) {
1566 return;
1567 }
1568
1569 /*
1570 * If we have a recover handler, invoke it now.
1571 */
1572 if (recover) {
1573 thread->machine.recover_esr = esr;
1574 thread->machine.recover_far = fault_addr;
1575 set_saved_state_pc_to_recovery_handler(state, recover);
1576 return;
1577 }
1578
1579 #if __ARM_PAN_AVAILABLE__
1580 if (is_pan_fault(state, esr, fault_addr, fault_code)) {
1581 panic_with_thread_kernel_state("Privileged access never abort.", state);
1582 }
1583 #endif
1584
1585 #if CONFIG_PGTRACE
1586 } else if (ml_at_interrupt_context()) {
1587 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
1588 #endif
1589 } else if (is_alignment_fault(fault_code)) {
1590 if (recover) {
1591 thread->machine.recover_esr = esr;
1592 thread->machine.recover_far = fault_addr;
1593 set_saved_state_pc_to_recovery_handler(state, recover);
1594 return;
1595 }
1596 panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
1597 } else if (is_parity_error(fault_code)) {
1598 #if defined(APPLE_ARM64_ARCH_FAMILY)
1599 if (fault_code == FSC_SYNC_PARITY) {
1600 arm64_platform_error(state, esr, fault_addr);
1601 return;
1602 }
1603 #else
1604 panic_with_thread_kernel_state("Kernel parity error.", state);
1605 #endif
1606 } else {
1607 kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
1608 }
1609
1610 panic_with_thread_kernel_state("Kernel data abort.", state);
1611 }
1612
1613 extern void syscall_trace(struct arm_saved_state * regs);
1614
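/*
 * handle_svc() dispatches an EL0 SVC based on the trap number recovered from
 * the saved state: PLATFORM_SYSCALL_TRAP_NO goes to platform_syscall(),
 * negative numbers are Mach traps (with the MACH_ARM_TRAP_ABSTIME and
 * MACH_ARM_TRAP_CONTTIME fast paths handled inline), and non-negative numbers
 * are handed to unix_syscall() as BSD system calls.
 */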
1615 static void
1616 handle_svc(arm_saved_state_t *state)
1617 {
1618 int trap_no = get_saved_state_svc_number(state);
1619 thread_t thread = current_thread();
1620 struct proc *p;
1621
1622 #define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */
1623
1624 #define TRACE_SYSCALL 1
1625 #if TRACE_SYSCALL
1626 syscall_trace(state);
1627 #endif
1628
1629 thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */
1630
1631 if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
1632 platform_syscall(state);
1633 panic("Returned from platform_syscall()?");
1634 }
1635
1636 mach_kauth_cred_uthread_update();
1637
1638 if (trap_no < 0) {
1639 if (trap_no == MACH_ARM_TRAP_ABSTIME) {
1640 handle_mach_absolute_time_trap(state);
1641 return;
1642 } else if (trap_no == MACH_ARM_TRAP_CONTTIME) {
1643 handle_mach_continuous_time_trap(state);
1644 return;
1645 }
1646
1647 /* Counting perhaps better in the handler, but this is how it's been done */
1648 thread->syscalls_mach++;
1649 mach_syscall(state);
1650 } else {
1651 /* Counting perhaps better in the handler, but this is how it's been done */
1652 thread->syscalls_unix++;
1653 p = get_bsdthreadtask_info(thread);
1654
1655 assert(p);
1656
1657 unix_syscall(state, thread, (struct uthread*)thread->uthread, p);
1658 }
1659 }
1660
1661 static void
1662 handle_mach_absolute_time_trap(arm_saved_state_t *state)
1663 {
1664 uint64_t now = mach_absolute_time();
1665 saved_state64(state)->x[0] = now;
1666 }
1667
1668 static void
1669 handle_mach_continuous_time_trap(arm_saved_state_t *state)
1670 {
1671 uint64_t now = mach_continuous_time();
1672 saved_state64(state)->x[0] = now;
1673 }
1674
1675 __attribute__((noreturn))
1676 static void
1677 handle_msr_trap(arm_saved_state_t *state, uint32_t esr)
1678 {
1679 exception_type_t exception = EXC_BAD_INSTRUCTION;
1680 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
1681 mach_msg_type_number_t numcodes = 2;
1682 uint32_t instr = 0;
1683
1684 if (!is_saved_state64(state)) {
1685 panic("MSR/MRS trap (ESR 0x%x) from 32-bit state\n", esr);
1686 }
1687
1688 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1689 panic("MSR/MRS trap (ESR 0x%x) from kernel\n", esr);
1690 }
1691
1692 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1693 codes[1] = instr;
1694
1695 exception_triage(exception, codes, numcodes);
1696 __builtin_unreachable();
1697 }
1698
1699
1700 static void
1701 handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
1702 {
1703 exception_type_t exception = EXC_BAD_INSTRUCTION;
1704 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
1705 mach_msg_type_number_t numcodes = 2;
1706 uint32_t instr;
1707
1708 if (is_saved_state64(state)) {
1709 panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
1710 }
1711
1712 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1713 panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
1714 }
1715
1716 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1717 codes[1] = instr;
1718
1719 exception_triage(exception, codes, numcodes);
1720 __builtin_unreachable();
1721 }
1722
1723 static void
1724 handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
1725 {
1726 exception_type_t exception = EXC_BAD_INSTRUCTION;
1727 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
1728 mach_msg_type_number_t numcodes = 2;
1729 uint32_t instr = 0;
1730
1731 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1732 panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
1733 }
1734
1735 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1736 codes[1] = instr;
1737
1738 exception_triage(exception, codes, numcodes);
1739 __builtin_unreachable();
1740 }
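/*
 * handle_msr_trap(), handle_user_trapped_instruction32() and
 * handle_simd_trap() share one shape: copy in the faulting instruction,
 * report it as codes[1] of an EXC_BAD_INSTRUCTION / EXC_ARM_UNDEFINED
 * exception, and never return. A minimal sketch of that common shape
 * (hypothetical helper, for illustration only):
 */
#if 0 /* illustration only */
static void __attribute__((noreturn))
triage_undefined_instruction(arm_saved_state_t *state)
{
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED, 0};
	uint32_t instr = 0;

	/* Fetch the trapped instruction so it can be reported to the exception handler. */
	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(EXC_BAD_INSTRUCTION, codes, 2);
	__builtin_unreachable();
}
#endif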
1741
1742 void
1743 sleh_irq(arm_saved_state_t *state)
1744 {
1745 uint64_t timestamp = 0;
1746 uint32_t old_entropy_data = 0;
1747 uint32_t old_entropy_sample_count = 0;
1748 size_t entropy_index = 0;
1749 uint32_t * entropy_data_ptr = NULL;
1750 cpu_data_t * cdp __unused = getCpuDatap();
1751 #if MACH_ASSERT
1752 int preemption_level = get_preemption_level();
1753 #endif
1754
1755
1756 sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);
1757
1758 #if USE_APPLEARMSMP
1759 PE_handle_ext_interrupt();
1760 #else
1761 /* Run the registered interrupt handler. */
1762 cdp->interrupt_handler(cdp->interrupt_target,
1763 cdp->interrupt_refCon,
1764 cdp->interrupt_nub,
1765 cdp->interrupt_source);
1766 #endif
1767
1768 /* We use interrupt timing as an entropy source. */
1769 timestamp = ml_get_timebase();
1770
1771 /*
1772 * The buffer index is subject to races, but as these races should only
1773 * result in multiple CPUs updating the same location, the end result
1774 * should be that noise gets written into the entropy buffer. As this
1775 * is the entire point of the entropy buffer, we will not worry about
1776 * these races for now.
1777 */
1778 old_entropy_sample_count = EntropyData.sample_count;
1779 EntropyData.sample_count += 1;
1780
1781 entropy_index = old_entropy_sample_count & EntropyData.buffer_index_mask;
1782 entropy_data_ptr = EntropyData.buffer + entropy_index;
1783
1784 /* Mix the timestamp data and the old data together. */
1785 old_entropy_data = *entropy_data_ptr;
1786 *entropy_data_ptr = (uint32_t)timestamp ^ (__ror(old_entropy_data, 9) & EntropyData.ror_mask);
1787
1788 sleh_interrupt_handler_epilogue();
1789 #if MACH_ASSERT
1790 if (preemption_level != get_preemption_level()) {
1791 panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, get_preemption_level());
1792 }
1793 #endif
1794 }
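/*
 * The entropy update above is a rotate-and-xor mix of the interrupt
 * timebase into one ring-buffer slot. A minimal sketch of the per-sample
 * update, assuming the same __ror helper (hypothetical function, for
 * illustration only):
 */
#if 0 /* illustration only */
static uint32_t
mix_entropy_sample(uint32_t old_sample, uint64_t timebase, uint32_t ror_mask)
{
	/* Rotate the previous sample, mask it, then fold in the new timestamp. */
	return (uint32_t)timebase ^ (__ror(old_sample, 9) & ror_mask);
}
#endif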
1795
1796 void
1797 sleh_fiq(arm_saved_state_t *state)
1798 {
1799 unsigned int type = DBG_INTR_TYPE_UNKNOWN;
1800 #if MACH_ASSERT
1801 int preemption_level = get_preemption_level();
1802 #endif
1803
1804 #if MONOTONIC_FIQ
1805 uint64_t pmcr0 = 0, upmsr = 0;
1806 #endif /* MONOTONIC_FIQ */
1807
1808 #if defined(HAS_IPI)
1809 boolean_t is_ipi = FALSE;
1810 uint64_t ipi_sr = 0;
1811
1812 if (gFastIPI) {
1813 MRS(ipi_sr, ARM64_REG_IPI_SR);
1814
1815 if (ipi_sr & 1) {
1816 is_ipi = TRUE;
1817 }
1818 }
1819
1820 if (is_ipi) {
1821 type = DBG_INTR_TYPE_IPI;
1822 } else
1823 #endif /* defined(HAS_IPI) */
1824 #if MONOTONIC_FIQ
1825 if (mt_pmi_pending(&pmcr0, &upmsr)) {
1826 type = DBG_INTR_TYPE_PMI;
1827 } else
1828 #endif /* MONOTONIC_FIQ */
1829 if (ml_get_timer_pending()) {
1830 type = DBG_INTR_TYPE_TIMER;
1831 }
1832
1833 sleh_interrupt_handler_prologue(state, type);
1834
1835 #if defined(HAS_IPI)
1836 if (is_ipi) {
1837 /*
1838 * Order is important here: we must ack the IPI by writing IPI_SR
1839 * before we call cpu_signal_handler(). Otherwise, there will be
1840 * a window between the completion of pending-signal processing in
1841 * cpu_signal_handler() and the ack during which a newly-issued
1842 * IPI to this CPU may be lost. ISB is required to ensure the msr
1843 * is retired before execution of cpu_signal_handler().
1844 */
1845 MSR(ARM64_REG_IPI_SR, ipi_sr);
1846 __builtin_arm_isb(ISB_SY);
1847 cpu_signal_handler();
1848 } else
1849 #endif /* defined(HAS_IPI) */
1850 #if MONOTONIC_FIQ
1851 if (type == DBG_INTR_TYPE_PMI) {
1852 INTERRUPT_MASKED_DEBUG_START(mt_fiq, DBG_INTR_TYPE_PMI);
1853 mt_fiq(getCpuDatap(), pmcr0, upmsr);
1854 INTERRUPT_MASKED_DEBUG_END();
1855 } else
1856 #endif /* MONOTONIC_FIQ */
1857 {
1858 /*
1859 * We don't know that this is a timer, but we don't have insight into
1860 * the other interrupts that go down this path.
1861 */
1862
1863 cpu_data_t *cdp = getCpuDatap();
1864
1865 cdp->cpu_decrementer = -1; /* Large */
1866
1867 /*
1868 * ARM64_TODO: whether we're coming from userland is ignored right now.
1869 * We could easily thread it through, but we haven't bothered for the
1870 * moment (AArch32 doesn't either).
1871 */
1872 INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_TIMER);
1873 rtclock_intr(TRUE);
1874 INTERRUPT_MASKED_DEBUG_END();
1875 }
1876
1877 sleh_interrupt_handler_epilogue();
1878 #if MACH_ASSERT
1879 if (preemption_level != get_preemption_level()) {
1880 panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, get_preemption_level());
1881 }
1882 #endif
1883 }
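/*
 * The IPI path above acks the interrupt (MSR to IPI_SR, then ISB) before
 * running the signal handler, so an IPI issued between the handler's last
 * check and the ack cannot be lost. A minimal sketch of that ordering
 * (hypothetical helper, for illustration only):
 */
#if 0 /* illustration only */
static void
ack_then_handle_ipi(uint64_t ipi_sr)
{
	MSR(ARM64_REG_IPI_SR, ipi_sr); /* ack first, re-arming delivery */
	__builtin_arm_isb(ISB_SY);     /* retire the msr before handling */
	cpu_signal_handler();          /* then drain pending cross-CPU signals */
}
#endif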
1884
1885 void
1886 sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
1887 {
1888 task_vtimer_check(current_thread());
1889
1890 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_START,
1891 esr, VM_KERNEL_ADDRHIDE(far));
1892 arm_saved_state_t *state = &context->ss;
1893 #if MACH_ASSERT
1894 int preemption_level = get_preemption_level();
1895 #endif
1896
1897 ASSERT_CONTEXT_SANITY(context);
1898 arm64_platform_error(state, esr, far);
1899 #if MACH_ASSERT
1900 if (preemption_level != get_preemption_level()) {
1901 panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level());
1902 }
1903 #endif
1904 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_END,
1905 esr, VM_KERNEL_ADDRHIDE(far));
1906 }
1907
1908 void
1909 mach_syscall_trace_exit(unsigned int retval,
1910 unsigned int call_number)
1911 {
1912 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1913 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
1914 DBG_FUNC_END, retval, 0, 0, 0, 0);
1915 }
1916
1917 __attribute__((noreturn))
1918 void
1919 thread_syscall_return(kern_return_t error)
1920 {
1921 thread_t thread;
1922 struct arm_saved_state *state;
1923
1924 thread = current_thread();
1925 state = get_user_regs(thread);
1926
1927 assert(is_saved_state64(state));
1928 saved_state64(state)->x[0] = error;
1929
1930 #if MACH_ASSERT
1931 kern_allocation_name_t
1932 prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
1933 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
1934 #endif /* MACH_ASSERT */
1935
1936 if (kdebug_enable) {
1937 /* Invert syscall number (negative for a mach syscall) */
1938 mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
1939 }
1940
1941 thread_exception_return();
1942 }
1943
1944 void
1945 syscall_trace(
1946 struct arm_saved_state * regs __unused)
1947 {
1948 /* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
1949 }
1950
1951 static void
1952 sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
1953 {
1954 bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));
1955
1956 task_vtimer_check(current_thread());
1957
1958 uint64_t pc = is_user ? get_saved_state_pc(state) :
1959 VM_KERNEL_UNSLIDE(get_saved_state_pc(state));
1960
1961 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
1962 0, pc, is_user, type);
1963
1964 #if CONFIG_TELEMETRY
1965 if (telemetry_needs_record) {
1966 telemetry_mark_curthread((boolean_t)is_user, FALSE);
1967 }
1968 #endif /* CONFIG_TELEMETRY */
1969 }
1970
1971 static void
1972 sleh_interrupt_handler_epilogue(void)
1973 {
1974 #if KPERF
1975 kperf_interrupt();
1976 #endif /* KPERF */
1977 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
1978 }
1979
1980 void
1981 sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
1982 {
1983 thread_t thread = current_thread();
1984 vm_offset_t kernel_stack_bottom, sp;
1985
1986 sp = get_saved_state_sp(&context->ss);
1987 kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;
1988
1989 if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
1990 panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
1991 }
1992
1993 panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
1994 }
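/*
 * The range check above treats a stack pointer that has dropped into the
 * page immediately below the kernel stack as an overflow into the guard
 * page; anything else is reported as probable corruption. A minimal
 * sketch of that check (hypothetical helper, for illustration only):
 */
#if 0 /* illustration only */
static int
sp_in_guard_page(vm_offset_t sp, vm_offset_t kernel_stack_bottom)
{
	return (sp < kernel_stack_bottom) &&
	       (sp >= (kernel_stack_bottom - PAGE_SIZE));
}
#endif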
1995