/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/caches_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/thread.h>
#include <arm/rtclock.h>
#include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm64/monotonic.h>
#include <arm64/instructions.h>

#include <kern/debug.h>
#include <kern/thread.h>
#include <mach/exception.h>
#include <mach/arm/traps.h>
#include <mach/vm_types.h>
#include <mach/machine/thread_status.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <pexpert/arm/protos.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <sys/errno.h>
#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <kern/policy_internal.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#include <prng/entropy.h>


#ifndef __arm64__
#error Should only be compiling for arm64.
#endif

#define TEST_CONTEXT32_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
	context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)

#define TEST_CONTEXT64_SANITY(context) \
	(context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
	context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)

#define ASSERT_CONTEXT_SANITY(context) \
	assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))


#define COPYIN(src, dst, size) \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyin_kern(src, dst, size) : \
	copyin(src, dst, size)

#define COPYOUT(src, dst, size) \
	(PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
	copyout_kern(src, dst, size) : \
	copyout(src, dst, size)
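
/*
 * Illustrative usage (this is how the handlers below fetch the faulting
 * instruction): the same call works for EL0 and EL1 faults, with the macro
 * selecting the kernel- or user-space copy primitive from the saved CPSR:
 *
 *	uint32_t instr = 0;
 *	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
 */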

// Stringify a macro argument after expansion (used below to pass system
// register names to the __builtin_arm_rsr64() intrinsic as string literals)
#define STR1(x) #x
#define STR(x) STR1(x)
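// For example, STR(S3_6_C15_C0_0) expands to "S3_6_C15_C0_0", so each
// implementation-defined register is named once and stringified here rather
// than being hard-coded as a string at every read site.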

#define ARM64_KDBG_CODE_KERNEL (0 << 8)
#define ARM64_KDBG_CODE_USER   (1 << 8)
#define ARM64_KDBG_CODE_GUEST  (2 << 8)

_Static_assert(ARM64_KDBG_CODE_GUEST <= KDBG_CODE_MAX, "arm64 KDBG trace codes out of range");
_Static_assert(ARM64_KDBG_CODE_GUEST <= UINT16_MAX, "arm64 KDBG trace codes out of range");
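
/*
 * A trace code combines the origin (bits 8 and up) with the ESR exception
 * class in the low byte; e.g. sleh_synchronous() below records an EL0
 * exception as (uint16_t)(ARM64_KDBG_CODE_USER | class), which the asserts
 * above guarantee fits in 16 bits.
 */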

void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;

void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);


void sleh_irq(arm_saved_state_t *);
void sleh_fiq(arm_saved_state_t *);
void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;

static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
static void sleh_interrupt_handler_epilogue(void);

static void handle_svc(arm_saved_state_t *);
static void handle_mach_absolute_time_trap(arm_saved_state_t *);
static void handle_mach_continuous_time_trap(arm_saved_state_t *);

static void handle_msr_trap(arm_saved_state_t *state, uint32_t esr);

extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);

static void handle_uncategorized(arm_saved_state_t *);
static void handle_kernel_breakpoint(arm_saved_state_t *, uint32_t) __dead2;
static void handle_breakpoint(arm_saved_state_t *, uint32_t) __dead2;

typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);

static int is_vm_fault(fault_status_t);
static int is_translation_fault(fault_status_t);
static int is_alignment_fault(fault_status_t);

typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);
static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t);

static void handle_pc_align(arm_saved_state_t *ss) __dead2;
static void handle_sp_align(arm_saved_state_t *ss) __dead2;
static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;

static void handle_watchpoint(vm_offset_t fault_addr) __dead2;

static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t, expected_fault_handler_t);

static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;

static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;

extern void mach_kauth_cred_uthread_update(void);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);

struct uthread;
struct proc;

typedef uint32_t arm64_instr_t;

extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
    struct uthread * uthread, struct proc * proc);

extern void
mach_syscall(struct arm_saved_state*);

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
extern boolean_t dtrace_tally_fault(user_addr_t);

/*
 * Traps for userland processing. We can't include bsd/sys/fasttrap_isa.h here,
 * so the trap instructions are copied over from that file; they must be kept
 * in sync.
 */
#define FASTTRAP_ARM32_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB32_INSTR 0xdefc
#define FASTTRAP_ARM64_INSTR 0xe7eeee7e

#define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB32_RET_INSTR 0xdefb
#define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif


#if CONFIG_PGTRACE
extern boolean_t pgtrace_enabled;
#endif

#if HAS_TWO_STAGE_SPR_LOCK
#ifdef CONFIG_XNUPOST
extern volatile vm_offset_t spr_lock_test_addr;
extern volatile uint32_t spr_lock_exception_esr;
#endif
#endif

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
#endif

extern void arm64_thread_exception_return(void) __dead2;

#if defined(APPLETYPHOON)
#define CPU_NAME "Typhoon"
#elif defined(APPLETWISTER)
#define CPU_NAME "Twister"
#elif defined(APPLEHURRICANE)
#define CPU_NAME "Hurricane"
#elif defined(APPLELIGHTNING)
#define CPU_NAME "Lightning"
#else
#define CPU_NAME "Unknown"
#endif

#if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
#define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
#define ESR_WT_REASON(esr) ((esr) & 0xff)

#define WT_REASON_NONE           0
#define WT_REASON_INTEGRITY_FAIL 1
#define WT_REASON_BAD_SYSCALL    2
#define WT_REASON_NOT_LOCKED     3
#define WT_REASON_ALREADY_LOCKED 4
#define WT_REASON_SW_REQ         5
#define WT_REASON_PT_INVALID     6
#define WT_REASON_PT_VIOLATION   7
#define WT_REASON_REG_VIOLATION  8
#endif
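
/*
 * Worked example (illustrative): an SError syndrome of 0xbf575401 passes
 * ESR_WT_SERROR(), since its top 24 bits are 0xbf5754, and ESR_WT_REASON()
 * extracts the low byte, 1, i.e. WT_REASON_INTEGRITY_FAIL.
 */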

#if defined(HAS_IPI)
void cpu_signal_handler(void);
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */

static arm_saved_state64_t *original_faulting_state = NULL;

TUNABLE(bool, fp_exceptions_enabled, "-fp_exceptions", false);

extern vm_offset_t static_memory_end;

static inline int
is_vm_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
	case FSC_ACCESS_FLAG_FAULT_L1:
	case FSC_ACCESS_FLAG_FAULT_L2:
	case FSC_ACCESS_FLAG_FAULT_L3:
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_translation_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_permission_fault(fault_status_t status)
{
	switch (status) {
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

static inline int
is_alignment_fault(fault_status_t status)
{
	return status == FSC_ALIGNMENT_FAULT;
}

static inline int
is_parity_error(fault_status_t status)
{
	switch (status) {
	case FSC_SYNC_PARITY:
	case FSC_ASYNC_PARITY:
	case FSC_SYNC_PARITY_TT_L1:
	case FSC_SYNC_PARITY_TT_L2:
	case FSC_SYNC_PARITY_TT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}

__dead2
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#if defined(APPLE_ARM64_ARCH_FAMILY)
	uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
#if defined(NO_ECORE)
	uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;

	mmu_err_sts = __builtin_arm_rsr64(STR(S3_6_C15_C0_0));
	l2c_err_sts = __builtin_arm_rsr64(STR(S3_3_C15_C8_0));
	l2c_err_adr = __builtin_arm_rsr64(STR(S3_3_C15_C9_0));
	l2c_err_inf = __builtin_arm_rsr64(STR(S3_3_C15_C10_0));
	lsu_err_sts = __builtin_arm_rsr64(STR(S3_3_C15_C0_0));
	fed_err_sts = __builtin_arm_rsr64(STR(S3_4_C15_C0_0));

	panic_plain("Unhandled " CPU_NAME
	    " implementation specific error. state=%p esr=%#x far=%p\n"
	    "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
	    "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
	    state, esr, (void *)far,
	    (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
	    (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);

#elif defined(HAS_MIGSTS)
	uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;

	mpidr = __builtin_arm_rsr64("MPIDR_EL1");
	migsts = __builtin_arm_rsr64(STR(MIGSTS_EL1));
	mmu_err_sts = __builtin_arm_rsr64(STR(S3_6_C15_C0_0));
	l2c_err_sts = __builtin_arm_rsr64(STR(S3_3_C15_C8_0));
	l2c_err_adr = __builtin_arm_rsr64(STR(S3_3_C15_C9_0));
	l2c_err_inf = __builtin_arm_rsr64(STR(S3_3_C15_C10_0));
	lsu_err_sts = __builtin_arm_rsr64(STR(S3_3_C15_C0_0));
	fed_err_sts = __builtin_arm_rsr64(STR(S3_4_C15_C0_0));

	panic_plain("Unhandled " CPU_NAME
	    " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
	    "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
	    "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
	    state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
	    (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
	    (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
#else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
	uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;
#if defined(HAS_DPC_ERR)
	uint64_t dpc_err_sts = __builtin_arm_rsr64(STR(S3_5_C15_C0_5));
#endif // defined(HAS_DPC_ERR)

	mpidr = __builtin_arm_rsr64("MPIDR_EL1");

	if (mpidr & MPIDR_PNE) {
		mmu_err_sts = __builtin_arm_rsr64(STR(S3_6_C15_C0_0));
		lsu_err_sts = __builtin_arm_rsr64(STR(S3_3_C15_C0_0));
		fed_err_sts = __builtin_arm_rsr64(STR(S3_4_C15_C0_0));
	} else {
		mmu_err_sts = __builtin_arm_rsr64(STR(S3_6_C15_C2_0));
		lsu_err_sts = __builtin_arm_rsr64(STR(S3_3_C15_C2_0));
		fed_err_sts = __builtin_arm_rsr64(STR(S3_4_C15_C0_2));
	}

	llc_err_sts = __builtin_arm_rsr64(STR(S3_3_C15_C8_0));
	llc_err_adr = __builtin_arm_rsr64(STR(S3_3_C15_C9_0));
	llc_err_inf = __builtin_arm_rsr64(STR(S3_3_C15_C10_0));

	panic_plain("Unhandled " CPU_NAME
	    " implementation specific error. state=%p esr=%#x far=%p p-core?%d"
#if defined(HAS_DPC_ERR)
	    " dpc_err_sts:%p"
#endif
	    "\n"
	    "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
	    "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n",
	    state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
#if defined(HAS_DPC_ERR)
	    (void *)dpc_err_sts,
#endif
	    (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
	    (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
#endif
#else // !defined(APPLE_ARM64_ARCH_FAMILY)
#pragma unused (state, esr, far)
	panic_plain("Unhandled implementation specific error\n");
#endif
}

#if CONFIG_KERNEL_INTEGRITY
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
{
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
	if (ESR_WT_SERROR(esr)) {
		switch (ESR_WT_REASON(esr)) {
		case WT_REASON_INTEGRITY_FAIL:
			panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
		case WT_REASON_BAD_SYSCALL:
			panic_plain("Kernel integrity, bad syscall.");
		case WT_REASON_NOT_LOCKED:
			panic_plain("Kernel integrity, not locked.");
		case WT_REASON_ALREADY_LOCKED:
			panic_plain("Kernel integrity, already locked.");
		case WT_REASON_SW_REQ:
			panic_plain("Kernel integrity, software request.");
		case WT_REASON_PT_INVALID:
			panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
			    "walking 0x%016lx.", far);
		case WT_REASON_PT_VIOLATION:
			panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
			    far);
		case WT_REASON_REG_VIOLATION:
			panic_plain("Kernel integrity, violation in system register %d.",
			    (unsigned) far);
		default:
			panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
		}
	}
#else
	if (ESR_WT_SERROR(esr)) {
		panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
	}
#endif
#endif
}
#pragma clang diagnostic pop
#endif

static void
arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
	cpu_data_t *cdp = getCpuDatap();

#if CONFIG_KERNEL_INTEGRITY
	kernel_integrity_error_handler(esr, far);
#endif

	if (PE_handle_platform_error(far)) {
		return;
	} else if (cdp->platform_error_handler != NULL) {
		cdp->platform_error_handler(cdp->cpu_id, far);
	} else {
		arm64_implementation_specific_error(state, esr, far);
	}
}

void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
	boolean_t ss_valid;

	ss_valid = is_saved_state64(ss);
	arm_saved_state64_t *state = saved_state64(ss);

	os_atomic_cmpxchg(&original_faulting_state, NULL, state, seq_cst);

	panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
	    "\t x0: 0x%016llx  x1: 0x%016llx  x2: 0x%016llx  x3: 0x%016llx\n"
	    "\t x4: 0x%016llx  x5: 0x%016llx  x6: 0x%016llx  x7: 0x%016llx\n"
	    "\t x8: 0x%016llx  x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
	    "\tx12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
	    "\tx16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
	    "\tx20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
	    "\tx24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
	    "\tx28: 0x%016llx  fp: 0x%016llx  lr: 0x%016llx  sp: 0x%016llx\n"
	    "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
	    msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
	    state->x[0], state->x[1], state->x[2], state->x[3],
	    state->x[4], state->x[5], state->x[6], state->x[7],
	    state->x[8], state->x[9], state->x[10], state->x[11],
	    state->x[12], state->x[13], state->x[14], state->x[15],
	    state->x[16], state->x[17], state->x[18], state->x[19],
	    state->x[20], state->x[21], state->x[22], state->x[23],
	    state->x[24], state->x[25], state->x[26], state->x[27],
	    state->x[28], state->fp, state->lr, state->sp,
	    state->pc, state->cpsr, state->esr, state->far);
}

void
sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
{
	esr_exception_class_t  class = ESR_EC(esr);
	arm_saved_state_t     *state = &context->ss;

	switch (class) {
	case ESR_EC_UNCATEGORIZED:
	{
		uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
		if (IS_ARM_GDB_TRAP(instr)) {
			DebuggerCall(EXC_BREAKPOINT, state);
		}
	}
		OS_FALLTHROUGH; // panic if we return from the debugger
	default:
		panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
	}
}

#if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
static bool
handle_msr_write_from_xnupost(arm_saved_state_t *state, uint32_t esr)
{
	user_addr_t pc = get_saved_state_pc(state);
	if ((spr_lock_test_addr != 0) && (pc == spr_lock_test_addr)) {
		spr_lock_exception_esr = esr;
		set_saved_state_pc(state, pc + 4);
		return true;
	}

	return false;
}
#endif

__attribute__((noreturn))
void
thread_exception_return()
{
	thread_t thread = current_thread();
	if (thread->machine.exception_trace_code != 0) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, 0, 0, 0, 0, 0);
		thread->machine.exception_trace_code = 0;
	}

	arm64_thread_exception_return();
	__builtin_unreachable();
}

/*
 * Check whether task vtimers are running and set the thread and CPU BSD ASTs.
 *
 * Must be called with interrupts masked so that updates of the fields are atomic.
 * Must be emitted inline to avoid generating an FBT probe on the exception path.
 */
__attribute__((__always_inline__))
static inline void
task_vtimer_check(thread_t thread)
{
	if (__improbable((thread->task != NULL) && thread->task->vtimers)) {
		thread->ast |= AST_BSD;
		thread->machine.CpuDatap->cpu_pending_ast |= AST_BSD;
	}
}

void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	esr_exception_class_t  class = ESR_EC(esr);
	arm_saved_state_t     *state = &context->ss;
	vm_offset_t            recover = 0;
	thread_t               thread = current_thread();
#if MACH_ASSERT
	int                    preemption_level = get_preemption_level();
#endif
	expected_fault_handler_t expected_fault_handler = NULL;
#ifdef CONFIG_XNUPOST
	expected_fault_handler_t saved_expected_fault_handler = NULL;
	uintptr_t saved_expected_fault_addr = 0;
#endif /* CONFIG_XNUPOST */

	ASSERT_CONTEXT_SANITY(context);

	task_vtimer_check(thread);

#if CONFIG_DTRACE
	/*
	 * Handle kernel DTrace probes as early as possible to minimize the likelihood
	 * that this path will itself trigger a DTrace probe, which would lead to infinite
	 * probe recursion.
	 */
	if (__improbable((class == ESR_EC_UNCATEGORIZED) && tempDTraceTrapHook &&
	    (tempDTraceTrapHook(EXC_BAD_INSTRUCTION, state, 0, 0) == KERN_SUCCESS))) {
		return;
	}
#endif
	bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	/*
	 * Use KERNEL_DEBUG_CONSTANT_IST here to avoid producing tracepoints
	 * that would disclose the behavior of PT_DENY_ATTACH processes.
	 */
	if (is_user) {
		thread->machine.exception_trace_code = (uint16_t)(ARM64_KDBG_CODE_USER | class);
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_START,
		    esr, far, get_saved_state_pc(state), 0, 0);
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_START,
		    esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
	}

	if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
		/*
		 * We no longer support 32-bit, which means no 2-byte
		 * instructions.
		 */
		if (is_user) {
			panic("Exception on 2-byte instruction, "
			    "context=%p, esr=%#x, far=%p",
			    context, esr, (void *)far);
		} else {
			panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
		}
	}

	/* Don't run exception handler with recover handler set in case of double fault */
	if (thread->recover) {
		recover = thread->recover;
		thread->recover = (vm_offset_t)NULL;
	}

#ifdef CONFIG_XNUPOST
	if (thread->machine.expected_fault_handler != NULL) {
		saved_expected_fault_handler = thread->machine.expected_fault_handler;
		saved_expected_fault_addr = thread->machine.expected_fault_addr;

		thread->machine.expected_fault_handler = NULL;
		thread->machine.expected_fault_addr = 0;

		if (saved_expected_fault_addr == far) {
			expected_fault_handler = saved_expected_fault_handler;
		}
	}
#endif /* CONFIG_XNUPOST */

	/* Inherit the interrupt masks from previous context */
	if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
		ml_set_interrupts_enabled(TRUE);
	}

	switch (class) {
	case ESR_EC_SVC_64:
		if (!is_saved_state64(state) || !is_user) {
			panic("Invalid SVC_64 context");
		}

		handle_svc(state);
		break;

	case ESR_EC_DABORT_EL0:
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort, expected_fault_handler);
		break;

	case ESR_EC_MSR_TRAP:
		handle_msr_trap(state, esr);
		break;

	case ESR_EC_IABORT_EL0:
		handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort, expected_fault_handler);
		break;

	case ESR_EC_IABORT_EL1:
#ifdef CONFIG_XNUPOST
		if ((expected_fault_handler != NULL) && expected_fault_handler(state)) {
			break;
		}
#endif /* CONFIG_XNUPOST */

		panic_with_thread_kernel_state("Kernel instruction fetch abort", state);

	case ESR_EC_PC_ALIGN:
		handle_pc_align(state);
		__builtin_unreachable();

	case ESR_EC_DABORT_EL1:
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort, expected_fault_handler);
		break;

	case ESR_EC_UNCATEGORIZED:
		assert(!ESR_ISS(esr));

#if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
		if (handle_msr_write_from_xnupost(state, esr)) {
			break;
		}
#endif
		handle_uncategorized(&context->ss);
		break;

	case ESR_EC_SP_ALIGN:
		handle_sp_align(state);
		__builtin_unreachable();

	case ESR_EC_BKPT_AARCH32:
		handle_breakpoint(state, esr);
		__builtin_unreachable();

	case ESR_EC_BRK_AARCH64:
		if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
			handle_kernel_breakpoint(state, esr);
		} else {
			handle_breakpoint(state, esr);
		}
		__builtin_unreachable();

	case ESR_EC_BKPT_REG_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_breakpoint(state, esr);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_BKPT_REG_MATCH_EL1:
		panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
		__builtin_unreachable();

	case ESR_EC_SW_STEP_DEBUG_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_sw_step_debug(state);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_SW_STEP_DEBUG_EL1:
		panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
		__builtin_unreachable();

	case ESR_EC_WATCHPT_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_watchpoint(far);
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_WATCHPT_MATCH_EL1:
		/*
		 * If we hit a watchpoint in kernel mode, it was probably in a copyin/copyout
		 * path that we don't want to abort. Turn off watchpoints and keep going;
		 * we'll turn them back on in return_from_exception.
		 */
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			arm_debug_set(NULL);
			break; /* return to first level handler */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
		    class, state, class, esr, (void *)far);
		__builtin_unreachable();

	case ESR_EC_TRAP_SIMD_FP:
		handle_simd_trap(state, esr);
		__builtin_unreachable();

	case ESR_EC_ILLEGAL_INSTR_SET:
		if (EXCB_ACTION_RERUN !=
		    ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
			// instruction is not re-executed
			panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
			    state, class, esr, (void *)far, get_saved_state_cpsr(state));
		}
		// must clear this fault in PSR to re-run
		mask_saved_state_cpsr(state, 0, PSR64_IL);
		break;

	case ESR_EC_MCR_MRC_CP15_TRAP:
	case ESR_EC_MCRR_MRRC_CP15_TRAP:
	case ESR_EC_MCR_MRC_CP14_TRAP:
	case ESR_EC_LDC_STC_CP14_TRAP:
	case ESR_EC_MCRR_MRRC_CP14_TRAP:
		handle_user_trapped_instruction32(state, esr);
		__builtin_unreachable();

	case ESR_EC_WFI_WFE:
		// Use of WFI or WFE instruction when they have been disabled for EL0
		handle_wf_trap(state);
		__builtin_unreachable();

	case ESR_EC_FLOATING_POINT_64:
		handle_fp_trap(state, esr);
		__builtin_unreachable();

	default:
		panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p",
		    state, class, esr, (void *)far);
		__builtin_unreachable();
	}

#ifdef CONFIG_XNUPOST
	if (saved_expected_fault_handler != NULL) {
		thread->machine.expected_fault_handler = saved_expected_fault_handler;
		thread->machine.expected_fault_addr = saved_expected_fault_addr;
	}
#endif /* CONFIG_XNUPOST */

	if (recover) {
		thread->recover = recover;
	}
	if (is_user) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END,
		    esr, far, get_saved_state_pc(state), 0, 0);
		thread->machine.exception_trace_code = 0;
	} else {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_END,
		    esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0);
	}
#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("synchronous exception changed preemption level from %d to %d", preemption_level, get_preemption_level());
	}
#endif
}

/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 */
static void
handle_uncategorized(arm_saved_state_t *state)
{
	exception_type_t           exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t     numcodes = 2;
	uint32_t                   instr = 0;

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

#if CONFIG_DTRACE

	if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
		/*
		 * For a 64-bit user process, we care about all 4 bytes of the
		 * instruction.
		 */
		if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
			if (dtrace_user_probe(state) == KERN_SUCCESS) {
				return;
			}
		}
	} else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
		/*
		 * For a 32-bit user process, we check for thumb mode, in
		 * which case we only care about a 2-byte instruction length.
		 * For non-thumb mode, we care about all 4 bytes of the instruction.
		 */
		if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
			if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
			    ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		} else {
			if ((instr == FASTTRAP_ARM32_INSTR) ||
			    (instr == FASTTRAP_ARM32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		}
	}

#endif /* CONFIG_DTRACE */

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		if (IS_ARM_GDB_TRAP(instr)) {
			boolean_t interrupt_state;
			exception = EXC_BREAKPOINT;

			interrupt_state = ml_set_interrupts_enabled(FALSE);

			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			current_thread()->machine.kpcb = state;

			/* Hop into the debugger (typically either due to a
			 * fatal exception, an explicit panic, or a stackshot
			 * request).
			 */
			DebuggerCall(exception, state);

			current_thread()->machine.kpcb = NULL;
			(void) ml_set_interrupts_enabled(interrupt_state);
			return;
		} else {
			panic("Undefined kernel instruction: pc=%p instr=%x\n", (void*)get_saved_state_pc(state), instr);
		}
	}

	/*
	 * Check for GDB breakpoint via illegal opcode.
	 */
	if (IS_ARM_GDB_TRAP(instr)) {
		exception = EXC_BREAKPOINT;
		codes[0] = EXC_ARM_BREAKPOINT;
		codes[1] = instr;
	} else {
		codes[1] = instr;
	}

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

#if __has_feature(ptrauth_calls)
static const uint16_t ptrauth_brk_comment_base = 0xc470;

static inline bool
brk_comment_is_ptrauth(uint16_t comment)
{
	return comment >= ptrauth_brk_comment_base &&
	       comment <= ptrauth_brk_comment_base + ptrauth_key_asdb;
}

static inline const char *
brk_comment_to_ptrauth_key(uint16_t comment)
{
	switch (comment - ptrauth_brk_comment_base) {
	case ptrauth_key_asia:
		return "IA";
	case ptrauth_key_asib:
		return "IB";
	case ptrauth_key_asda:
		return "DA";
	case ptrauth_key_asdb:
		return "DB";
	default:
		__builtin_unreachable();
	}
}
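
/*
 * Illustrative mapping, assuming the ptrauth_key enumerators take their usual
 * <ptrauth.h> values 0 through 3: BRK comments 0xc470, 0xc471, 0xc472 and
 * 0xc473 decode to the IA, IB, DA and DB keys respectively.
 */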
#endif /* __has_feature(ptrauth_calls) */

static void
handle_kernel_breakpoint(arm_saved_state_t *state, uint32_t esr)
{
	uint16_t comment = ISS_BRK_COMMENT(esr);

#if __has_feature(ptrauth_calls)
	if (brk_comment_is_ptrauth(comment)) {
		const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Ptrauth failure with %s key resulted in 0x%016llx";
		/* strlen() of a string literal folds to a compile-time constant, so
		 * this sizes the buffer for the worst-case expansion of each conversion. */
		char msg[strlen(msg_fmt)
		- strlen("0x%04X") + strlen("0xFFFF")
		- strlen("%s") + strlen("IA")
		- strlen("0x%016llx") + strlen("0xFFFFFFFFFFFFFFFF")
		+ 1];
		const char *key = brk_comment_to_ptrauth_key(comment);
		snprintf(msg, sizeof(msg), msg_fmt, comment, key, saved_state64(state)->x[16]);

		panic_with_thread_kernel_state(msg, state);
	}
#endif /* __has_feature(ptrauth_calls) */

	const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Panic (by design)";
	char msg[strlen(msg_fmt) - strlen("0x%04X") + strlen("0xFFFF") + 1];
	snprintf(msg, sizeof(msg), msg_fmt, comment);

	panic_with_thread_kernel_state(msg, state);
}

static void
handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused)
{
	exception_type_t           exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT};
	mach_msg_type_number_t     numcodes = 2;

#if __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__
	if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 &&
	    brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) {
		exception |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__ */

	codes[1] = get_saved_state_pc(state);
	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_watchpoint(vm_offset_t fault_addr)
{
	exception_type_t           exception = EXC_BREAKPOINT;
	mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG};
	mach_msg_type_number_t     numcodes = 2;

	codes[1] = fault_addr;
	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
    abort_inspector_t inspect_abort, abort_handler_t handler, expected_fault_handler_t expected_fault_handler)
{
	fault_status_t fault_code;
	vm_prot_t      fault_type;

	inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
	handler(state, esr, fault_addr, fault_code, fault_type, recover, expected_fault_handler);
}

static void
inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.instr_ex_cnt++;
	*fault_code = ISS_IA_FSC(iss);
	*fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
}

static void
inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
	getCpuDatap()->cpu_stat.data_ex_cnt++;
	*fault_code = ISS_DA_FSC(iss);

	/*
	 * Cache maintenance operations always report faults as write access.
	 * Change these to read access, unless they report a permission fault.
	 * Only certain cache maintenance operations (e.g. 'dc ivac') require write
	 * access to the mapping, but if a cache maintenance operation that only requires
	 * read access generates a permission fault, then we will not be able to handle
	 * the fault regardless of whether we treat it as a read or write fault.
	 */
	if ((iss & ISS_DA_WNR) && (!(iss & ISS_DA_CM) || is_permission_fault(*fault_code))) {
		*fault_type = (VM_PROT_READ | VM_PROT_WRITE);
	} else {
		*fault_type = (VM_PROT_READ);
	}
}

#if __has_feature(ptrauth_calls)
static inline bool
fault_addr_bit(vm_offset_t fault_addr, unsigned int bit)
{
	return (bool)((fault_addr >> bit) & 1);
}

/**
 * Determines whether a fault address taken at EL0 contains a PAC error code
 * corresponding to the specified kind of ptrauth key.
 */
static bool
user_fault_addr_matches_pac_error_code(vm_offset_t fault_addr, bool data_key)
{
	bool instruction_tbi = !(get_tcr() & TCR_TBID0_TBI_DATA_ONLY);
	bool tbi = data_key || __improbable(instruction_tbi);
	unsigned int poison_shift;
	if (tbi) {
		poison_shift = 53;
	} else {
		poison_shift = 61;
	}

	/* PAC error codes are always in the form key_number:NOT(key_number) */
	bool poison_bit_1 = fault_addr_bit(fault_addr, poison_shift);
	bool poison_bit_2 = fault_addr_bit(fault_addr, poison_shift + 1);
	return poison_bit_1 != poison_bit_2;
}
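
/*
 * Illustrative example: with TBI in effect the poison bits sit at 53:54, so an
 * EL0 fault address whose bits 53 and 54 differ (one set, one clear) is taken
 * to carry a PAC error code; a successfully authenticated pointer leaves those
 * two bits equal, and the check above returns false.
 */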
#endif /* __has_feature(ptrauth_calls) */

static void
handle_pc_align(arm_saved_state_t *ss)
{
	exception_type_t           exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t     numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
	if (user_fault_addr_matches_pac_error_code(get_saved_state_pc(ss), false)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */

	codes[0] = EXC_ARM_DA_ALIGN;
	codes[1] = get_saved_state_pc(ss);

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_sp_align(arm_saved_state_t *ss)
{
	exception_type_t           exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t     numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
		panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
	}

	exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
	if (user_fault_addr_matches_pac_error_code(get_saved_state_sp(ss), true)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */

	codes[0] = EXC_ARM_SP_ALIGN;
	codes[1] = get_saved_state_sp(ss);

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_wf_trap(arm_saved_state_t *state)
{
	exception_type_t           exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t     numcodes = 2;
	uint32_t                   instr = 0;

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

	exc = EXC_BAD_INSTRUCTION;
	codes[0] = EXC_ARM_UNDEFINED;
	codes[1] = instr;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t           exc = EXC_ARITHMETIC;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t     numcodes = 2;
	uint32_t                   instr = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("Floating point exception from kernel", state);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	/* The floating point trap flags are only valid if TFV is set. */
	if (!fp_exceptions_enabled) {
		exc = EXC_BAD_INSTRUCTION;
		codes[0] = EXC_ARM_UNDEFINED;
	} else if (!(esr & ISS_FP_TFV)) {
		codes[0] = EXC_ARM_FP_UNDEFINED;
	} else if (esr & ISS_FP_UFF) {
		codes[0] = EXC_ARM_FP_UF;
	} else if (esr & ISS_FP_OFF) {
		codes[0] = EXC_ARM_FP_OF;
	} else if (esr & ISS_FP_IOF) {
		codes[0] = EXC_ARM_FP_IO;
	} else if (esr & ISS_FP_DZF) {
		codes[0] = EXC_ARM_FP_DZ;
	} else if (esr & ISS_FP_IDF) {
		codes[0] = EXC_ARM_FP_ID;
	} else if (esr & ISS_FP_IXF) {
		codes[0] = EXC_ARM_FP_IX;
	} else {
		panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
	}

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

/*
 * handle_alignment_fault_from_user:
 *   state: Saved state
 *
 * Attempts to deal with an alignment fault from userspace (possibly by
 * emulating the faulting instruction). If emulation failed due to an
 * unserviceable fault, the ESR for that fault will be stored in the
 * recovery_esr field of the thread by the exception code.
 *
 * Returns:
 *   -1:     Emulation failed (emulation of state/instr not supported)
 *   0:      Successfully emulated the instruction
 *   EFAULT: Emulation failed (probably due to permissions)
 *   EINVAL: Emulation failed (probably due to a bad address)
 */
static int
handle_alignment_fault_from_user(arm_saved_state_t *state, kern_return_t *vmfr)
{
	/* In this source drop the emulation body is compiled out; the stub
	 * reports that emulation of the state/instruction is unsupported. */
	int ret = -1;

#pragma unused (state)
#pragma unused (vmfr)

	return ret;
}

static void
handle_sw_step_debug(arm_saved_state_t *state)
{
	thread_t                   thread = current_thread();
	exception_type_t           exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t     numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
	}

	// Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
	if (thread->machine.DebugData != NULL) {
		thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
	} else {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
	}

	mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_IRQF | DAIF_FIQF);

	// Special encoding for gdb single step event on ARM
	exc = EXC_BREAKPOINT;
	codes[0] = 1;
	codes[1] = 0;

	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

static void
set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover)
{
#if defined(HAS_APPLE_PAC)
	thread_t thread = current_thread();
	const uintptr_t disc = ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER);
	const char *panic_msg = "Illegal thread->recover value %p";

	MANIPULATE_SIGNED_THREAD_STATE(iss,
	    // recover = (vm_offset_t)ptrauth_auth_data((void *)recover, ptrauth_key_function_pointer,
	    //     ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
	    "mov	x1, %[recover]		\n"
	    "mov	x6, %[disc]		\n"
	    "autia	x1, x6			\n"
	    // if (recover != (vm_offset_t)ptrauth_strip((void *)recover, ptrauth_key_function_pointer)) {
	    "mov	x6, x1			\n"
	    "xpaci	x6			\n"
	    "cmp	x1, x6			\n"
	    "beq	1f			\n"
	    //         panic("Illegal thread->recover value %p", (void *)recover);
	    "mov	x0, %[panic_msg]	\n"
	    "bl	_panic			\n"
	    // }
	    "1:				\n"
	    "str	x1, [x0, %[SS64_PC]]	\n",
	    [recover]   "r"(recover),
	    [disc]      "r"(disc),
	    [panic_msg] "r"(panic_msg)
	    );
#else
	set_saved_state_pc(iss, recover);
#endif
}

static void
handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
{
	exception_type_t           exc = EXC_BAD_ACCESS;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t     numcodes = 2;
	thread_t                   thread = current_thread();

	(void)esr;
	(void)expected_fault_handler;

	if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
	}

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */

	if (is_vm_fault(fault_code)) {
		vm_map_t      map = thread->map;
		vm_offset_t   vm_fault_addr = fault_addr;
		kern_return_t result = KERN_FAILURE;

		assert(map != kernel_map);

		if (!(fault_type & VM_PROT_EXECUTE)) {
			vm_fault_addr = tbi_clear(fault_addr);
		}

#if CONFIG_DTRACE
		if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
			if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
				if (recover) {
					thread->machine.recover_esr = esr;
					thread->machine.recover_far = vm_fault_addr;
					set_saved_state_pc_to_recovery_handler(state, recover);
				} else {
					panic_with_thread_kernel_state("copyin/out has no recovery point", state);
				}
				return;
			} else {
				panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
			}
		}
#else
		(void)recover;
#endif

#if CONFIG_PGTRACE
		if (pgtrace_enabled) {
			/* Check to see if trace bit is set */
			result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
			if (result == KERN_SUCCESS) {
				return;
			}
		}
#endif
		/* check to see if it is just a pmap ref/modify fault */

		if (!is_translation_fault(fault_code)) {
			result = arm_fast_fault(map->pmap,
			    vm_fault_addr,
			    fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
		}
		if (result == KERN_SUCCESS) {
			return;
		}

		{
			/* We have to fault the page in */
			result = vm_fault(map, vm_fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}
		if (result == KERN_SUCCESS || result == KERN_ABORTED) {
			return;
		}

		/*
		 * vm_fault() should never return KERN_FAILURE for page faults from user space.
		 * If it does, we're leaking preemption disables somewhere in the kernel.
		 */
		if (__improbable(result == KERN_FAILURE)) {
			panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
		}

		codes[0] = result;
	} else if (is_alignment_fault(fault_code)) {
		kern_return_t vmfkr = KERN_SUCCESS;
		thread->machine.recover_esr = 0;
		thread->machine.recover_far = 0;
		int result = handle_alignment_fault_from_user(state, &vmfkr);
		if (result == 0) {
			/* Successfully emulated, or instruction
			 * copyin() for decode/emulation failed.
			 * Continue, or redrive instruction.
			 */
			thread_exception_return();
		} else if (((result == EFAULT) || (result == EINVAL)) &&
		    (thread->machine.recover_esr == 0)) {
			/*
			 * If we didn't actually take a fault, but got one of
			 * these errors, then we failed basic sanity checks of
			 * the fault address. Treat this as an invalid
			 * address.
			 */
			codes[0] = KERN_INVALID_ADDRESS;
		} else if ((result == EFAULT) &&
		    (thread->machine.recover_esr)) {
			/*
			 * Since alignment aborts are prioritized
			 * ahead of translation aborts, the misaligned
			 * atomic emulation flow may have triggered a
			 * VM pagefault, which the VM could not resolve.
			 * Report the VM fault error in codes[]
			 */

			codes[0] = vmfkr;
			assertf(vmfkr != KERN_SUCCESS, "Unexpected vmfkr 0x%x", vmfkr);
			/* Cause ESR_EC to reflect an EL0 abort */
			thread->machine.recover_esr &= ~ESR_EC_MASK;
			thread->machine.recover_esr |= (ESR_EC_DABORT_EL0 << ESR_EC_SHIFT);
			set_saved_state_esr(thread->machine.upcb, thread->machine.recover_esr);
			set_saved_state_far(thread->machine.upcb, thread->machine.recover_far);
			fault_addr = thread->machine.recover_far;
		} else {
			/* This was just an unsupported alignment
			 * exception. Misaligned atomic emulation
			 * timeouts fall in this category.
			 */
			codes[0] = EXC_ARM_DA_ALIGN;
		}
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic("User parity error.");
#endif
	} else {
		codes[0] = KERN_FAILURE;
	}

	codes[1] = fault_addr;
#if __has_feature(ptrauth_calls)
	bool is_data_abort = (ESR_EC(esr) == ESR_EC_DABORT_EL0);
	if (user_fault_addr_matches_pac_error_code(fault_addr, is_data_abort)) {
		exc |= EXC_PTRAUTH_BIT;
	}
#endif /* __has_feature(ptrauth_calls) */
	exception_triage(exc, codes, numcodes);
	__builtin_unreachable();
}

#if __ARM_PAN_AVAILABLE__
static int
is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
{
	// A PAN (Privileged Access Never) fault occurs on a data read/write at EL1
	// to a virtual address that is readable/writable from both EL1 and EL0.

	// To check for a PAN fault, we evaluate whether the following conditions are true:
	// 1. This is a permission fault
	// 2. PAN is enabled
	// 3. An AT instruction (on which PAN has no effect) on the same faulting address
	//    succeeds

	vm_offset_t pa;

	if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
		return FALSE;
	}

	if (esr & ISS_DA_WNR) {
		pa = mmu_kvtop_wpreflight(fault_addr);
	} else {
		pa = mmu_kvtop(fault_addr);
	}
	return (pa)? TRUE: FALSE;
}
#endif

1465static void
1466handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
f427ee49 1467 fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler)
5ba3f43e 1468{
cb323159 1469 thread_t thread = current_thread();
5ba3f43e
A
1470 (void)esr;
1471
f427ee49
A
1472#ifndef CONFIG_XNUPOST
1473 (void)expected_fault_handler;
1474#endif /* CONFIG_XNUPOST */
1475
5ba3f43e 1476#if CONFIG_DTRACE
cb323159 1477 if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
5ba3f43e
A
1478 if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
1479 /*
1480 * Point to next instruction, or recovery handler if set.
1481 */
1482 if (recover) {
f427ee49
A
1483 thread->machine.recover_esr = esr;
1484 thread->machine.recover_far = fault_addr;
cb323159 1485 set_saved_state_pc_to_recovery_handler(state, recover);
5ba3f43e 1486 } else {
cb323159 1487 add_saved_state_pc(state, 4);
5ba3f43e
A
1488 }
1489 return;
1490 } else {
5ba3f43e 1491 panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
5ba3f43e
A
1492 }
1493 }
1494#endif
1495
1496#if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */
0a7de745 1497 if (ml_at_interrupt_context()) {
5ba3f43e 1498 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
0a7de745 1499 }
5ba3f43e
A
1500#endif
1501
1502 if (is_vm_fault(fault_code)) {
cb323159
A
1503 kern_return_t result = KERN_FAILURE;
1504 vm_map_t map;
1505 int interruptible;
5ba3f43e 1506
cc8bc92a
A
1507 /*
1508 * Ensure no faults in the physical aperture. This could happen if
1509 * a page table is incorrectly allocated from the read only region
1510 * when running with KTRR.
1511 */
1512
f427ee49
A
1513#ifdef CONFIG_XNUPOST
1514 if (expected_fault_handler && expected_fault_handler(state)) {
d9a64523
A
1515 return;
1516 }
f427ee49 1517#endif /* CONFIG_XNUPOST */
d9a64523
A
1518
1519 if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
0a7de745 1520 panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
5ba3f43e
A
1521 }
1522
1523 if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
1524 map = kernel_map;
1525 interruptible = THREAD_UNINT;
1526 } else {
1527 map = thread->map;
c3c9b80d
A
1528
1529 /**
1530 * In the case that the recovery handler is set (e.g., during copyio
1531 * and dtrace probes), we don't want the vm_fault() operation to be
1532 * aborted early. Those code paths can't handle restarting the
1533 * vm_fault() operation so don't allow it to return early without
1534 * creating the wanted mapping.
1535 */
1536 interruptible = (recover) ? THREAD_UNINT : THREAD_ABORTSAFE;
5ba3f43e
A
1537 }
1538
1539#if CONFIG_PGTRACE
1540 if (pgtrace_enabled) {
1541 /* Check to see if trace bit is set */
1542 result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
0a7de745
A
1543 if (result == KERN_SUCCESS) {
1544 return;
1545 }
5ba3f43e
A
1546 }
1547
0a7de745 1548 if (ml_at_interrupt_context()) {
5ba3f43e 1549 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
0a7de745 1550 }
5ba3f43e
A
1551#endif
1552
1553 /* check to see if it is just a pmap ref/modify fault */
d9a64523 1554 if (!is_translation_fault(fault_code)) {
f427ee49
A
1555 result = arm_fast_fault(map->pmap,
1556 fault_addr,
1557 fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
0a7de745
A
1558 if (result == KERN_SUCCESS) {
1559 return;
1560 }
d9a64523 1561 }

        if (result != KERN_PROTECTION_FAILURE) {
            /*
             * We have to "fault" the page in.
             */
            result = vm_fault(map, fault_addr, fault_type,
                /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
                /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
        }

        if (result == KERN_SUCCESS) {
            return;
        }

        /*
         * If we have a recover handler, invoke it now.
         */
        if (recover) {
            thread->machine.recover_esr = esr;
            thread->machine.recover_far = fault_addr;
            set_saved_state_pc_to_recovery_handler(state, recover);
            return;
        }

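        /*
         * PAN (Privileged Access Never, ARMv8.1) raises an abort when
         * privileged code dereferences user-accessible memory outside the
         * sanctioned copyio routines; reaching this point with a PAN fault
         * therefore indicates a stray kernel dereference of user memory.
         */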
#if __ARM_PAN_AVAILABLE__
        if (is_pan_fault(state, esr, fault_addr, fault_code)) {
            panic_with_thread_kernel_state("Privileged access never abort.", state);
        }
#endif

#if CONFIG_PGTRACE
    } else if (ml_at_interrupt_context()) {
        panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
#endif
    } else if (is_alignment_fault(fault_code)) {
        if (recover) {
            thread->machine.recover_esr = esr;
            thread->machine.recover_far = fault_addr;
            set_saved_state_pc_to_recovery_handler(state, recover);
            return;
        }
        panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
    } else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
        if (fault_code == FSC_SYNC_PARITY) {
            arm64_platform_error(state, esr, fault_addr);
            return;
        }
#else
        panic_with_thread_kernel_state("Kernel parity error.", state);
#endif
    } else {
        kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
    }

    panic_with_thread_kernel_state("Kernel data abort.", state);
}

extern void syscall_trace(struct arm_saved_state * regs);

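/*
 * SVC dispatch: the syscall number arrives in x16. Negative numbers select
 * Mach traps, non-negative numbers select BSD/unix syscalls, and
 * PLATFORM_SYSCALL_TRAP_NO is diverted before either table is consulted.
 * Illustrative userland sequence for a Mach trap (not from this file):
 *
 *     mov  x16, #-26      // e.g. mach_reply_port()'s trap number, negated
 *     svc  #0x80          // trap into the SLEH, which calls handle_svc()
 */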
static void
handle_svc(arm_saved_state_t *state)
{
    int trap_no = get_saved_state_svc_number(state);
    thread_t thread = current_thread();
    struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
    syscall_trace(state);
#endif

    thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

    if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
        platform_syscall(state);
        panic("Returned from platform_syscall()?");
    }

    mach_kauth_cred_uthread_update();

    if (trap_no < 0) {
        switch (trap_no) {
        case MACH_ARM_TRAP_ABSTIME:
            handle_mach_absolute_time_trap(state);
            return;
        case MACH_ARM_TRAP_CONTTIME:
            handle_mach_continuous_time_trap(state);
            return;
        }

        /* Counting perhaps better in the handler, but this is how it's been done */
        thread->syscalls_mach++;
        mach_syscall(state);
    } else {
        /* Counting perhaps better in the handler, but this is how it's been done */
        thread->syscalls_unix++;
        p = get_bsdthreadtask_info(thread);

        assert(p);

        unix_syscall(state, thread, (struct uthread*)thread->uthread, p);
    }
}

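/*
 * These two Mach traps are serviced entirely here in the exception handler:
 * the result is written straight into x0, the AAPCS64 return-value register,
 * and the Mach trap table is never consulted.
 */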
static void
handle_mach_absolute_time_trap(arm_saved_state_t *state)
{
    uint64_t now = mach_absolute_time();
    saved_state64(state)->x[0] = now;
}

static void
handle_mach_continuous_time_trap(arm_saved_state_t *state)
{
    uint64_t now = mach_continuous_time();
    saved_state64(state)->x[0] = now;
}

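/*
 * A user-mode MSR/MRS that targets a system register not accessible at EL0
 * traps here. The faulting opcode is copied in from the saved PC and
 * forwarded to userspace as an EXC_BAD_INSTRUCTION Mach exception, with the
 * instruction word in codes[1].
 */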
__attribute__((noreturn))
static void
handle_msr_trap(arm_saved_state_t *state, uint32_t esr)
{
    exception_type_t exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    if (!is_saved_state64(state)) {
        panic("MSR/MRS trap (ESR 0x%x) from 32-bit state\n", esr);
    }

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        panic("MSR/MRS trap (ESR 0x%x) from kernel\n", esr);
    }

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
    codes[1] = instr;

    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
{
    exception_type_t exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr;

    if (is_saved_state64(state)) {
        panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
    }

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
    }

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
    codes[1] = instr;

    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

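/*
 * SIMD/FP accesses trap when the facility is disabled for the current EL
 * (e.g. via CPACR_EL1). The kernel itself never intentionally uses SIMD,
 * so a kernel-mode trap is fatal, while a user-mode one is triaged like
 * any other undefined instruction.
 */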
static void
handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
{
    exception_type_t exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
    }

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
    codes[1] = instr;

    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

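/*
 * IRQ and FIQ handlers are bracketed by a prologue/epilogue pair that emits
 * kdebug interrupt tracepoints; under MACH_ASSERT the preemption level is
 * also verified, since a handler must return at the same preemption level
 * it entered with.
 */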
void
sleh_irq(arm_saved_state_t *state)
{
    cpu_data_t * cdp __unused = getCpuDatap();
#if MACH_ASSERT
    int preemption_level = get_preemption_level();
#endif

    sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

#if USE_APPLEARMSMP
    PE_handle_ext_interrupt();
#else
    /* Run the registered interrupt handler. */
    cdp->interrupt_handler(cdp->interrupt_target,
        cdp->interrupt_refCon,
        cdp->interrupt_nub,
        cdp->interrupt_source);
#endif

    entropy_collect();

    sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
    if (preemption_level != get_preemption_level()) {
        panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, get_preemption_level());
    }
#endif
}

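/*
 * FIQ sources are classified before the prologue so the tracepoint carries
 * the right type: fast IPIs first (where supported), then PMU interrupts,
 * then the decrementer/timer. The dangling-else chains below mirror that
 * priority under each configuration.
 */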
void
sleh_fiq(arm_saved_state_t *state)
{
    unsigned int type = DBG_INTR_TYPE_UNKNOWN;
#if MACH_ASSERT
    int preemption_level = get_preemption_level();
#endif

#if MONOTONIC_FIQ
    uint64_t pmcr0 = 0, upmsr = 0;
#endif /* MONOTONIC_FIQ */

#if defined(HAS_IPI)
    boolean_t is_ipi = FALSE;
    uint64_t ipi_sr = 0;

    if (gFastIPI) {
        MRS(ipi_sr, "S3_5_C15_C1_1");

        if (ipi_sr & 1) {
            is_ipi = TRUE;
        }
    }

    if (is_ipi) {
        type = DBG_INTR_TYPE_IPI;
    } else
#endif /* defined(HAS_IPI) */
#if MONOTONIC_FIQ
    if (mt_pmi_pending(&pmcr0, &upmsr)) {
        type = DBG_INTR_TYPE_PMI;
    } else
#endif /* MONOTONIC_FIQ */
    if (ml_get_timer_pending()) {
        type = DBG_INTR_TYPE_TIMER;
    }

    sleh_interrupt_handler_prologue(state, type);

#if defined(HAS_IPI)
    if (is_ipi) {
        /*
         * Order is important here: we must ack the IPI by writing IPI_SR
         * before we call cpu_signal_handler(). Otherwise, there will be
         * a window between the completion of pending-signal processing in
         * cpu_signal_handler() and the ack during which a newly-issued
         * IPI to this CPU may be lost. ISB is required to ensure the msr
         * is retired before execution of cpu_signal_handler().
         */
        MSR("S3_5_C15_C1_1", ipi_sr);
        __builtin_arm_isb(ISB_SY);
        cpu_signal_handler();
    } else
#endif /* defined(HAS_IPI) */
#if MONOTONIC_FIQ
    if (type == DBG_INTR_TYPE_PMI) {
        INTERRUPT_MASKED_DEBUG_START(mt_fiq, DBG_INTR_TYPE_PMI);
        mt_fiq(getCpuDatap(), pmcr0, upmsr);
        INTERRUPT_MASKED_DEBUG_END();
    } else
#endif /* MONOTONIC_FIQ */
    {
        /*
         * We don't know that this is a timer, but we don't have insight into
         * the other interrupts that go down this path.
         */

        cpu_data_t *cdp = getCpuDatap();

        cdp->cpu_decrementer = -1; /* Large */

        /*
         * ARM64_TODO: whether we're coming from userland is ignored right now.
         * We can easily thread it through, but not bothering for the
         * moment (AArch32 doesn't either).
         */
        INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_TIMER);
        rtclock_intr(TRUE);
        INTERRUPT_MASKED_DEBUG_END();
    }

    sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
    if (preemption_level != get_preemption_level()) {
        panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, get_preemption_level());
    }
#endif
}

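/*
 * SErrors are asynchronous external aborts (e.g. bus or ECC errors); they
 * carry less precise syndrome information than synchronous aborts and are
 * handed to the platform error handler rather than resolved here.
 */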
void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
    task_vtimer_check(current_thread());

    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_START,
        esr, VM_KERNEL_ADDRHIDE(far));
    arm_saved_state_t *state = &context->ss;
#if MACH_ASSERT
    int preemption_level = get_preemption_level();
#endif

    ASSERT_CONTEXT_SANITY(context);
    arm64_platform_error(state, esr, far);
#if MACH_ASSERT
    if (preemption_level != get_preemption_level()) {
        panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level());
    }
#endif
    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_END,
        esr, VM_KERNEL_ADDRHIDE(far));
}

void
mach_syscall_trace_exit(unsigned int retval,
    unsigned int call_number)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
        DBG_FUNC_END, retval, 0, 0, 0, 0);
}

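/*
 * The syscall return value goes in x0 per the arm64 ABI. The trap number
 * is negated for the trace-exit event because Mach traps dispatch with
 * negative numbers (see handle_svc) while kdebug codes are positive.
 */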
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
    thread_t thread;
    struct arm_saved_state *state;

    thread = current_thread();
    state = get_user_regs(thread);

    assert(is_saved_state64(state));
    saved_state64(state)->x[0] = error;

#if MACH_ASSERT
    kern_allocation_name_t
    prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
    assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* MACH_ASSERT */

    if (kdebug_enable) {
        /* Invert syscall number (negative for a mach syscall) */
        mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
    }

    thread_exception_return();
}

void
syscall_trace(
    struct arm_saved_state * regs __unused)
{
    /* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}

static void
sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
{
    bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

    task_vtimer_check(current_thread());

    uint64_t pc = is_user ? get_saved_state_pc(state) :
        VM_KERNEL_UNSLIDE(get_saved_state_pc(state));

    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
        0, pc, is_user, type);

#if CONFIG_TELEMETRY
    if (telemetry_needs_record) {
        telemetry_mark_curthread((boolean_t)is_user, FALSE);
    }
#endif /* CONFIG_TELEMETRY */
}

static void
sleh_interrupt_handler_epilogue(void)
{
#if KPERF
    kperf_interrupt();
#endif /* KPERF */
    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}

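/*
 * A stack-pointer sanity failure lands here. If SP sits within the single
 * guard page directly below the kernel stack, the likely cause is stack
 * overflow; anything further afield is treated as pointer corruption.
 */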
void
sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
{
    thread_t thread = current_thread();
    vm_offset_t kernel_stack_bottom, sp;

    sp = get_saved_state_sp(&context->ss);
    kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;

    if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
        panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
    }

    panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
}