[apple/xnu.git] / osfmk / arm64 / sleh.c (xnu-6153.121.1)
1 /*
2 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm/caches_internal.h>
30 #include <arm/cpu_data.h>
31 #include <arm/cpu_data_internal.h>
32 #include <arm/misc_protos.h>
33 #include <arm/thread.h>
34 #include <arm/rtclock.h>
35 #include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
36 #include <arm64/proc_reg.h>
37 #include <arm64/machine_machdep.h>
38 #include <arm64/monotonic.h>
39
40 #include <kern/debug.h>
41 #include <kern/thread.h>
42 #include <mach/exception.h>
43 #include <mach/vm_types.h>
44 #include <mach/machine/thread_status.h>
45
46 #include <machine/atomic.h>
47 #include <machine/limits.h>
48
49 #include <pexpert/arm/protos.h>
50
51 #include <vm/vm_page.h>
52 #include <vm/pmap.h>
53 #include <vm/vm_fault.h>
54 #include <vm/vm_kern.h>
55
56 #include <sys/kdebug.h>
57 #include <kperf/kperf.h>
58
59 #include <kern/policy_internal.h>
60 #if CONFIG_TELEMETRY
61 #include <kern/telemetry.h>
62 #endif
63
64 #include <prng/random.h>
65
66 #ifndef __arm64__
67 #error Should only be compiling for arm64.
68 #endif
69
70 #define TEST_CONTEXT32_SANITY(context) \
71 (context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
72 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)
73
74 #define TEST_CONTEXT64_SANITY(context) \
75 (context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
76 context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)
77
78 #define ASSERT_CONTEXT_SANITY(context) \
79 assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))
80
81
82 #define COPYIN(src, dst, size) \
83 (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
84 copyin_kern(src, dst, size) : \
85 copyin(src, dst, size)
86
87 #define COPYOUT(src, dst, size) \
88 (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
89 copyout_kern(src, dst, size) : \
90 copyout(src, dst, size)
91
92 // Below stringizes a macro argument (two levels, so the argument is macro-expanded first)
93 #define STR1(x) #x
94 #define STR(x) STR1(x)
95
96 void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;
97
98 void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
99 void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);
100 void sleh_irq(arm_saved_state_t *);
101 void sleh_fiq(arm_saved_state_t *);
102 void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
103 void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;
104
105 static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
106 static void sleh_interrupt_handler_epilogue(void);
107
108 static void handle_svc(arm_saved_state_t *);
109 static void handle_mach_absolute_time_trap(arm_saved_state_t *);
110 static void handle_mach_continuous_time_trap(arm_saved_state_t *);
111
112 static void handle_msr_trap(arm_saved_state_t *state, uint32_t iss);
113
114 extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);
115
116 static void handle_uncategorized(arm_saved_state_t *);
117 static void handle_breakpoint(arm_saved_state_t *) __dead2;
118
119 typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
120 static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
121 static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);
122
123 static int is_vm_fault(fault_status_t);
124 static int is_translation_fault(fault_status_t);
125 static int is_alignment_fault(fault_status_t);
126
127 typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
128 static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
129 static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
130
131 static void handle_pc_align(arm_saved_state_t *ss) __dead2;
132 static void handle_sp_align(arm_saved_state_t *ss) __dead2;
133 static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
134 static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
135 static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;
136
137 static void handle_watchpoint(vm_offset_t fault_addr) __dead2;
138
139 static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t);
140
141 static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;
142
143 static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;
144
145 extern void mach_kauth_cred_uthread_update(void);
146 void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
147
148 struct uthread;
149 struct proc;
150
151 extern void
152 unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
153 struct uthread * uthread, struct proc * proc);
154
155 extern void
156 mach_syscall(struct arm_saved_state*);
157
158 #if CONFIG_DTRACE
159 extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
160 extern boolean_t dtrace_tally_fault(user_addr_t);
161
162 /*
163 * Traps for userland processing. We can't include bsd/sys/fasttrap_isa.h,
164 * so the trap instructions are copied over from that file.
165 * Need to keep these in sync!
166 */
167 #define FASTTRAP_ARM32_INSTR 0xe7ffdefc
168 #define FASTTRAP_THUMB32_INSTR 0xdefc
169 #define FASTTRAP_ARM64_INSTR 0xe7eeee7e
170
171 #define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
172 #define FASTTRAP_THUMB32_RET_INSTR 0xdefb
173 #define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d
174
175 /* See <rdar://problem/4613924> */
176 perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
177 #endif
178
179
180 #if CONFIG_PGTRACE
181 extern boolean_t pgtrace_enabled;
182 #endif
183
184 #if __ARM_PAN_AVAILABLE__
185 #ifdef CONFIG_XNUPOST
186 extern vm_offset_t pan_test_addr;
187 extern vm_offset_t pan_ro_addr;
188 extern volatile int pan_exception_level;
189 extern volatile char pan_fault_value;
190 #endif
191 #endif
192
193 #if HAS_TWO_STAGE_SPR_LOCK
194 #ifdef CONFIG_XNUPOST
195 extern volatile vm_offset_t spr_lock_test_addr;
196 extern volatile uint32_t spr_lock_exception_esr;
197 #endif
198 #endif
199
200 #if defined(APPLETYPHOON)
201 #define CPU_NAME "Typhoon"
202 #elif defined(APPLETWISTER)
203 #define CPU_NAME "Twister"
204 #elif defined(APPLEHURRICANE)
205 #define CPU_NAME "Hurricane"
206 #elif defined(APPLELIGHTNING)
207 #define CPU_NAME "Lightning"
208 #else
209 #define CPU_NAME "Unknown"
210 #endif
211
212 #if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
213 #define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
214 #define ESR_WT_REASON(esr) ((esr) & 0xff)
215
216 #define WT_REASON_NONE 0
217 #define WT_REASON_INTEGRITY_FAIL 1
218 #define WT_REASON_BAD_SYSCALL 2
219 #define WT_REASON_NOT_LOCKED 3
220 #define WT_REASON_ALREADY_LOCKED 4
221 #define WT_REASON_SW_REQ 5
222 #define WT_REASON_PT_INVALID 6
223 #define WT_REASON_PT_VIOLATION 7
224 #define WT_REASON_REG_VIOLATION 8
225 #endif
226
227 #if defined(HAS_IPI)
228 void cpu_signal_handler(void);
229 extern unsigned int gFastIPI;
230 #endif /* defined(HAS_IPI) */
231
232 extern vm_offset_t static_memory_end;
233
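/*
 * Rotate 'value' right by 'shift' bits (32-bit rotate).
 * Used below to mix new interrupt timestamps into the entropy buffer.
 */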
234 static inline unsigned
235 __ror(unsigned value, unsigned shift)
236 {
237 return ((unsigned)(value) >> (unsigned)(shift)) |
238 (unsigned)(value) << ((unsigned)(sizeof(unsigned) * CHAR_BIT) - (unsigned)(shift));
239 }
240
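/*
 * Read the SoC-specific error status registers (LSU/FED/MMU and L2/LLC,
 * plus MIGSTS or DPC where present) and panic with their contents.
 * Does not return.
 */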
241 __dead2
242 static void
243 arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
244 {
245 #if defined(APPLE_ARM64_ARCH_FAMILY)
246 uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
247 #if defined(NO_ECORE)
248 uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;
249
250 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
251 l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
252 l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
253 l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
254 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
255 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
256
257 panic_plain("Unhandled " CPU_NAME
258 " implementation specific error. state=%p esr=%#x far=%p\n"
259 "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
260 "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
261 state, esr, (void *)far,
262 (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
263 (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
264
265 #elif defined(HAS_MIGSTS)
266 uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;
267
268 mpidr = __builtin_arm_rsr64("MPIDR_EL1");
269 migsts = __builtin_arm_rsr64(STR(ARM64_REG_MIGSTS_EL1));
270 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
271 l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
272 l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
273 l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
274 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
275 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
276
277 panic_plain("Unhandled " CPU_NAME
278 " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
279 "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
280 "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
281 state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
282 (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
283 (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
284 #else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
285 uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;
286 #if defined(HAS_DPC_ERR)
287 uint64_t dpc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_DPC_ERR_STS));
288 #endif // defined(HAS_DPC_ERR)
289
290 mpidr = __builtin_arm_rsr64("MPIDR_EL1");
291
292 if (mpidr & MPIDR_PNE) {
293 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
294 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
295 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
296 } else {
297 mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_MMU_ERR_STS));
298 lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_LSU_ERR_STS));
299 fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_FED_ERR_STS));
300 }
301
302 llc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
303 llc_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
304 llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
305
306 panic_plain("Unhandled " CPU_NAME
307 " implementation specific error. state=%p esr=%#x far=%p p-core?%d"
308 #if defined(HAS_DPC_ERR)
309 " dpc_err_sts:%p"
310 #endif
311 "\n"
312 "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
313 "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n",
314 state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
315 #if defined(HAS_DPC_ERR)
316 (void *)dpc_err_sts,
317 #endif
318 (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
319 (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
320 #endif
321 #else // !defined(APPLE_ARM64_ARCH_FAMILY)
322 #pragma unused (state, esr, far)
323 panic_plain("Unhandled implementation specific error\n");
324 #endif
325 }
326
327 #if CONFIG_KERNEL_INTEGRITY
328 #pragma clang diagnostic push
329 #pragma clang diagnostic ignored "-Wunused-parameter"
330 static void
331 kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
332 {
333 #if defined(KERNEL_INTEGRITY_WT)
334 #if (DEVELOPMENT || DEBUG)
335 if (ESR_WT_SERROR(esr)) {
336 switch (ESR_WT_REASON(esr)) {
337 case WT_REASON_INTEGRITY_FAIL:
338 panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
339 case WT_REASON_BAD_SYSCALL:
340 panic_plain("Kernel integrity, bad syscall.");
341 case WT_REASON_NOT_LOCKED:
342 panic_plain("Kernel integrity, not locked.");
343 case WT_REASON_ALREADY_LOCKED:
344 panic_plain("Kernel integrity, already locked.");
345 case WT_REASON_SW_REQ:
346 panic_plain("Kernel integrity, software request.");
347 case WT_REASON_PT_INVALID:
348 panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
349 "walking 0x%016lx.", far);
350 case WT_REASON_PT_VIOLATION:
351 panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
352 far);
353 case WT_REASON_REG_VIOLATION:
354 panic_plain("Kernel integrity, violation in system register %d.",
355 (unsigned) far);
356 default:
357 panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
358 }
359 }
360 #else
361 if (ESR_WT_SERROR(esr)) {
362 panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
363 }
364 #endif
365 #endif
366 }
367 #pragma clang diagnostic pop
368 #endif
369
370 static void
371 arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
372 {
373 cpu_data_t *cdp = getCpuDatap();
374
375 #if CONFIG_KERNEL_INTEGRITY
376 kernel_integrity_error_handler(esr, far);
377 #endif
378
379 if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
380 (*(platform_error_handler_t)cdp->platform_error_handler)(cdp->cpu_id, far);
381 } else {
382 arm64_implementation_specific_error(state, esr, far);
383 }
384 }
385
386 void
387 panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
388 {
389 boolean_t ss_valid;
390
391 ss_valid = is_saved_state64(ss);
392 arm_saved_state64_t *state = saved_state64(ss);
393
394 panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
395 "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n"
396 "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n"
397 "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
398 "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
399 "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
400 "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
401 "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
402 "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n"
403 "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
404 msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
405 state->x[0], state->x[1], state->x[2], state->x[3],
406 state->x[4], state->x[5], state->x[6], state->x[7],
407 state->x[8], state->x[9], state->x[10], state->x[11],
408 state->x[12], state->x[13], state->x[14], state->x[15],
409 state->x[16], state->x[17], state->x[18], state->x[19],
410 state->x[20], state->x[21], state->x[22], state->x[23],
411 state->x[24], state->x[25], state->x[26], state->x[27],
412 state->x[28], state->fp, state->lr, state->sp,
413 state->pc, state->cpsr, state->esr, state->far);
414 }
415
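/*
 * Second-level handler for synchronous exceptions taken while SP1 (the
 * exception stack) is selected. A kernel GDB trap instruction is handed to
 * the debugger; any other exception class, or a return from the debugger,
 * panics.
 */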
416 void
417 sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
418 {
419 esr_exception_class_t class = ESR_EC(esr);
420 arm_saved_state_t * state = &context->ss;
421
422 switch (class) {
423 case ESR_EC_UNCATEGORIZED:
424 {
425 uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
426 if (IS_ARM_GDB_TRAP(instr)) {
427 DebuggerCall(EXC_BREAKPOINT, state);
428 }
429 // Intentionally fall through to panic if we return from the debugger
430 }
431 default:
432 panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
433 }
434 }
435
436 #if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
437 static bool
438 handle_msr_write_from_xnupost(arm_saved_state_t *state, uint32_t esr)
439 {
440 user_addr_t pc = get_saved_state_pc(state);
441 if ((spr_lock_test_addr != 0) && (pc == spr_lock_test_addr)) {
442 spr_lock_exception_esr = esr;
443 set_saved_state_pc(state, pc + 4);
444 return true;
445 }
446
447 return false;
448 }
449 #endif
450
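/*
 * Main second-level handler for synchronous exceptions taken on SP0.
 * Stashes and clears thread->recover so a fault in the handler itself is not
 * silently recovered, re-enables interrupts if the interrupted context had
 * them enabled, then dispatches on the ESR exception class.
 */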
451 void
452 sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
453 {
454 esr_exception_class_t class = ESR_EC(esr);
455 arm_saved_state_t * state = &context->ss;
456 vm_offset_t recover = 0;
457 thread_t thread = current_thread();
458 #if MACH_ASSERT
459 int preemption_level = get_preemption_level();
460 #endif
461
462 ASSERT_CONTEXT_SANITY(context);
463
464 if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
465 /*
466 * We no longer support 32-bit, which means no 2-byte
467 * instructions.
468 */
469 if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
470 panic("Exception on 2-byte instruction, "
471 "context=%p, esr=%#x, far=%p",
472 context, esr, (void *)far);
473 } else {
474 panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
475 }
476 }
477
478 /* Don't run exception handler with recover handler set in case of double fault */
479 if (thread->recover) {
480 recover = thread->recover;
481 thread->recover = (vm_offset_t)NULL;
482 }
483
484 /* Inherit the interrupt masks from previous context */
485 if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
486 ml_set_interrupts_enabled(TRUE);
487 }
488
489 switch (class) {
490 case ESR_EC_SVC_64:
491 if (!is_saved_state64(state) || !PSR64_IS_USER(get_saved_state_cpsr(state))) {
492 panic("Invalid SVC_64 context");
493 }
494
495 handle_svc(state);
496 break;
497
498 case ESR_EC_DABORT_EL0:
499 handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort);
500 thread_exception_return();
501
502 case ESR_EC_MSR_TRAP:
503 handle_msr_trap(state, ESR_ISS(esr));
504 break;
505
506 case ESR_EC_IABORT_EL0:
507 handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort);
508 thread_exception_return();
509
510 case ESR_EC_IABORT_EL1:
511 #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
512 {
513 extern volatile vm_offset_t ctrr_test_va;
514 if (ctrr_test_va && far == ctrr_test_va) {
515 extern volatile uint64_t ctrr_exception_esr;
516 ctrr_exception_esr = esr;
517 /* return to the instruction immediately after the call to NX page */
518 set_saved_state_pc(state, get_saved_state_lr(state));
519 break;
520 }
521 }
522 #endif
523
524 panic_with_thread_kernel_state("Kernel instruction fetch abort", state);
525
526 case ESR_EC_PC_ALIGN:
527 handle_pc_align(state);
528 __builtin_unreachable();
529
530 case ESR_EC_DABORT_EL1:
531 handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort);
532 break;
533
534 case ESR_EC_UNCATEGORIZED:
535 assert(!ESR_ISS(esr));
536
537 #if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
538 if (handle_msr_write_from_xnupost(state, esr)) {
539 break;
540 }
541 #endif
542 handle_uncategorized(&context->ss);
543 break;
544
545 case ESR_EC_SP_ALIGN:
546 handle_sp_align(state);
547 __builtin_unreachable();
548
549 case ESR_EC_BKPT_AARCH32:
550 handle_breakpoint(state);
551 __builtin_unreachable();
552
553 case ESR_EC_BRK_AARCH64:
554 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
555 panic_with_thread_kernel_state("Break instruction exception from kernel. Panic (by design)", state);
556 } else {
557 handle_breakpoint(state);
558 }
559 __builtin_unreachable();
560
561 case ESR_EC_BKPT_REG_MATCH_EL0:
562 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
563 handle_breakpoint(state);
564 }
565 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
566 class, state, class, esr, (void *)far);
567 __builtin_unreachable();
568
569 case ESR_EC_BKPT_REG_MATCH_EL1:
570 panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
571 __builtin_unreachable();
572
573 case ESR_EC_SW_STEP_DEBUG_EL0:
574 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
575 handle_sw_step_debug(state);
576 }
577 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
578 class, state, class, esr, (void *)far);
579 __builtin_unreachable();
580
581 case ESR_EC_SW_STEP_DEBUG_EL1:
582 panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
583 __builtin_unreachable();
584
585 case ESR_EC_WATCHPT_MATCH_EL0:
586 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
587 handle_watchpoint(far);
588 }
589 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
590 class, state, class, esr, (void *)far);
591 __builtin_unreachable();
592
593 case ESR_EC_WATCHPT_MATCH_EL1:
594 /*
595 * If we hit a watchpoint in kernel mode, it was probably in a copyin/copyout path, which we
596 * don't want to abort. Turn off watchpoints and keep going; we'll turn them back on in return_from_exception.
597 */
598 if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
599 arm_debug_set(NULL);
600 break; /* return to first level handler */
601 }
602 panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
603 class, state, class, esr, (void *)far);
604 __builtin_unreachable();
605
606 case ESR_EC_TRAP_SIMD_FP:
607 handle_simd_trap(state, esr);
608 __builtin_unreachable();
609
610 case ESR_EC_ILLEGAL_INSTR_SET:
611 if (EXCB_ACTION_RERUN !=
612 ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
613 // instruction is not re-executed
614 panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
615 state, class, esr, (void *)far, get_saved_state_cpsr(state));
616 }
617 // must clear this fault in PSR to re-run
618 mask_saved_state_cpsr(state, 0, PSR64_IL);
619 break;
620
621 case ESR_EC_MCR_MRC_CP15_TRAP:
622 case ESR_EC_MCRR_MRRC_CP15_TRAP:
623 case ESR_EC_MCR_MRC_CP14_TRAP:
624 case ESR_EC_LDC_STC_CP14_TRAP:
625 case ESR_EC_MCRR_MRRC_CP14_TRAP:
626 handle_user_trapped_instruction32(state, esr);
627 __builtin_unreachable();
628
629 case ESR_EC_WFI_WFE:
630 // Use of WFI or WFE instruction when they have been disabled for EL0
631 handle_wf_trap(state);
632 __builtin_unreachable();
633
634 case ESR_EC_FLOATING_POINT_64:
635 handle_fp_trap(state, esr);
636 __builtin_unreachable();
637
638
639 default:
640 panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p",
641 state, class, esr, (void *)far);
642 __builtin_unreachable();
643 }
644
645 if (recover) {
646 thread->recover = recover;
647 }
648 #if MACH_ASSERT
649 if (preemption_level != get_preemption_level()) {
650 panic("synchronous exception changed preemption level from %d to %d", preemption_level, get_preemption_level());
651 }
652 #endif
653 }
654
655 /*
656 * Uncategorized exceptions are a catch-all for general execution errors.
657 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
658 */
659 static void
660 handle_uncategorized(arm_saved_state_t *state)
661 {
662 exception_type_t exception = EXC_BAD_INSTRUCTION;
663 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
664 mach_msg_type_number_t numcodes = 2;
665 uint32_t instr = 0;
666
667 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
668
669 #if CONFIG_DTRACE
670 if (tempDTraceTrapHook && (tempDTraceTrapHook(exception, state, 0, 0) == KERN_SUCCESS)) {
671 return;
672 }
673
674 if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
675 /*
676 * For a 64-bit user process, we care about all 4 bytes of the
677 * instruction.
678 */
679 if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
680 if (dtrace_user_probe(state) == KERN_SUCCESS) {
681 return;
682 }
683 }
684 } else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
685 /*
686 * For a 32-bit user process, we check for Thumb mode, in
687 * which case we only care about a 2-byte instruction length.
688 * For non-Thumb mode, we care about all 4 bytes of the instruction.
689 */
690 if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
691 if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
692 ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
693 if (dtrace_user_probe(state) == KERN_SUCCESS) {
694 return;
695 }
696 }
697 } else {
698 if ((instr == FASTTRAP_ARM32_INSTR) ||
699 (instr == FASTTRAP_ARM32_RET_INSTR)) {
700 if (dtrace_user_probe(state) == KERN_SUCCESS) {
701 return;
702 }
703 }
704 }
705 }
706
707 #endif /* CONFIG_DTRACE */
708
709 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
710 if (IS_ARM_GDB_TRAP(instr)) {
711 boolean_t interrupt_state;
712 vm_offset_t kstackptr;
713 exception = EXC_BREAKPOINT;
714
715 interrupt_state = ml_set_interrupts_enabled(FALSE);
716
717 /* Save off the context here (so that the debug logic
718 * can see the original state of this thread).
719 */
720 kstackptr = (vm_offset_t) current_thread()->machine.kstackptr;
721 if (kstackptr) {
722 copy_signed_thread_state(&((thread_kernel_state_t) kstackptr)->machine.ss, state);
723 }
724
725 /* Hop into the debugger (typically either due to a
726 * fatal exception, an explicit panic, or a stackshot
727 * request).
728 */
729 DebuggerCall(exception, state);
730
731 (void) ml_set_interrupts_enabled(interrupt_state);
732 return;
733 } else {
734 panic("Undefined kernel instruction: pc=%p instr=%x\n", (void*)get_saved_state_pc(state), instr);
735 }
736 }
737
738 /*
739 * Check for GDB breakpoint via illegal opcode.
740 */
741 if (IS_ARM_GDB_TRAP(instr)) {
742 exception = EXC_BREAKPOINT;
743 codes[0] = EXC_ARM_BREAKPOINT;
744 codes[1] = instr;
745 } else {
746 codes[1] = instr;
747 }
748
749 exception_triage(exception, codes, numcodes);
750 __builtin_unreachable();
751 }
752
753 static void
754 handle_breakpoint(arm_saved_state_t *state)
755 {
756 exception_type_t exception = EXC_BREAKPOINT;
757 mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT};
758 mach_msg_type_number_t numcodes = 2;
759
760 codes[1] = get_saved_state_pc(state);
761 exception_triage(exception, codes, numcodes);
762 __builtin_unreachable();
763 }
764
765 static void
766 handle_watchpoint(vm_offset_t fault_addr)
767 {
768 exception_type_t exception = EXC_BREAKPOINT;
769 mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG};
770 mach_msg_type_number_t numcodes = 2;
771
772 codes[1] = fault_addr;
773 exception_triage(exception, codes, numcodes);
774 __builtin_unreachable();
775 }
776
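/*
 * Common abort path: decode the fault status and access type from the ESR
 * ISS with the supplied inspector, then hand off to the user or kernel
 * abort handler.
 */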
777 static void
778 handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
779 abort_inspector_t inspect_abort, abort_handler_t handler)
780 {
781 fault_status_t fault_code;
782 vm_prot_t fault_type;
783
784 inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
785 handler(state, esr, fault_addr, fault_code, fault_type, recover);
786 }
787
788 static void
789 inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
790 {
791 getCpuDatap()->cpu_stat.instr_ex_cnt++;
792 *fault_code = ISS_IA_FSC(iss);
793 *fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
794 }
795
796 static void
797 inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
798 {
799 getCpuDatap()->cpu_stat.data_ex_cnt++;
800 *fault_code = ISS_DA_FSC(iss);
801
802 /* Cache operations report faults as write access. Change these to read access. */
803 if ((iss & ISS_DA_WNR) && !(iss & ISS_DA_CM)) {
804 *fault_type = (VM_PROT_READ | VM_PROT_WRITE);
805 } else {
806 *fault_type = (VM_PROT_READ);
807 }
808 }
809
810 static void
811 handle_pc_align(arm_saved_state_t *ss)
812 {
813 exception_type_t exc;
814 mach_exception_data_type_t codes[2];
815 mach_msg_type_number_t numcodes = 2;
816
817 if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
818 panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
819 }
820
821 exc = EXC_BAD_ACCESS;
822 codes[0] = EXC_ARM_DA_ALIGN;
823 codes[1] = get_saved_state_pc(ss);
824
825 exception_triage(exc, codes, numcodes);
826 __builtin_unreachable();
827 }
828
829 static void
830 handle_sp_align(arm_saved_state_t *ss)
831 {
832 exception_type_t exc;
833 mach_exception_data_type_t codes[2];
834 mach_msg_type_number_t numcodes = 2;
835
836 if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
837 panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
838 }
839
840 exc = EXC_BAD_ACCESS;
841 codes[0] = EXC_ARM_SP_ALIGN;
842 codes[1] = get_saved_state_sp(ss);
843
844 exception_triage(exc, codes, numcodes);
845 __builtin_unreachable();
846 }
847
848 static void
849 handle_wf_trap(arm_saved_state_t *state)
850 {
851 exception_type_t exc;
852 mach_exception_data_type_t codes[2];
853 mach_msg_type_number_t numcodes = 2;
854 uint32_t instr = 0;
855
856 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
857
858 exc = EXC_BAD_INSTRUCTION;
859 codes[0] = EXC_ARM_UNDEFINED;
860 codes[1] = instr;
861
862 exception_triage(exc, codes, numcodes);
863 __builtin_unreachable();
864 }
865
866 static void
867 handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
868 {
869 exception_type_t exc = EXC_ARITHMETIC;
870 mach_exception_data_type_t codes[2];
871 mach_msg_type_number_t numcodes = 2;
872 uint32_t instr = 0;
873
874 /* The floating point trap flags are only valid if TFV is set. */
875 if (!(esr & ISS_FP_TFV)) {
876 codes[0] = EXC_ARM_FP_UNDEFINED;
877 } else if (esr & ISS_FP_UFF) {
878 codes[0] = EXC_ARM_FP_UF;
879 } else if (esr & ISS_FP_OFF) {
880 codes[0] = EXC_ARM_FP_OF;
881 } else if (esr & ISS_FP_IOF) {
882 codes[0] = EXC_ARM_FP_IO;
883 } else if (esr & ISS_FP_DZF) {
884 codes[0] = EXC_ARM_FP_DZ;
885 } else if (esr & ISS_FP_IDF) {
886 codes[0] = EXC_ARM_FP_ID;
887 } else if (esr & ISS_FP_IXF) {
888 codes[0] = EXC_ARM_FP_IX;
889 } else {
890 panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
891 }
892
893 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
894 codes[1] = instr;
895
896 exception_triage(exc, codes, numcodes);
897 __builtin_unreachable();
898 }
899
900
901 static void
902 handle_sw_step_debug(arm_saved_state_t *state)
903 {
904 thread_t thread = current_thread();
905 exception_type_t exc;
906 mach_exception_data_type_t codes[2];
907 mach_msg_type_number_t numcodes = 2;
908
909 if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
910 panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
911 }
912
913 // Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
914 if (thread->machine.DebugData != NULL) {
915 thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
916 } else {
917 panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
918 }
919
920 mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_IRQF | DAIF_FIQF);
921
922 // Special encoding for gdb single step event on ARM
923 exc = EXC_BREAKPOINT;
924 codes[0] = 1;
925 codes[1] = 0;
926
927 exception_triage(exc, codes, numcodes);
928 __builtin_unreachable();
929 }
930
931 static int
932 is_vm_fault(fault_status_t status)
933 {
934 switch (status) {
935 case FSC_TRANSLATION_FAULT_L0:
936 case FSC_TRANSLATION_FAULT_L1:
937 case FSC_TRANSLATION_FAULT_L2:
938 case FSC_TRANSLATION_FAULT_L3:
939 case FSC_ACCESS_FLAG_FAULT_L1:
940 case FSC_ACCESS_FLAG_FAULT_L2:
941 case FSC_ACCESS_FLAG_FAULT_L3:
942 case FSC_PERMISSION_FAULT_L1:
943 case FSC_PERMISSION_FAULT_L2:
944 case FSC_PERMISSION_FAULT_L3:
945 return TRUE;
946 default:
947 return FALSE;
948 }
949 }
950
951 static int
952 is_translation_fault(fault_status_t status)
953 {
954 switch (status) {
955 case FSC_TRANSLATION_FAULT_L0:
956 case FSC_TRANSLATION_FAULT_L1:
957 case FSC_TRANSLATION_FAULT_L2:
958 case FSC_TRANSLATION_FAULT_L3:
959 return TRUE;
960 default:
961 return FALSE;
962 }
963 }
964
965 #if __ARM_PAN_AVAILABLE__ || defined(KERNEL_INTEGRITY_CTRR)
966 static int
967 is_permission_fault(fault_status_t status)
968 {
969 switch (status) {
970 case FSC_PERMISSION_FAULT_L1:
971 case FSC_PERMISSION_FAULT_L2:
972 case FSC_PERMISSION_FAULT_L3:
973 return TRUE;
974 default:
975 return FALSE;
976 }
977 }
978 #endif
979
980 static int
981 is_alignment_fault(fault_status_t status)
982 {
983 return status == FSC_ALIGNMENT_FAULT;
984 }
985
986 static int
987 is_parity_error(fault_status_t status)
988 {
989 switch (status) {
990 case FSC_SYNC_PARITY:
991 case FSC_ASYNC_PARITY:
992 case FSC_SYNC_PARITY_TT_L1:
993 case FSC_SYNC_PARITY_TT_L2:
994 case FSC_SYNC_PARITY_TT_L3:
995 return TRUE;
996 default:
997 return FALSE;
998 }
999 }
1000
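/*
 * Redirect the saved PC to the thread's copyin/copyout recovery handler.
 * On PAC-enabled hardware the recover pointer is authenticated (against the
 * thread->recover discriminator) inside the signed-state manipulation, and a
 * forged pointer panics.
 */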
1001 static void
1002 set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover)
1003 {
1004 #if defined(HAS_APPLE_PAC)
1005 thread_t thread = current_thread();
1006 const uintptr_t disc = ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER);
1007 const char *panic_msg = "Illegal thread->recover value %p";
1008
1009 MANIPULATE_SIGNED_THREAD_STATE(iss,
1010 // recover = (vm_offset_t)ptrauth_auth_data((void *)recover, ptrauth_key_function_pointer,
1011 // ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
1012 "mov x1, %[recover] \n"
1013 "mov x6, %[disc] \n"
1014 "autia x1, x6 \n"
1015 // if (recover != (vm_offset_t)ptrauth_strip((void *)recover, ptrauth_key_function_pointer)) {
1016 "mov x6, x1 \n"
1017 "xpaci x6 \n"
1018 "cmp x1, x6 \n"
1019 "beq 1f \n"
1020 // panic("Illegal thread->recover value %p", (void *)recover);
1021 "mov x0, %[panic_msg] \n"
1022 "bl _panic \n"
1023 // }
1024 "1: \n"
1025 "str x1, [x0, %[SS64_PC]] \n",
1026 [recover] "r"(recover),
1027 [disc] "r"(disc),
1028 [panic_msg] "r"(panic_msg)
1029 );
1030 #else
1031 set_saved_state_pc(iss, recover);
1032 #endif
1033 }
1034
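/*
 * Handle an EL0 data or instruction abort. VM faults first try the pmap
 * ref/modify fast path (arm_fast_fault) and then vm_fault(); anything still
 * unresolved, plus alignment faults, is delivered to the task as a Mach
 * exception. Synchronous parity errors are routed to the platform error
 * handler.
 */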
1035 static void
1036 handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
1037 fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
1038 {
1039 exception_type_t exc = EXC_BAD_ACCESS;
1040 mach_exception_data_type_t codes[2];
1041 mach_msg_type_number_t numcodes = 2;
1042 thread_t thread = current_thread();
1043
1044 (void)esr;
1045 (void)state;
1046
1047 if (ml_at_interrupt_context()) {
1048 panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
1049 }
1050
1051 thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */
1052
1053 if (is_vm_fault(fault_code)) {
1054 kern_return_t result = KERN_FAILURE;
1055 vm_map_t map = thread->map;
1056 vm_offset_t vm_fault_addr = fault_addr;
1057
1058 assert(map != kernel_map);
1059
1060 if (!(fault_type & VM_PROT_EXECUTE) && user_tbi_enabled()) {
1061 vm_fault_addr = tbi_clear(fault_addr);
1062 }
1063
1064 #if CONFIG_DTRACE
1065 if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
1066 if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
1067 if (recover) {
1068 set_saved_state_pc_to_recovery_handler(state, recover);
1069 } else {
1070 ml_set_interrupts_enabled(FALSE);
1071 panic_with_thread_kernel_state("copyin/out has no recovery point", state);
1072 }
1073 return;
1074 } else {
1075 ml_set_interrupts_enabled(FALSE);
1076 panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
1077 }
1078 }
1079 #else
1080 (void)recover;
1081 #endif
1082
1083 #if CONFIG_PGTRACE
1084 if (pgtrace_enabled) {
1085 /* Check to see if trace bit is set */
1086 result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
1087 if (result == KERN_SUCCESS) {
1088 return;
1089 }
1090 }
1091 #endif
1092
1093 /* check to see if it is just a pmap ref/modify fault */
1094
1095 if ((result != KERN_SUCCESS) && !is_translation_fault(fault_code)) {
1096 result = arm_fast_fault(map->pmap, trunc_page(vm_fault_addr), fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
1097 }
1098 if (result != KERN_SUCCESS) {
1099 {
1100 /* We have to fault the page in */
1101 result = vm_fault(map, vm_fault_addr, fault_type,
1102 /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
1103 /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
1104 }
1105 }
1106 if (result == KERN_SUCCESS || result == KERN_ABORTED) {
1107 return;
1108 }
1109
1110 /*
1111 * vm_fault() should never return KERN_FAILURE for page faults from user space.
1112 * If it does, we're leaking preemption disables somewhere in the kernel.
1113 */
1114 if (__improbable(result == KERN_FAILURE)) {
1115 panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
1116 }
1117
1118 codes[0] = result;
1119 } else if (is_alignment_fault(fault_code)) {
1120 codes[0] = EXC_ARM_DA_ALIGN;
1121 } else if (is_parity_error(fault_code)) {
1122 #if defined(APPLE_ARM64_ARCH_FAMILY)
1123 if (fault_code == FSC_SYNC_PARITY) {
1124 arm64_platform_error(state, esr, fault_addr);
1125 return;
1126 }
1127 #else
1128 panic("User parity error.");
1129 #endif
1130 } else {
1131 codes[0] = KERN_FAILURE;
1132 }
1133
1134 codes[1] = fault_addr;
1135 exception_triage(exc, codes, numcodes);
1136 __builtin_unreachable();
1137 }
1138
1139 #if __ARM_PAN_AVAILABLE__
1140 static int
1141 is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
1142 {
1143 // A PAN (Privileged Access Never) fault occurs on a data read/write in EL1 to a
1144 // virtual address that is readable/writable from both EL1 and EL0.
1145
1146 // To check for PAN fault, we evaluate if the following conditions are true:
1147 // 1. This is a permission fault
1148 // 2. PAN is enabled
1149 // 3. AT instruction (on which PAN has no effect) on the same faulting address
1150 // succeeds
1151
1152 vm_offset_t pa;
1153
1154 if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
1155 return FALSE;
1156 }
1157
1158 if (esr & ISS_DA_WNR) {
1159 pa = mmu_kvtop_wpreflight(fault_addr);
1160 } else {
1161 pa = mmu_kvtop(fault_addr);
1162 }
1163 return (pa)? TRUE: FALSE;
1164 }
1165 #endif
1166
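/*
 * Handle an EL1 data abort. VM faults are resolved via arm_fast_fault and
 * vm_fault(); if that fails and a recovery handler is set (copyin/copyout),
 * control is redirected there. Otherwise the fault is fatal, with special
 * diagnostics for PAN violations and faults in the kernel static region.
 */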
1167 static void
1168 handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
1169 fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
1170 {
1171 thread_t thread = current_thread();
1172 (void)esr;
1173
1174 #if CONFIG_DTRACE
1175 if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
1176 if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
1177 /*
1178 * Point to next instruction, or recovery handler if set.
1179 */
1180 if (recover) {
1181 set_saved_state_pc_to_recovery_handler(state, recover);
1182 } else {
1183 add_saved_state_pc(state, 4);
1184 }
1185 return;
1186 } else {
1187 ml_set_interrupts_enabled(FALSE);
1188 panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
1189 }
1190 }
1191 #endif
1192
1193 #if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */
1194 if (ml_at_interrupt_context()) {
1195 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
1196 }
1197 #endif
1198
1199 if (is_vm_fault(fault_code)) {
1200 kern_return_t result = KERN_FAILURE;
1201 vm_map_t map;
1202 int interruptible;
1203
1204 /*
1205 * Ensure no faults in the physical aperture. This could happen if
1206 * a page table is incorrectly allocated from the read only region
1207 * when running with KTRR.
1208 */
1209
1210 #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
1211 extern volatile vm_offset_t ctrr_test_va;
1212 if (ctrr_test_va && fault_addr == ctrr_test_va && is_permission_fault(fault_code)) {
1213 extern volatile uint64_t ctrr_exception_esr;
1214 ctrr_exception_esr = esr;
1215 add_saved_state_pc(state, 4);
1216 return;
1217 }
1218 #endif
1219
1220 #if __ARM_PAN_AVAILABLE__ && defined(CONFIG_XNUPOST)
1221 if (is_permission_fault(fault_code) && !(get_saved_state_cpsr(state) & PSR64_PAN) &&
1222 (pan_ro_addr != 0) && (fault_addr == pan_ro_addr)) {
1223 ++pan_exception_level;
1224 // On an exception taken from a PAN-disabled context, verify
1225 // that PAN is re-enabled for the exception handler and that
1226 // accessing the test address produces a PAN fault.
1227 pan_fault_value = *(char *)pan_test_addr;
1228 __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
1229 add_saved_state_pc(state, 4);
1230 return;
1231 }
1232 #endif
1233
1234 if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
1235 panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
1236 }
1237
1238 if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
1239 map = kernel_map;
1240 interruptible = THREAD_UNINT;
1241 } else {
1242 map = thread->map;
1243 interruptible = THREAD_ABORTSAFE;
1244 }
1245
1246 #if CONFIG_PGTRACE
1247 if (pgtrace_enabled) {
1248 /* Check to see if trace bit is set */
1249 result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
1250 if (result == KERN_SUCCESS) {
1251 return;
1252 }
1253 }
1254
1255 if (ml_at_interrupt_context()) {
1256 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
1257 }
1258 #endif
1259
1260 /* check to see if it is just a pmap ref/modify fault */
1261 if (!is_translation_fault(fault_code)) {
1262 result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
1263 if (result == KERN_SUCCESS) {
1264 return;
1265 }
1266 }
1267
1268 if (result != KERN_PROTECTION_FAILURE) {
1269 /*
1270 * We have to "fault" the page in.
1271 */
1272 result = vm_fault(map, fault_addr, fault_type,
1273 /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
1274 /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
1275 }
1276
1277 if (result == KERN_SUCCESS) {
1278 return;
1279 }
1280
1281 /*
1282 * If we have a recover handler, invoke it now.
1283 */
1284 if (recover) {
1285 set_saved_state_pc_to_recovery_handler(state, recover);
1286 return;
1287 }
1288
1289 #if __ARM_PAN_AVAILABLE__
1290 if (is_pan_fault(state, esr, fault_addr, fault_code)) {
1291 #ifdef CONFIG_XNUPOST
1292 if ((pan_test_addr != 0) && (fault_addr == pan_test_addr)) {
1293 ++pan_exception_level;
1294 // read the user-accessible value to make sure
1295 // pan is enabled and produces a 2nd fault from
1296 // the exception handler
1297 if (pan_exception_level == 1) {
1298 pan_fault_value = *(char *)pan_test_addr;
1299 __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
1300 }
1301 // this fault address is used for PAN test
1302 // disable PAN and rerun
1303 mask_saved_state_cpsr(state, 0, PSR64_PAN);
1304 return;
1305 }
1306 #endif
1307 panic_with_thread_kernel_state("Privileged access never abort.", state);
1308 }
1309 #endif
1310
1311 #if CONFIG_PGTRACE
1312 } else if (ml_at_interrupt_context()) {
1313 panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
1314 #endif
1315 } else if (is_alignment_fault(fault_code)) {
1316 if (recover) {
1317 set_saved_state_pc_to_recovery_handler(state, recover);
1318 return;
1319 }
1320 panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
1321 } else if (is_parity_error(fault_code)) {
1322 #if defined(APPLE_ARM64_ARCH_FAMILY)
1323 if (fault_code == FSC_SYNC_PARITY) {
1324 arm64_platform_error(state, esr, fault_addr);
1325 return;
1326 }
1327 #else
1328 panic_with_thread_kernel_state("Kernel parity error.", state);
1329 #endif
1330 } else {
1331 kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
1332 }
1333
1334 panic_with_thread_kernel_state("Kernel data abort.", state);
1335 }
1336
1337 extern void syscall_trace(struct arm_saved_state * regs);
1338
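/*
 * Dispatch an SVC from EL0. Negative trap numbers are Mach traps (with
 * inline fast paths for mach_absolute_time and mach_continuous_time);
 * non-negative numbers are BSD/Unix syscalls. PLATFORM_SYSCALL_TRAP_NO is
 * handled separately and does not return here.
 */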
1339 static void
1340 handle_svc(arm_saved_state_t *state)
1341 {
1342 int trap_no = get_saved_state_svc_number(state);
1343 thread_t thread = current_thread();
1344 struct proc *p;
1345
1346 #define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */
1347
1348 #define TRACE_SYSCALL 1
1349 #if TRACE_SYSCALL
1350 syscall_trace(state);
1351 #endif
1352
1353 thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */
1354
1355 if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
1356 platform_syscall(state);
1357 panic("Returned from platform_syscall()?");
1358 }
1359
1360 mach_kauth_cred_uthread_update();
1361
1362 if (trap_no < 0) {
1363 if (trap_no == -3) {
1364 handle_mach_absolute_time_trap(state);
1365 return;
1366 } else if (trap_no == -4) {
1367 handle_mach_continuous_time_trap(state);
1368 return;
1369 }
1370
1371 /* Counting perhaps better in the handler, but this is how it's been done */
1372 thread->syscalls_mach++;
1373 mach_syscall(state);
1374 } else {
1375 /* Counting perhaps better in the handler, but this is how it's been done */
1376 thread->syscalls_unix++;
1377 p = get_bsdthreadtask_info(thread);
1378
1379 assert(p);
1380
1381 unix_syscall(state, thread, (struct uthread*)thread->uthread, p);
1382 }
1383 }
1384
1385 static void
1386 handle_mach_absolute_time_trap(arm_saved_state_t *state)
1387 {
1388 uint64_t now = mach_absolute_time();
1389 saved_state64(state)->x[0] = now;
1390 }
1391
1392 static void
1393 handle_mach_continuous_time_trap(arm_saved_state_t *state)
1394 {
1395 uint64_t now = mach_continuous_time();
1396 saved_state64(state)->x[0] = now;
1397 }
1398
1399 static void
1400 handle_msr_trap(arm_saved_state_t *state, uint32_t iss)
1401 {
1402 exception_type_t exception = EXC_BAD_INSTRUCTION;
1403 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
1404 mach_msg_type_number_t numcodes = 2;
1405 uint32_t instr = 0;
1406
1407 (void)iss;
1408
1409 if (!is_saved_state64(state)) {
1410 panic("MSR/MRS trap (EC 0x%x) from 32-bit state\n", ESR_EC_MSR_TRAP);
1411 }
1412
1413 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1414 panic("MSR/MRS trap (EC 0x%x) from kernel\n", ESR_EC_MSR_TRAP);
1415 }
1416
1417 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1418 codes[1] = instr;
1419
1420 exception_triage(exception, codes, numcodes);
1421 }
1422
1423 static void
1424 handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
1425 {
1426 exception_type_t exception = EXC_BAD_INSTRUCTION;
1427 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
1428 mach_msg_type_number_t numcodes = 2;
1429 uint32_t instr;
1430
1431 if (is_saved_state64(state)) {
1432 panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
1433 }
1434
1435 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1436 panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
1437 }
1438
1439 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1440 codes[1] = instr;
1441
1442 exception_triage(exception, codes, numcodes);
1443 __builtin_unreachable();
1444 }
1445
1446 static void
1447 handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
1448 {
1449 exception_type_t exception = EXC_BAD_INSTRUCTION;
1450 mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
1451 mach_msg_type_number_t numcodes = 2;
1452 uint32_t instr = 0;
1453
1454 if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
1455 panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
1456 }
1457
1458 COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
1459 codes[1] = instr;
1460
1461 exception_triage(exception, codes, numcodes);
1462 __builtin_unreachable();
1463 }
1464
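/*
 * Second-level IRQ handler: run the interrupt handler registered for this
 * CPU, then fold the interrupt timestamp into the entropy buffer.
 */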
1465 void
1466 sleh_irq(arm_saved_state_t *state)
1467 {
1468 uint64_t timestamp = 0;
1469 uint32_t old_entropy_data = 0;
1470 uint32_t old_entropy_sample_count = 0;
1471 size_t entropy_index = 0;
1472 uint32_t * entropy_data_ptr = NULL;
1473 cpu_data_t * cdp = getCpuDatap();
1474 #if MACH_ASSERT
1475 int preemption_level = get_preemption_level();
1476 #endif
1477
1478
1479 sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);
1480
1481 /* Run the registered interrupt handler. */
1482 cdp->interrupt_handler(cdp->interrupt_target,
1483 cdp->interrupt_refCon,
1484 cdp->interrupt_nub,
1485 cdp->interrupt_source);
1486
1487 /* We use interrupt timing as an entropy source. */
1488 timestamp = ml_get_timebase();
1489
1490 /*
1491 * The buffer index is subject to races, but as these races should only
1492 * result in multiple CPUs updating the same location, the end result
1493 * should be that noise gets written into the entropy buffer. As this
1494 * is the entire point of the entropy buffer, we will not worry about
1495 * these races for now.
1496 */
1497 old_entropy_sample_count = EntropyData.sample_count;
1498 EntropyData.sample_count += 1;
1499
1500 entropy_index = old_entropy_sample_count & ENTROPY_BUFFER_INDEX_MASK;
1501 entropy_data_ptr = EntropyData.buffer + entropy_index;
1502
1503 /* Mix the timestamp data and the old data together. */
1504 old_entropy_data = *entropy_data_ptr;
1505 *entropy_data_ptr = (uint32_t)timestamp ^ __ror(old_entropy_data, 9);
1506
1507 sleh_interrupt_handler_epilogue();
1508 #if MACH_ASSERT
1509 if (preemption_level != get_preemption_level()) {
1510 panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, get_preemption_level());
1511 }
1512 #endif
1513 }
1514
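/*
 * Second-level FIQ handler: classify the source (fast IPI, PMI, or timer)
 * and handle it; anything unrecognized is treated as a decrementer/timer
 * interrupt.
 */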
1515 void
1516 sleh_fiq(arm_saved_state_t *state)
1517 {
1518 unsigned int type = DBG_INTR_TYPE_UNKNOWN;
1519 #if MACH_ASSERT
1520 int preemption_level = get_preemption_level();
1521 #endif
1522
1523 #if MONOTONIC_FIQ
1524 uint64_t pmcr0 = 0, upmsr = 0;
1525 #endif /* MONOTONIC_FIQ */
1526
1527 #if defined(HAS_IPI)
1528 boolean_t is_ipi = FALSE;
1529 uint64_t ipi_sr = 0;
1530
1531 if (gFastIPI) {
1532 MRS(ipi_sr, ARM64_REG_IPI_SR);
1533
1534 if (ipi_sr & 1) {
1535 is_ipi = TRUE;
1536 }
1537 }
1538
1539 if (is_ipi) {
1540 type = DBG_INTR_TYPE_IPI;
1541 } else
1542 #endif /* defined(HAS_IPI) */
1543 #if MONOTONIC_FIQ
1544 if (mt_pmi_pending(&pmcr0, &upmsr)) {
1545 type = DBG_INTR_TYPE_PMI;
1546 } else
1547 #endif /* MONOTONIC_FIQ */
1548 if (ml_get_timer_pending()) {
1549 type = DBG_INTR_TYPE_TIMER;
1550 }
1551
1552 sleh_interrupt_handler_prologue(state, type);
1553
1554 #if defined(HAS_IPI)
1555 if (is_ipi) {
1556 /*
1557 * Order is important here: we must ack the IPI by writing IPI_SR
1558 * before we call cpu_signal_handler(). Otherwise, there will be
1559 * a window between the completion of pending-signal processing in
1560 * cpu_signal_handler() and the ack during which a newly-issued
1561 * IPI to this CPU may be lost. ISB is required to ensure the msr
1562 * is retired before execution of cpu_signal_handler().
1563 */
1564 MSR(ARM64_REG_IPI_SR, ipi_sr);
1565 __builtin_arm_isb(ISB_SY);
1566 cpu_signal_handler();
1567 } else
1568 #endif /* defined(HAS_IPI) */
1569 #if MONOTONIC_FIQ
1570 if (type == DBG_INTR_TYPE_PMI) {
1571 mt_fiq(getCpuDatap(), pmcr0, upmsr);
1572 } else
1573 #endif /* MONOTONIC_FIQ */
1574 {
1575 /*
1576 * We don't know that this is a timer, but we don't have insight into
1577 * the other interrupts that go down this path.
1578 */
1579
1580 cpu_data_t *cdp = getCpuDatap();
1581
1582 cdp->cpu_decrementer = -1; /* Large */
1583
1584 /*
1585 * ARM64_TODO: whether we're coming from userland is ignored right now.
1586 * We can easily thread it through, but not bothering for the
1587 * moment (AArch32 doesn't either).
1588 */
1589 rtclock_intr(TRUE);
1590 }
1591
1592 sleh_interrupt_handler_epilogue();
1593 #if MACH_ASSERT
1594 if (preemption_level != get_preemption_level()) {
1595 panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, get_preemption_level());
1596 }
1597 #endif
1598 }
1599
1600 void
1601 sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
1602 {
1603 arm_saved_state_t *state = &context->ss;
1604 #if MACH_ASSERT
1605 int preemption_level = get_preemption_level();
1606 #endif
1607
1608 ASSERT_CONTEXT_SANITY(context);
1609 arm64_platform_error(state, esr, far);
1610 #if MACH_ASSERT
1611 if (preemption_level != get_preemption_level()) {
1612 panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level());
1613 }
1614 #endif
1615 }
1616
1617 void
1618 mach_syscall_trace_exit(unsigned int retval,
1619 unsigned int call_number)
1620 {
1621 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1622 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
1623 DBG_FUNC_END, retval, 0, 0, 0, 0);
1624 }
1625
1626 __attribute__((noreturn))
1627 void
1628 thread_syscall_return(kern_return_t error)
1629 {
1630 thread_t thread;
1631 struct arm_saved_state *state;
1632
1633 thread = current_thread();
1634 state = get_user_regs(thread);
1635
1636 assert(is_saved_state64(state));
1637 saved_state64(state)->x[0] = error;
1638
1639 #if MACH_ASSERT
1640 kern_allocation_name_t
1641 prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
1642 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
1643 #endif /* MACH_ASSERT */
1644
1645 if (kdebug_enable) {
1646 /* Invert syscall number (negative for a mach syscall) */
1647 mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
1648 }
1649
1650 thread_exception_return();
1651 }
1652
1653 void
1654 syscall_trace(
1655 struct arm_saved_state * regs __unused)
1656 {
1657 /* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
1658 }
1659
1660 static void
1661 sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
1662 {
1663 uint64_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state));
1664
1665 uint64_t pc = is_user ? get_saved_state_pc(state) :
1666 VM_KERNEL_UNSLIDE(get_saved_state_pc(state));
1667
1668 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
1669 0, pc, is_user, type);
1670
1671 #if CONFIG_TELEMETRY
1672 if (telemetry_needs_record) {
1673 telemetry_mark_curthread((boolean_t)is_user, FALSE);
1674 }
1675 #endif /* CONFIG_TELEMETRY */
1676 }
1677
1678 static void
1679 sleh_interrupt_handler_epilogue(void)
1680 {
1681 #if KPERF
1682 kperf_interrupt();
1683 #endif /* KPERF */
1684 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
1685 }
1686
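/*
 * Taken when an exception finds the kernel stack pointer out of bounds.
 * An SP within one page below the stack bottom is reported as a probable
 * overflow; anything else as probable corruption. Always panics.
 */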
1687 void
1688 sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
1689 {
1690 thread_t thread = current_thread();
1691 vm_offset_t kernel_stack_bottom, sp;
1692
1693 sp = get_saved_state_sp(&context->ss);
1694 kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;
1695
1696 if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
1697 panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
1698 }
1699
1700 panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
1701 }