/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/caches_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/thread.h>
#include <arm/rtclock.h>
#include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm64/monotonic.h>

#include <kern/debug.h>
#include <kern/thread.h>
#include <mach/exception.h>
#include <mach/vm_types.h>
#include <mach/machine/thread_status.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <pexpert/arm/protos.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <kern/policy_internal.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#include <prng/random.h>

#ifndef __arm64__
#error Should only be compiling for arm64.
#endif

#define TEST_CONTEXT32_SANITY(context) \
    (context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
     context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)

#define TEST_CONTEXT64_SANITY(context) \
    (context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
     context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)

#define ASSERT_CONTEXT_SANITY(context) \
    assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))


#define COPYIN(src, dst, size) \
    (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
    copyin_kern(src, dst, size) : \
    copyin(src, dst, size)

#define COPYOUT(src, dst, size) \
    (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \
    copyout_kern(src, dst, size) : \
    copyout(src, dst, size)
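/*
 * COPYIN/COPYOUT assume an `arm_saved_state_t *state` variable is in scope at
 * the expansion site: if the saved CPSR shows the faulting context was the
 * kernel, the kernel-to-kernel copy routines are used; otherwise the regular
 * user copyin/copyout. Typical use below is fetching the faulting instruction:
 *
 *    uint32_t instr = 0;
 *    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
 */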

// Below is for concatenating a string param to a string literal
#define STR1(x) #x
#define STR(x) STR1(x)
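/*
 * For example, STR(ARM64_REG_MMU_ERR_STS) first expands the macro argument and
 * then stringifies it, producing the register-name string literal that
 * __builtin_arm_rsr64() takes below.
 */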

void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;

void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);
void sleh_irq(arm_saved_state_t *);
void sleh_fiq(arm_saved_state_t *);
void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;

static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
static void sleh_interrupt_handler_epilogue(void);

static void handle_svc(arm_saved_state_t *);
static void handle_mach_absolute_time_trap(arm_saved_state_t *);
static void handle_mach_continuous_time_trap(arm_saved_state_t *);

static void handle_msr_trap(arm_saved_state_t *state, uint32_t iss);

extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);

static void handle_uncategorized(arm_saved_state_t *);
static void handle_breakpoint(arm_saved_state_t *, uint32_t) __dead2;

typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);

static int is_vm_fault(fault_status_t);
static int is_translation_fault(fault_status_t);
static int is_alignment_fault(fault_status_t);

typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);

static void handle_pc_align(arm_saved_state_t *ss) __dead2;
static void handle_sp_align(arm_saved_state_t *ss) __dead2;
static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;

static void handle_watchpoint(vm_offset_t fault_addr) __dead2;

static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t);

static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;

static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;

extern void mach_kauth_cred_uthread_update(void);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);

struct uthread;
struct proc;

extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
    struct uthread * uthread, struct proc * proc);

extern void
mach_syscall(struct arm_saved_state*);

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
extern boolean_t dtrace_tally_fault(user_addr_t);

/*
 * Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy
 * and paste the trap instructions over from that file. Need to keep these in
 * sync!
 */
#define FASTTRAP_ARM32_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB32_INSTR 0xdefc
#define FASTTRAP_ARM64_INSTR 0xe7eeee7e

#define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB32_RET_INSTR 0xdefb
#define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif


#if CONFIG_PGTRACE
extern boolean_t pgtrace_enabled;
#endif

#if __ARM_PAN_AVAILABLE__
#ifdef CONFIG_XNUPOST
extern vm_offset_t pan_test_addr;
extern vm_offset_t pan_ro_addr;
extern volatile int pan_exception_level;
extern volatile char pan_fault_value;
#endif
#endif

#if HAS_TWO_STAGE_SPR_LOCK
#ifdef CONFIG_XNUPOST
extern volatile vm_offset_t spr_lock_test_addr;
extern volatile uint32_t spr_lock_exception_esr;
#endif
#endif

#if defined(APPLETYPHOON)
#define CPU_NAME "Typhoon"
#elif defined(APPLETWISTER)
#define CPU_NAME "Twister"
#elif defined(APPLEHURRICANE)
#define CPU_NAME "Hurricane"
#elif defined(APPLELIGHTNING)
#define CPU_NAME "Lightning"
#else
#define CPU_NAME "Unknown"
#endif

#if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
#define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
#define ESR_WT_REASON(esr) ((esr) & 0xff)

#define WT_REASON_NONE           0
#define WT_REASON_INTEGRITY_FAIL 1
#define WT_REASON_BAD_SYSCALL    2
#define WT_REASON_NOT_LOCKED     3
#define WT_REASON_ALREADY_LOCKED 4
#define WT_REASON_SW_REQ         5
#define WT_REASON_PT_INVALID     6
#define WT_REASON_PT_VIOLATION   7
#define WT_REASON_REG_VIOLATION  8
#endif

#if defined(HAS_IPI)
void cpu_signal_handler(void);
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */

extern vm_offset_t static_memory_end;

static inline unsigned
__ror(unsigned value, unsigned shift)
{
    return ((unsigned)(value) >> (unsigned)(shift)) |
           ((unsigned)(value) << ((unsigned)(sizeof(unsigned) * CHAR_BIT) - (unsigned)(shift)));
}
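/*
 * Rotate-right example: for a 32-bit unsigned, __ror(0x00000001, 4) yields
 * 0x10000000. The complementary left shift is (bits - shift), so shift must
 * stay within 1..31; 0 or 32 would make one of the shifts undefined behavior.
 */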

__dead2
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#if defined(APPLE_ARM64_ARCH_FAMILY)
    uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
#if defined(NO_ECORE)
    uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;

    mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
    l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
    l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
    l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
    lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
    fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

    panic_plain("Unhandled " CPU_NAME
        " implementation specific error. state=%p esr=%#x far=%p\n"
        "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
        "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
        state, esr, (void *)far,
        (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
        (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);

#elif defined(HAS_MIGSTS)
    uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;

    mpidr = __builtin_arm_rsr64("MPIDR_EL1");
    migsts = __builtin_arm_rsr64(STR(ARM64_REG_MIGSTS_EL1));
    mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
    l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
    l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
    l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
    lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
    fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

    panic_plain("Unhandled " CPU_NAME
        " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
        "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
        "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
        state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
        (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
        (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
#else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
    uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;
#if defined(HAS_DPC_ERR)
    uint64_t dpc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_DPC_ERR_STS));
#endif // defined(HAS_DPC_ERR)

    mpidr = __builtin_arm_rsr64("MPIDR_EL1");

    if (mpidr & MPIDR_PNE) {
        mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
        lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
        fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
    } else {
        mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_MMU_ERR_STS));
        lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_LSU_ERR_STS));
        fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_FED_ERR_STS));
    }

    llc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
    llc_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
    llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));

    panic_plain("Unhandled " CPU_NAME
        " implementation specific error. state=%p esr=%#x far=%p p-core?%d"
#if defined(HAS_DPC_ERR)
        " dpc_err_sts:%p"
#endif
        "\n"
        "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
        "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n",
        state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
#if defined(HAS_DPC_ERR)
        (void *)dpc_err_sts,
#endif
        (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
        (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
#endif
#else // !defined(APPLE_ARM64_ARCH_FAMILY)
#pragma unused (state, esr, far)
    panic_plain("Unhandled implementation specific error\n");
#endif
}

#if CONFIG_KERNEL_INTEGRITY
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
{
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
    if (ESR_WT_SERROR(esr)) {
        switch (ESR_WT_REASON(esr)) {
        case WT_REASON_INTEGRITY_FAIL:
            panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
        case WT_REASON_BAD_SYSCALL:
            panic_plain("Kernel integrity, bad syscall.");
        case WT_REASON_NOT_LOCKED:
            panic_plain("Kernel integrity, not locked.");
        case WT_REASON_ALREADY_LOCKED:
            panic_plain("Kernel integrity, already locked.");
        case WT_REASON_SW_REQ:
            panic_plain("Kernel integrity, software request.");
        case WT_REASON_PT_INVALID:
            panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
                "walking 0x%016lx.", far);
        case WT_REASON_PT_VIOLATION:
            panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
                far);
        case WT_REASON_REG_VIOLATION:
            panic_plain("Kernel integrity, violation in system register %d.",
                (unsigned) far);
        default:
            panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
        }
    }
#else
    if (ESR_WT_SERROR(esr)) {
        panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
    }
#endif
#endif
}
#pragma clang diagnostic pop
#endif

static void
arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
    cpu_data_t *cdp = getCpuDatap();

#if CONFIG_KERNEL_INTEGRITY
    kernel_integrity_error_handler(esr, far);
#endif

    if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
        (*(platform_error_handler_t)cdp->platform_error_handler)(cdp->cpu_id, far);
    } else {
        arm64_implementation_specific_error(state, esr, far);
    }
}

void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
    boolean_t ss_valid;

    ss_valid = is_saved_state64(ss);
    arm_saved_state64_t *state = saved_state64(ss);

    panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
        "\t x0: 0x%016llx  x1: 0x%016llx  x2: 0x%016llx  x3: 0x%016llx\n"
        "\t x4: 0x%016llx  x5: 0x%016llx  x6: 0x%016llx  x7: 0x%016llx\n"
        "\t x8: 0x%016llx  x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
        "\tx12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
        "\tx16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
        "\tx20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
        "\tx24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
        "\tx28: 0x%016llx  fp: 0x%016llx  lr: 0x%016llx  sp: 0x%016llx\n"
        "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
        msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
        state->x[0], state->x[1], state->x[2], state->x[3],
        state->x[4], state->x[5], state->x[6], state->x[7],
        state->x[8], state->x[9], state->x[10], state->x[11],
        state->x[12], state->x[13], state->x[14], state->x[15],
        state->x[16], state->x[17], state->x[18], state->x[19],
        state->x[20], state->x[21], state->x[22], state->x[23],
        state->x[24], state->x[25], state->x[26], state->x[27],
        state->x[28], state->fp, state->lr, state->sp,
        state->pc, state->cpsr, state->esr, state->far);
}

void
sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
{
    esr_exception_class_t class = ESR_EC(esr);
    arm_saved_state_t *state = &context->ss;

    switch (class) {
    case ESR_EC_UNCATEGORIZED:
    {
        uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
        if (IS_ARM_GDB_TRAP(instr)) {
            DebuggerCall(EXC_BREAKPOINT, state);
        }
        // Intentionally fall through to panic if we return from the debugger
    }
    default:
        panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
    }
}

#if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
static bool
handle_msr_write_from_xnupost(arm_saved_state_t *state, uint32_t esr)
{
    user_addr_t pc = get_saved_state_pc(state);
    if ((spr_lock_test_addr != 0) && (pc == spr_lock_test_addr)) {
        spr_lock_exception_esr = esr;
        set_saved_state_pc(state, pc + 4);
        return true;
    }

    return false;
}
#endif
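/*
 * XNUPOST hook: when a test arms spr_lock_test_addr, a trapped system-register
 * write at that PC records the ESR for the test to inspect and resumes at the
 * following instruction (pc + 4) rather than panicking.
 */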

void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
    esr_exception_class_t class = ESR_EC(esr);
    arm_saved_state_t *state = &context->ss;
    vm_offset_t recover = 0;
    thread_t thread = current_thread();
#if MACH_ASSERT
    int preemption_level = get_preemption_level();
#endif

    ASSERT_CONTEXT_SANITY(context);

    if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
        /*
         * We no longer support 32-bit, which means no 2-byte
         * instructions.
         */
        if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
            panic("Exception on 2-byte instruction, "
                "context=%p, esr=%#x, far=%p",
                context, esr, (void *)far);
        } else {
            panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
        }
    }

    /* Don't run exception handler with recover handler set in case of double fault */
    if (thread->recover) {
        recover = thread->recover;
        thread->recover = (vm_offset_t)NULL;
    }

    /* Inherit the interrupt masks from previous context */
    if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
        ml_set_interrupts_enabled(TRUE);
    }

    switch (class) {
    case ESR_EC_SVC_64:
        if (!is_saved_state64(state) || !PSR64_IS_USER(get_saved_state_cpsr(state))) {
            panic("Invalid SVC_64 context");
        }

        handle_svc(state);
        break;

    case ESR_EC_DABORT_EL0:
        handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort);
        thread_exception_return();

    case ESR_EC_MSR_TRAP:
        handle_msr_trap(state, ESR_ISS(esr));
        break;

    case ESR_EC_IABORT_EL0:
        handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort);
        thread_exception_return();

    case ESR_EC_IABORT_EL1:
#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
    {
        extern volatile vm_offset_t ctrr_test_va;
        if (ctrr_test_va && far == ctrr_test_va) {
            extern volatile uint64_t ctrr_exception_esr;
            ctrr_exception_esr = esr;
            /* return to the instruction immediately after the call to NX page */
            set_saved_state_pc(state, get_saved_state_lr(state));
            break;
        }
    }
#endif

        panic_with_thread_kernel_state("Kernel instruction fetch abort", state);

    case ESR_EC_PC_ALIGN:
        handle_pc_align(state);
        __builtin_unreachable();

    case ESR_EC_DABORT_EL1:
        handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort);
        break;

    case ESR_EC_UNCATEGORIZED:
        assert(!ESR_ISS(esr));

#if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
        if (handle_msr_write_from_xnupost(state, esr)) {
            break;
        }
#endif
        handle_uncategorized(&context->ss);
        break;

    case ESR_EC_SP_ALIGN:
        handle_sp_align(state);
        __builtin_unreachable();

    case ESR_EC_BKPT_AARCH32:
        handle_breakpoint(state, esr);
        __builtin_unreachable();

    case ESR_EC_BRK_AARCH64:
        if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
            panic_with_thread_kernel_state("Break instruction exception from kernel. Panic (by design)", state);
        } else {
            handle_breakpoint(state, esr);
        }
        __builtin_unreachable();

    case ESR_EC_BKPT_REG_MATCH_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_breakpoint(state, esr);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_BKPT_REG_MATCH_EL1:
        panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
        __builtin_unreachable();

    case ESR_EC_SW_STEP_DEBUG_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_sw_step_debug(state);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_SW_STEP_DEBUG_EL1:
        panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
        __builtin_unreachable();

    case ESR_EC_WATCHPT_MATCH_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_watchpoint(far);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_WATCHPT_MATCH_EL1:
        /*
         * If we hit a watchpoint in kernel mode, it was probably in a
         * copyin/copyout that we don't want to abort. Turn off watchpoints
         * and keep going; we'll turn them back on in return_from_exception.
         */
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            arm_debug_set(NULL);
            break; /* return to first level handler */
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_TRAP_SIMD_FP:
        handle_simd_trap(state, esr);
        __builtin_unreachable();

    case ESR_EC_ILLEGAL_INSTR_SET:
        if (EXCB_ACTION_RERUN !=
            ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
            // instruction is not re-executed
            panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
                state, class, esr, (void *)far, get_saved_state_cpsr(state));
        }
        // must clear this fault in PSR to re-run
        mask_saved_state_cpsr(state, 0, PSR64_IL);
        break;

    case ESR_EC_MCR_MRC_CP15_TRAP:
    case ESR_EC_MCRR_MRRC_CP15_TRAP:
    case ESR_EC_MCR_MRC_CP14_TRAP:
    case ESR_EC_LDC_STC_CP14_TRAP:
    case ESR_EC_MCRR_MRRC_CP14_TRAP:
        handle_user_trapped_instruction32(state, esr);
        __builtin_unreachable();

    case ESR_EC_WFI_WFE:
        // Use of WFI or WFE instruction when they have been disabled for EL0
        handle_wf_trap(state);
        __builtin_unreachable();

    case ESR_EC_FLOATING_POINT_64:
        handle_fp_trap(state, esr);
        __builtin_unreachable();

    default:
        panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p",
            state, class, esr, (void *)far);
        __builtin_unreachable();
    }

    if (recover) {
        thread->recover = recover;
    }
#if MACH_ASSERT
    if (preemption_level != get_preemption_level()) {
        panic("synchronous exception changed preemption level from %d to %d", preemption_level, get_preemption_level());
    }
#endif
}

/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 */
static void
handle_uncategorized(arm_saved_state_t *state)
{
    exception_type_t exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

#if CONFIG_DTRACE
    if (tempDTraceTrapHook && (tempDTraceTrapHook(exception, state, 0, 0) == KERN_SUCCESS)) {
        return;
    }

    if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
        /*
         * For a 64-bit user process, we care about all 4 bytes of the
         * instr.
         */
        if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
            if (dtrace_user_probe(state) == KERN_SUCCESS) {
                return;
            }
        }
    } else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
        /*
         * For a 32-bit user process, we check for thumb mode, in
         * which case we only care about a 2-byte instruction length.
         * For non-thumb mode, we care about all 4 bytes of the instruction.
         */
        if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
            if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
                ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
                if (dtrace_user_probe(state) == KERN_SUCCESS) {
                    return;
                }
            }
        } else {
            if ((instr == FASTTRAP_ARM32_INSTR) ||
                (instr == FASTTRAP_ARM32_RET_INSTR)) {
                if (dtrace_user_probe(state) == KERN_SUCCESS) {
                    return;
                }
            }
        }
    }

#endif /* CONFIG_DTRACE */

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        if (IS_ARM_GDB_TRAP(instr)) {
            boolean_t interrupt_state;
            vm_offset_t kstackptr;
            exception = EXC_BREAKPOINT;

            interrupt_state = ml_set_interrupts_enabled(FALSE);

            /*
             * Save off the context here (so that the debug logic
             * can see the original state of this thread).
             */
            kstackptr = (vm_offset_t) current_thread()->machine.kstackptr;
            if (kstackptr) {
                copy_signed_thread_state(&((thread_kernel_state_t) kstackptr)->machine.ss, state);
            }

            /*
             * Hop into the debugger (typically either due to a
             * fatal exception, an explicit panic, or a stackshot
             * request).
             */
            DebuggerCall(exception, state);

            (void) ml_set_interrupts_enabled(interrupt_state);
            return;
        } else {
            panic("Undefined kernel instruction: pc=%p instr=%x\n", (void*)get_saved_state_pc(state), instr);
        }
    }

    /*
     * Check for GDB breakpoint via illegal opcode.
     */
    if (IS_ARM_GDB_TRAP(instr)) {
        exception = EXC_BREAKPOINT;
        codes[0] = EXC_ARM_BREAKPOINT;
        codes[1] = instr;
    } else {
        codes[1] = instr;
    }

    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

#if __has_feature(ptrauth_calls)
static const uint16_t ptrauth_brk_comment_base = 0xc470;

static inline bool
brk_comment_is_ptrauth(uint16_t comment)
{
    return comment >= ptrauth_brk_comment_base &&
           comment <= ptrauth_brk_comment_base + ptrauth_key_asdb;
}
#endif /* __has_feature(ptrauth_calls) */
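/*
 * The compiler encodes the failing pointer-auth key in the BRK immediate as
 * ptrauth_brk_comment_base + key; with the <ptrauth.h> key numbering
 * (IA = 0, IB = 1, DA = 2, DB = 3), comments 0xc470-0xc473 are recognized
 * above as pointer-authentication failure traps.
 */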

static void
handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused)
{
    exception_type_t exception = EXC_BREAKPOINT;
    mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT};
    mach_msg_type_number_t numcodes = 2;

#if __has_feature(ptrauth_calls)
    if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 &&
        brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) {
        exception |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */

    codes[1] = get_saved_state_pc(state);
    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_watchpoint(vm_offset_t fault_addr)
{
    exception_type_t exception = EXC_BREAKPOINT;
    mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG};
    mach_msg_type_number_t numcodes = 2;

    codes[1] = fault_addr;
    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
    abort_inspector_t inspect_abort, abort_handler_t handler)
{
    fault_status_t fault_code;
    vm_prot_t fault_type;

    inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
    handler(state, esr, fault_addr, fault_code, fault_type, recover);
}

static void
inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
    getCpuDatap()->cpu_stat.instr_ex_cnt++;
    *fault_code = ISS_IA_FSC(iss);
    *fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
}

static void
inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
    getCpuDatap()->cpu_stat.data_ex_cnt++;
    *fault_code = ISS_DA_FSC(iss);

    /* Cache operations report faults as write access. Change these to read access. */
    if ((iss & ISS_DA_WNR) && !(iss & ISS_DA_CM)) {
        *fault_type = (VM_PROT_READ | VM_PROT_WRITE);
    } else {
        *fault_type = (VM_PROT_READ);
    }
}

#if __has_feature(ptrauth_calls)
static inline bool
fault_addr_bit(vm_offset_t fault_addr, unsigned int bit)
{
    return (bool)((fault_addr >> bit) & 1);
}

/**
 * Determines whether a fault address taken at EL0 contains a PAC error code
 * corresponding to the specified kind of ptrauth key.
 */
static bool
user_fault_addr_matches_pac_error_code(vm_offset_t fault_addr, bool data_key)
{
    bool instruction_tbi = !(get_tcr() & TCR_TBID0_TBI_DATA_ONLY);
    bool tbi = data_key || __improbable(instruction_tbi);
    unsigned int poison_shift;
    if (tbi) {
        poison_shift = 53;
    } else {
        poison_shift = 61;
    }

    /* PAC error codes are always in the form key_number:NOT(key_number) */
    bool poison_bit_1 = fault_addr_bit(fault_addr, poison_shift);
    bool poison_bit_2 = fault_addr_bit(fault_addr, poison_shift + 1);
    return poison_bit_1 != poison_bit_2;
}
#endif /* __has_feature(ptrauth_calls) */
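/*
 * A failed pointer authentication writes an error code into the unused upper
 * bits of the pointer, always as a key bit alongside its complement. With
 * top-byte-ignore in effect the pair sits at bits 54:53, otherwise at 62:61;
 * e.g. a fault address with bit 53 set and bit 54 clear matches above.
 */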

static void
handle_pc_align(arm_saved_state_t *ss)
{
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
        panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
    }

    exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
    if (user_fault_addr_matches_pac_error_code(get_saved_state_pc(ss), false)) {
        exc |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */

    codes[0] = EXC_ARM_DA_ALIGN;
    codes[1] = get_saved_state_pc(ss);

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_sp_align(arm_saved_state_t *ss)
{
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
        panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
    }

    exc = EXC_BAD_ACCESS;
#if __has_feature(ptrauth_calls)
    if (user_fault_addr_matches_pac_error_code(get_saved_state_sp(ss), true)) {
        exc |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */

    codes[0] = EXC_ARM_SP_ALIGN;
    codes[1] = get_saved_state_sp(ss);

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_wf_trap(arm_saved_state_t *state)
{
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

    exc = EXC_BAD_INSTRUCTION;
    codes[0] = EXC_ARM_UNDEFINED;
    codes[1] = instr;

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
{
    exception_type_t exc = EXC_ARITHMETIC;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    /* The floating point trap flags are only valid if TFV is set. */
    if (!(esr & ISS_FP_TFV)) {
        codes[0] = EXC_ARM_FP_UNDEFINED;
    } else if (esr & ISS_FP_UFF) {
        codes[0] = EXC_ARM_FP_UF;
    } else if (esr & ISS_FP_OFF) {
        codes[0] = EXC_ARM_FP_OF;
    } else if (esr & ISS_FP_IOF) {
        codes[0] = EXC_ARM_FP_IO;
    } else if (esr & ISS_FP_DZF) {
        codes[0] = EXC_ARM_FP_DZ;
    } else if (esr & ISS_FP_IDF) {
        codes[0] = EXC_ARM_FP_ID;
    } else if (esr & ISS_FP_IXF) {
        codes[0] = EXC_ARM_FP_IX;
    } else {
        panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
    }

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
    codes[1] = instr;

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}


static void
handle_sw_step_debug(arm_saved_state_t *state)
{
    thread_t thread = current_thread();
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
        panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
    }

    // Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
    if (thread->machine.DebugData != NULL) {
        thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
    } else {
        panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
    }

    mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_IRQF | DAIF_FIQF);

    // Special encoding for gdb single step event on ARM
    exc = EXC_BREAKPOINT;
    codes[0] = 1;
    codes[1] = 0;

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static int
is_vm_fault(fault_status_t status)
{
    switch (status) {
    case FSC_TRANSLATION_FAULT_L0:
    case FSC_TRANSLATION_FAULT_L1:
    case FSC_TRANSLATION_FAULT_L2:
    case FSC_TRANSLATION_FAULT_L3:
    case FSC_ACCESS_FLAG_FAULT_L1:
    case FSC_ACCESS_FLAG_FAULT_L2:
    case FSC_ACCESS_FLAG_FAULT_L3:
    case FSC_PERMISSION_FAULT_L1:
    case FSC_PERMISSION_FAULT_L2:
    case FSC_PERMISSION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}

static int
is_translation_fault(fault_status_t status)
{
    switch (status) {
    case FSC_TRANSLATION_FAULT_L0:
    case FSC_TRANSLATION_FAULT_L1:
    case FSC_TRANSLATION_FAULT_L2:
    case FSC_TRANSLATION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}

#if __ARM_PAN_AVAILABLE__ || defined(KERNEL_INTEGRITY_CTRR)
static int
is_permission_fault(fault_status_t status)
{
    switch (status) {
    case FSC_PERMISSION_FAULT_L1:
    case FSC_PERMISSION_FAULT_L2:
    case FSC_PERMISSION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}
#endif

static int
is_alignment_fault(fault_status_t status)
{
    return status == FSC_ALIGNMENT_FAULT;
}

static int
is_parity_error(fault_status_t status)
{
    switch (status) {
    case FSC_SYNC_PARITY:
    case FSC_ASYNC_PARITY:
    case FSC_SYNC_PARITY_TT_L1:
    case FSC_SYNC_PARITY_TT_L2:
    case FSC_SYNC_PARITY_TT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}

static void
set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover)
{
#if defined(HAS_APPLE_PAC)
    thread_t thread = current_thread();
    const uintptr_t disc = ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER);
    const char *panic_msg = "Illegal thread->recover value %p";

    MANIPULATE_SIGNED_THREAD_STATE(iss,
        // recover = (vm_offset_t)ptrauth_auth_data((void *)recover, ptrauth_key_function_pointer,
        //     ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
        "mov    x1, %[recover]          \n"
        "mov    x6, %[disc]             \n"
        "autia  x1, x6                  \n"
        // if (recover != (vm_offset_t)ptrauth_strip((void *)recover, ptrauth_key_function_pointer)) {
        "mov    x6, x1                  \n"
        "xpaci  x6                      \n"
        "cmp    x1, x6                  \n"
        "beq    1f                      \n"
        // panic("Illegal thread->recover value %p", (void *)recover);
        "mov    x0, %[panic_msg]        \n"
        "bl     _panic                  \n"
        // }
        "1:                             \n"
        "str    x1, [x0, %[SS64_PC]]    \n",
        [recover] "r"(recover),
        [disc] "r"(disc),
        [panic_msg] "r"(panic_msg)
        );
#else
    set_saved_state_pc(iss, recover);
#endif
}
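/*
 * On HAS_APPLE_PAC systems the saved thread state is signed, so the new PC
 * cannot simply be stored: the recovery pointer is authenticated (autia)
 * against a discriminator blended from &thread->recover, cross-checked
 * against its stripped (xpaci) form, and only then written into the signed
 * PC slot; a corrupted thread->recover value panics instead of redirecting
 * execution.
 */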

static void
handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
{
    exception_type_t exc = EXC_BAD_ACCESS;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;
    thread_t thread = current_thread();

    (void)esr;
    (void)state;

    if (ml_at_interrupt_context()) {
        panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
    }

    thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */

    if (is_vm_fault(fault_code)) {
        kern_return_t result = KERN_FAILURE;
        vm_map_t map = thread->map;
        vm_offset_t vm_fault_addr = fault_addr;

        assert(map != kernel_map);

        if (!(fault_type & VM_PROT_EXECUTE) && user_tbi_enabled()) {
            vm_fault_addr = tbi_clear(fault_addr);
        }

#if CONFIG_DTRACE
        if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
            if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
                if (recover) {
                    set_saved_state_pc_to_recovery_handler(state, recover);
                } else {
                    ml_set_interrupts_enabled(FALSE);
                    panic_with_thread_kernel_state("copyin/out has no recovery point", state);
                }
                return;
            } else {
                ml_set_interrupts_enabled(FALSE);
                panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
            }
        }
#else
        (void)recover;
#endif

#if CONFIG_PGTRACE
        if (pgtrace_enabled) {
            /* Check to see if trace bit is set */
            result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
            if (result == KERN_SUCCESS) {
                return;
            }
        }
#endif

        /* check to see if it is just a pmap ref/modify fault */

        if ((result != KERN_SUCCESS) && !is_translation_fault(fault_code)) {
            result = arm_fast_fault(map->pmap, trunc_page(vm_fault_addr), fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
        }
        if (result != KERN_SUCCESS) {
            {
                /* We have to fault the page in */
                result = vm_fault(map, vm_fault_addr, fault_type,
                    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
                    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
            }
        }
        if (result == KERN_SUCCESS || result == KERN_ABORTED) {
            return;
        }

        /*
         * vm_fault() should never return KERN_FAILURE for page faults from user space.
         * If it does, we're leaking preemption disables somewhere in the kernel.
         */
        if (__improbable(result == KERN_FAILURE)) {
            panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
        }

        codes[0] = result;
    } else if (is_alignment_fault(fault_code)) {
        codes[0] = EXC_ARM_DA_ALIGN;
    } else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
        if (fault_code == FSC_SYNC_PARITY) {
            arm64_platform_error(state, esr, fault_addr);
            return;
        }
#else
        panic("User parity error.");
#endif
    } else {
        codes[0] = KERN_FAILURE;
    }

    codes[1] = fault_addr;
#if __has_feature(ptrauth_calls)
    bool is_data_abort = (ESR_EC(esr) == ESR_EC_DABORT_EL0);
    if (user_fault_addr_matches_pac_error_code(fault_addr, is_data_abort)) {
        exc |= EXC_PTRAUTH_BIT;
    }
#endif /* __has_feature(ptrauth_calls) */
    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

#if __ARM_PAN_AVAILABLE__
static int
is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
{
    // A PAN (Privileged Access Never) fault occurs on a data read/write from
    // EL1 to a virtual address that is readable/writeable from both EL1 and EL0.

    // To check for a PAN fault, we evaluate whether the following conditions
    // are all true:
    // 1. This is a permission fault
    // 2. PAN is enabled
    // 3. An AT instruction (on which PAN has no effect) on the same faulting
    //    address succeeds

    vm_offset_t pa;

    if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
        return FALSE;
    }

    if (esr & ISS_DA_WNR) {
        pa = mmu_kvtop_wpreflight(fault_addr);
    } else {
        pa = mmu_kvtop(fault_addr);
    }
    return (pa) ? TRUE : FALSE;
}
#endif
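/*
 * The "AT instruction succeeds" condition is probed via mmu_kvtop() (and
 * mmu_kvtop_wpreflight() for writes), which translate the faulting address
 * from EL1; getting a non-zero physical address back means the mapping itself
 * is valid, so the permission fault can only have come from PAN.
 */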
1232 | ||
1233 | static void | |
1234 | handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, | |
0a7de745 | 1235 | fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover) |
5ba3f43e | 1236 | { |
cb323159 | 1237 | thread_t thread = current_thread(); |
5ba3f43e A |
1238 | (void)esr; |
1239 | ||
1240 | #if CONFIG_DTRACE | |
cb323159 | 1241 | if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */ |
5ba3f43e A |
1242 | if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */ |
1243 | /* | |
1244 | * Point to next instruction, or recovery handler if set. | |
1245 | */ | |
1246 | if (recover) { | |
cb323159 | 1247 | set_saved_state_pc_to_recovery_handler(state, recover); |
5ba3f43e | 1248 | } else { |
cb323159 | 1249 | add_saved_state_pc(state, 4); |
5ba3f43e A |
1250 | } |
1251 | return; | |
1252 | } else { | |
cb323159 | 1253 | ml_set_interrupts_enabled(FALSE); |
5ba3f43e | 1254 | panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state); |
5ba3f43e A |
1255 | } |
1256 | } | |
1257 | #endif | |
1258 | ||
1259 | #if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */ | |
0a7de745 | 1260 | if (ml_at_interrupt_context()) { |
5ba3f43e | 1261 | panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state); |
0a7de745 | 1262 | } |
5ba3f43e A |
1263 | #endif |
1264 | ||
1265 | if (is_vm_fault(fault_code)) { | |
cb323159 A |
1266 | kern_return_t result = KERN_FAILURE; |
1267 | vm_map_t map; | |
1268 | int interruptible; | |
5ba3f43e | 1269 | |
cc8bc92a A |
1270 | /* |
1271 | * Ensure no faults in the physical aperture. This could happen if | |
1272 | * a page table is incorrectly allocated from the read only region | |
1273 | * when running with KTRR. | |
1274 | */ | |
1275 | ||
c6bf4f31 A |
1276 | #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) |
1277 | extern volatile vm_offset_t ctrr_test_va; | |
1278 | if (ctrr_test_va && fault_addr == ctrr_test_va && is_permission_fault(fault_code)) { | |
1279 | extern volatile uint64_t ctrr_exception_esr; | |
1280 | ctrr_exception_esr = esr; | |
1281 | add_saved_state_pc(state, 4); | |
1282 | return; | |
1283 | } | |
1284 | #endif | |
cc8bc92a | 1285 | |
d9a64523 A |
1286 | #if __ARM_PAN_AVAILABLE__ && defined(CONFIG_XNUPOST) |
1287 | if (is_permission_fault(fault_code) && !(get_saved_state_cpsr(state) & PSR64_PAN) && | |
1288 | (pan_ro_addr != 0) && (fault_addr == pan_ro_addr)) { | |
1289 | ++pan_exception_level; | |
1290 | // On an exception taken from a PAN-disabled context, verify | |
1291 | // that PAN is re-enabled for the exception handler and that | |
1292 | // accessing the test address produces a PAN fault. | |
1293 | pan_fault_value = *(char *)pan_test_addr; | |
cb323159 A |
1294 | __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context |
1295 | add_saved_state_pc(state, 4); | |
d9a64523 A |
1296 | return; |
1297 | } | |
1298 | #endif | |
1299 | ||
1300 | if (fault_addr >= gVirtBase && fault_addr < static_memory_end) { | |
0a7de745 | 1301 | panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state); |
5ba3f43e A |
1302 | } |
1303 | ||
1304 | if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) { | |
1305 | map = kernel_map; | |
1306 | interruptible = THREAD_UNINT; | |
1307 | } else { | |
1308 | map = thread->map; | |
1309 | interruptible = THREAD_ABORTSAFE; | |
1310 | } | |
1311 | ||
1312 | #if CONFIG_PGTRACE | |
1313 | if (pgtrace_enabled) { | |
1314 | /* Check to see if trace bit is set */ | |
1315 | result = pmap_pgtrace_fault(map->pmap, fault_addr, state); | |
0a7de745 A |
1316 | if (result == KERN_SUCCESS) { |
1317 | return; | |
1318 | } | |
5ba3f43e A |
1319 | } |
1320 | ||
0a7de745 | 1321 | if (ml_at_interrupt_context()) { |
5ba3f43e | 1322 | panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state); |
0a7de745 | 1323 | } |
5ba3f43e A |
1324 | #endif |
1325 | ||
1326 | /* check to see if it is just a pmap ref/modify fault */ | |
d9a64523 | 1327 | if (!is_translation_fault(fault_code)) { |
cb323159 | 1328 | result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE); |
0a7de745 A |
1329 | if (result == KERN_SUCCESS) { |
1330 | return; | |
1331 | } | |
d9a64523 | 1332 | } |
5ba3f43e | 1333 | |
0a7de745 | 1334 | if (result != KERN_PROTECTION_FAILURE) { |
5ba3f43e A |
1335 | /* |
1336 | * We have to "fault" the page in. | |
1337 | */ | |
1338 | result = vm_fault(map, fault_addr, fault_type, | |
0a7de745 A |
1339 | /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible, |
1340 | /* caller_pmap */ NULL, /* caller_pmap_addr */ 0); | |
5ba3f43e A |
1341 | } |
1342 | ||
0a7de745 A |
1343 | if (result == KERN_SUCCESS) { |
1344 | return; | |
1345 | } | |
5ba3f43e A |
1346 | |
1347 | /* | |
1348 | * If we have a recover handler, invoke it now. | |
1349 | */ | |
1350 | if (recover) { | |
cb323159 | 1351 | set_saved_state_pc_to_recovery_handler(state, recover); |
5ba3f43e A |
1352 | return; |
1353 | } | |

#if __ARM_PAN_AVAILABLE__
	if (is_pan_fault(state, esr, fault_addr, fault_code)) {
#ifdef CONFIG_XNUPOST
		if ((pan_test_addr != 0) && (fault_addr == pan_test_addr)) {
			++pan_exception_level;
			// read the user-accessible value to make sure
			// pan is enabled and produces a 2nd fault from
			// the exception handler
			if (pan_exception_level == 1) {
				pan_fault_value = *(char *)pan_test_addr;
				__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
			}
			// this fault address is used for PAN test
			// disable PAN and rerun
			mask_saved_state_cpsr(state, 0, PSR64_PAN);
			return;
		}
#endif
		panic_with_thread_kernel_state("Privileged access never abort.", state);
	}
#endif

#if CONFIG_PGTRACE
	} else if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
#endif
	} else if (is_alignment_fault(fault_code)) {
		if (recover) {
			set_saved_state_pc_to_recovery_handler(state, recover);
			return;
		}
		panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic_with_thread_kernel_state("Kernel parity error.", state);
#endif
	} else {
		kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
	}

	panic_with_thread_kernel_state("Kernel data abort.", state);
}

extern void syscall_trace(struct arm_saved_state * regs);

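/*
 * handle_svc: dispatch an SVC trap taken from userspace.  Negative trap
 * numbers are Mach traps (-3 and -4 are handled inline as the
 * mach_absolute_time()/mach_continuous_time() fast paths); non-negative
 * numbers are BSD syscalls.
 */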
static void
handle_svc(arm_saved_state_t *state)
{
	int trap_no = get_saved_state_svc_number(state);
	thread_t thread = current_thread();
	struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
	syscall_trace(state);
#endif

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

	if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
		platform_syscall(state);
		panic("Returned from platform_syscall()?");
	}

	mach_kauth_cred_uthread_update();

	if (trap_no < 0) {
		if (trap_no == -3) {
			handle_mach_absolute_time_trap(state);
			return;
		} else if (trap_no == -4) {
			handle_mach_continuous_time_trap(state);
			return;
		}

		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_mach++;
		mach_syscall(state);
	} else {
		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_unix++;
		p = get_bsdthreadtask_info(thread);

		assert(p);

		unix_syscall(state, thread, (struct uthread*)thread->uthread, p);
	}
}

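/* Fast path for the mach_absolute_time() trap: return the current timebase in x0. */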
static void
handle_mach_absolute_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_absolute_time();
	saved_state64(state)->x[0] = now;
}

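/* Fast path for the mach_continuous_time() trap: return the continuous timebase in x0. */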
static void
handle_mach_continuous_time_trap(arm_saved_state_t *state)
{
	uint64_t now = mach_continuous_time();
	saved_state64(state)->x[0] = now;
}

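/*
 * handle_msr_trap: a userspace MSR/MRS instruction touched a system
 * register it may not access.  Deliver EXC_BAD_INSTRUCTION with the
 * faulting instruction word as the second exception code.
 */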
static void
handle_msr_trap(arm_saved_state_t *state, uint32_t iss)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	(void)iss;

	if (!is_saved_state64(state)) {
		panic("MSR/MRS trap (EC 0x%x) from 32-bit state\n", ESR_EC_MSR_TRAP);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("MSR/MRS trap (EC 0x%x) from kernel\n", ESR_EC_MSR_TRAP);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
}

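/*
 * handle_user_trapped_instruction32: an instruction executed in a 32-bit
 * user context was trapped; deliver EXC_BAD_INSTRUCTION with the faulting
 * instruction word as the second exception code.
 */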
static void
handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr;

	if (is_saved_state64(state)) {
		panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

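/*
 * handle_simd_trap: a userspace SIMD/FP access trapped; deliver
 * EXC_BAD_INSTRUCTION with the faulting instruction word attached.
 */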
static void
handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

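/*
 * sleh_irq: second-level IRQ handler.  Runs the registered platform
 * interrupt handler, then mixes the interrupt timestamp into the kernel
 * entropy buffer.
 */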
void
sleh_irq(arm_saved_state_t *state)
{
	uint64_t     timestamp                = 0;
	uint32_t     old_entropy_data         = 0;
	uint32_t     old_entropy_sample_count = 0;
	size_t       entropy_index            = 0;
	uint32_t *   entropy_data_ptr         = NULL;
	cpu_data_t * cdp                      = getCpuDatap();
#if MACH_ASSERT
	int preemption_level = get_preemption_level();
#endif

	sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

	/* Run the registered interrupt handler. */
	cdp->interrupt_handler(cdp->interrupt_target,
	    cdp->interrupt_refCon,
	    cdp->interrupt_nub,
	    cdp->interrupt_source);

	/* We use interrupt timing as an entropy source. */
	timestamp = ml_get_timebase();

	/*
	 * The buffer index is subject to races, but as these races should only
	 * result in multiple CPUs updating the same location, the end result
	 * should be that noise gets written into the entropy buffer.  As this
	 * is the entire point of the entropy buffer, we will not worry about
	 * these races for now.
	 */
	old_entropy_sample_count = EntropyData.sample_count;
	EntropyData.sample_count += 1;

	entropy_index = old_entropy_sample_count & ENTROPY_BUFFER_INDEX_MASK;
	entropy_data_ptr = EntropyData.buffer + entropy_index;

	/* Mix the timestamp data and the old data together. */
	old_entropy_data = *entropy_data_ptr;
	*entropy_data_ptr = (uint32_t)timestamp ^ __ror(old_entropy_data, 9);

	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, get_preemption_level());
	}
#endif
}

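/*
 * sleh_fiq: second-level FIQ handler.  The FIQ source (fast IPI, PMI, or
 * timer) is classified up front so the prologue's kdebug tracepoint can
 * record the interrupt type, then the matching handler is dispatched.
 */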
void
sleh_fiq(arm_saved_state_t *state)
{
	unsigned int type = DBG_INTR_TYPE_UNKNOWN;
#if MACH_ASSERT
	int preemption_level = get_preemption_level();
#endif

#if MONOTONIC_FIQ
	uint64_t pmcr0 = 0, upmsr = 0;
#endif /* MONOTONIC_FIQ */

#if defined(HAS_IPI)
	boolean_t is_ipi = FALSE;
	uint64_t ipi_sr = 0;

	if (gFastIPI) {
		MRS(ipi_sr, ARM64_REG_IPI_SR);

		if (ipi_sr & 1) {
			is_ipi = TRUE;
		}
	}

	if (is_ipi) {
		type = DBG_INTR_TYPE_IPI;
	} else
#endif /* defined(HAS_IPI) */
#if MONOTONIC_FIQ
	if (mt_pmi_pending(&pmcr0, &upmsr)) {
		type = DBG_INTR_TYPE_PMI;
	} else
#endif /* MONOTONIC_FIQ */
	if (ml_get_timer_pending()) {
		type = DBG_INTR_TYPE_TIMER;
	}

	sleh_interrupt_handler_prologue(state, type);

#if defined(HAS_IPI)
	if (is_ipi) {
		/*
		 * Order is important here: we must ack the IPI by writing IPI_SR
		 * before we call cpu_signal_handler().  Otherwise, there will be
		 * a window between the completion of pending-signal processing in
		 * cpu_signal_handler() and the ack during which a newly-issued
		 * IPI to this CPU may be lost.  ISB is required to ensure the msr
		 * is retired before execution of cpu_signal_handler().
		 */
		MSR(ARM64_REG_IPI_SR, ipi_sr);
		__builtin_arm_isb(ISB_SY);
		cpu_signal_handler();
	} else
#endif /* defined(HAS_IPI) */
#if MONOTONIC_FIQ
	if (type == DBG_INTR_TYPE_PMI) {
		mt_fiq(getCpuDatap(), pmcr0, upmsr);
	} else
#endif /* MONOTONIC_FIQ */
	{
		/*
		 * We don't know that this is a timer, but we don't have insight into
		 * the other interrupts that go down this path.
		 */

		cpu_data_t *cdp = getCpuDatap();

		cdp->cpu_decrementer = -1; /* Large */

		/*
		 * ARM64_TODO: whether we're coming from userland is ignored right now.
		 * We can easily thread it through, but not bothering for the
		 * moment (AArch32 doesn't either).
		 */
		rtclock_intr(TRUE);
	}

	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, get_preemption_level());
	}
#endif
}

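/*
 * sleh_serror: second-level handler for asynchronous SError exceptions;
 * forwards the syndrome and fault address to the platform error handler.
 */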
void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	arm_saved_state_t *state = &context->ss;
#if MACH_ASSERT
	int preemption_level = get_preemption_level();
#endif

	ASSERT_CONTEXT_SANITY(context);
	arm64_platform_error(state, esr, far);
#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level());
	}
#endif
}

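/* Emit the kdebug end event for a completed Mach syscall. */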
void
mach_syscall_trace_exit(unsigned int retval,
    unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);
}

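/*
 * thread_syscall_return: complete a Mach syscall by storing the return
 * value in the saved x0, emitting the syscall-exit tracepoint, and
 * resuming userspace via thread_exception_return().
 */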
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
	thread_t thread;
	struct arm_saved_state *state;

	thread = current_thread();
	state = get_user_regs(thread);

	assert(is_saved_state64(state));
	saved_state64(state)->x[0] = error;

#if MACH_ASSERT
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* MACH_ASSERT */

	if (kdebug_enable) {
		/* Invert syscall number (negative for a mach syscall) */
		mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
	}

	thread_exception_return();
}

void
syscall_trace(
	struct arm_saved_state * regs __unused)
{
	/* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}

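/*
 * Common interrupt-entry bookkeeping: emit the kdebug interrupt-start
 * tracepoint (unsliding the PC for kernel-mode interrupts) and take a
 * telemetry sample if one has been requested.
 */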
static void
sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
{
	uint64_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	uint64_t pc = is_user ? get_saved_state_pc(state) :
	    VM_KERNEL_UNSLIDE(get_saved_state_pc(state));

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, pc, is_user, type);

#if CONFIG_TELEMETRY
	if (telemetry_needs_record) {
		telemetry_mark_curthread((boolean_t)is_user, FALSE);
	}
#endif /* CONFIG_TELEMETRY */
}

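/* Common interrupt-exit bookkeeping: kperf sampling hook, then the kdebug interrupt-end tracepoint. */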
static void
sleh_interrupt_handler_epilogue(void)
{
#if KPERF
	kperf_interrupt();
#endif /* KPERF */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}

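/*
 * sleh_invalid_stack: the kernel stack pointer failed validation.  If SP
 * landed within one page below the bottom of the stack, report a probable
 * overflow; otherwise treat it as corruption.
 */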
void
sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
{
	thread_t thread = current_thread();
	vm_offset_t kernel_stack_bottom, sp;

	sp = get_saved_state_sp(&context->ss);
	kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;

	if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
		panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
	}

	panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
}