/* (extraction artifact removed: git-blame table header) */
1 | /* |
2 | * Copyright (c) 2012-2016 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #include <arm/caches_internal.h> | |
30 | #include <arm/cpu_data.h> | |
31 | #include <arm/cpu_data_internal.h> | |
32 | #include <arm/misc_protos.h> | |
33 | #include <arm/thread.h> | |
34 | #include <arm/rtclock.h> | |
35 | #include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */ | |
36 | #include <arm64/proc_reg.h> | |
37 | #include <arm64/machine_machdep.h> | |
38 | #include <arm64/monotonic.h> | |
39 | ||
40 | #include <kern/debug.h> | |
41 | #include <kern/thread.h> | |
42 | #include <mach/exception.h> | |
43 | #include <mach/vm_types.h> | |
44 | #include <mach/machine/thread_status.h> | |
45 | ||
46 | #include <machine/atomic.h> | |
47 | #include <machine/machlimits.h> | |
48 | ||
49 | #include <pexpert/arm/protos.h> | |
50 | ||
51 | #include <vm/vm_page.h> | |
52 | #include <vm/pmap.h> | |
53 | #include <vm/vm_fault.h> | |
54 | #include <vm/vm_kern.h> | |
55 | ||
56 | #include <sys/kdebug.h> | |
57 | ||
58 | #include <kern/policy_internal.h> | |
59 | #if CONFIG_TELEMETRY | |
60 | #include <kern/telemetry.h> | |
61 | #endif | |
62 | ||
63 | #include <prng/random.h> | |
64 | ||
65 | #ifndef __arm64__ | |
66 | #error Should only be compiling for arm64. | |
67 | #endif | |
68 | ||
69 | #define TEST_CONTEXT32_SANITY(context) \ | |
70 | (context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \ | |
71 | context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT) | |
72 | ||
73 | #define TEST_CONTEXT64_SANITY(context) \ | |
74 | (context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \ | |
75 | context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT) | |
76 | ||
77 | #define ASSERT_CONTEXT_SANITY(context) \ | |
78 | assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context)) | |
79 | ||
80 | ||
81 | #define COPYIN(src, dst, size) \ | |
82 | (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \ | |
83 | copyin_kern(src, dst, size) \ | |
84 | : \ | |
85 | copyin(src, dst, size) | |
86 | ||
87 | #define COPYOUT(src, dst, size) \ | |
88 | (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \ | |
89 | copyout_kern(src, dst, size) \ | |
90 | : \ | |
91 | copyout(src, dst, size) | |
92 | ||
93 | // Below is for concatenating a string param to a string literal | |
94 | #define STR1(x) #x | |
95 | #define STR(x) STR1(x) | |
96 | ||
97 | void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss); | |
98 | ||
99 | void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t); | |
100 | void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t); | |
101 | void sleh_irq(arm_saved_state_t *); | |
102 | void sleh_fiq(arm_saved_state_t *); | |
103 | void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far); | |
104 | void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far); | |
105 | ||
106 | static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type); | |
107 | static void sleh_interrupt_handler_epilogue(void); | |
108 | ||
109 | static void handle_svc(arm_saved_state_t *); | |
110 | static void handle_mach_absolute_time_trap(arm_saved_state_t *); | |
111 | static void handle_mach_continuous_time_trap(arm_saved_state_t *); | |
112 | ||
113 | static void handle_msr_trap(arm_saved_state_t *state, uint32_t iss); | |
114 | ||
115 | extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, boolean_t); | |
116 | ||
117 | static void handle_uncategorized(arm_saved_state_t *, boolean_t); | |
118 | static void handle_breakpoint(arm_saved_state_t *); | |
119 | ||
120 | typedef void(*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *); | |
121 | static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *); | |
122 | static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *); | |
123 | ||
124 | static int is_vm_fault(fault_status_t); | |
125 | static int is_alignment_fault(fault_status_t); | |
126 | ||
127 | typedef void(*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); | |
128 | static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); | |
129 | static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); | |
130 | ||
131 | static void handle_pc_align(arm_saved_state_t *ss); | |
132 | static void handle_sp_align(arm_saved_state_t *ss); | |
133 | static void handle_sw_step_debug(arm_saved_state_t *ss); | |
134 | static void handle_wf_trap(arm_saved_state_t *ss); | |
135 | ||
136 | static void handle_watchpoint(vm_offset_t fault_addr); | |
137 | ||
138 | static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t); | |
139 | ||
140 | static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr); | |
141 | ||
142 | static void handle_simd_trap(arm_saved_state_t *, uint32_t esr); | |
143 | ||
144 | extern void mach_kauth_cred_uthread_update(void); | |
145 | void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number); | |
146 | ||
147 | struct uthread; | |
148 | struct proc; | |
149 | ||
150 | extern void | |
151 | unix_syscall(struct arm_saved_state * regs, thread_t thread_act, | |
152 | struct uthread * uthread, struct proc * proc); | |
153 | ||
154 | extern void | |
155 | mach_syscall(struct arm_saved_state*); | |
156 | ||
157 | volatile perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */ | |
158 | ||
159 | #if CONFIG_DTRACE | |
160 | extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs); | |
161 | extern boolean_t dtrace_tally_fault(user_addr_t); | |
162 | ||
163 | /* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions | |
164 | over from that file. Need to keep these in sync! */ | |
165 | #define FASTTRAP_ARM32_INSTR 0xe7ffdefc | |
166 | #define FASTTRAP_THUMB32_INSTR 0xdefc | |
167 | #define FASTTRAP_ARM64_INSTR 0xe7eeee7e | |
168 | ||
169 | #define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb | |
170 | #define FASTTRAP_THUMB32_RET_INSTR 0xdefb | |
171 | #define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d | |
172 | ||
173 | /* See <rdar://problem/4613924> */ | |
174 | perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */ | |
175 | #endif | |
176 | ||
177 | #if CONFIG_PGTRACE | |
178 | extern boolean_t pgtrace_enabled; | |
179 | #endif | |
180 | ||
181 | #if __ARM_PAN_AVAILABLE__ | |
182 | extern boolean_t arm_pan_enabled; | |
183 | #endif | |
184 | ||
185 | #if defined(APPLECYCLONE) | |
186 | #define CPU_NAME "Cyclone" | |
187 | #elif defined(APPLETYPHOON) | |
188 | #define CPU_NAME "Typhoon" | |
189 | #elif defined(APPLETWISTER) | |
190 | #define CPU_NAME "Twister" | |
191 | #elif defined(APPLEHURRICANE) | |
192 | #define CPU_NAME "Hurricane" | |
193 | #else | |
194 | #define CPU_NAME "Unknown" | |
195 | #endif | |
196 | ||
197 | #if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT)) | |
198 | #define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400) | |
199 | #define ESR_WT_REASON(esr) ((esr) & 0xff) | |
200 | ||
201 | #define WT_REASON_NONE 0 | |
202 | #define WT_REASON_INTEGRITY_FAIL 1 | |
203 | #define WT_REASON_BAD_SYSCALL 2 | |
204 | #define WT_REASON_NOT_LOCKED 3 | |
205 | #define WT_REASON_ALREADY_LOCKED 4 | |
206 | #define WT_REASON_SW_REQ 5 | |
207 | #define WT_REASON_PT_INVALID 6 | |
208 | #define WT_REASON_PT_VIOLATION 7 | |
209 | #define WT_REASON_REG_VIOLATION 8 | |
210 | #endif | |
211 | ||
212 | ||
/*
 * Rotate 'value' right by 'shift' bits.
 *
 * The shift count is reduced modulo the bit width of 'unsigned' before
 * use: C leaves shifts by >= the type width (and therefore the original
 * 'value << (width - 0)' computed for shift == 0) undefined.  Masking
 * makes __ror(v, 0) and __ror(v, 32) well-defined identity operations
 * while preserving the result for every previously-valid shift.
 */
static inline unsigned
__ror(unsigned value, unsigned shift)
{
	const unsigned bits = (unsigned)(sizeof(unsigned) * CHAR_BIT);

	shift &= (bits - 1U);
	if (shift == 0U) {
		return value;
	}
	return (((unsigned)(value) >> (unsigned)(shift)) |
	        (unsigned)(value) << (bits - (unsigned)(shift)));
}
219 | ||
/*
 * Panic with Apple SoC-specific diagnostic register contents after an
 * otherwise-unhandled implementation-specific error.
 *
 * Each configuration branch reads a different set of implementation-defined
 * error-status registers before panicking:
 *  - NO_ECORE:    LSU/FED/MMU error status plus the L2C error registers.
 *  - HAS_MIGSTS:  additionally captures MPIDR and the migration status
 *                 register, and reports whether the faulting core is a
 *                 P-core (MPIDR_PNE bit).
 *  - otherwise:   asymmetric (P/E-core) parts; selects the P- or E-core
 *                 variant of the LSU/FED/MMU registers based on MPIDR.
 *
 * Does not return: every path ends in panic_plain().
 */
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#if defined(APPLE_ARM64_ARCH_FAMILY)
	uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
#if defined(NO_ECORE)
	uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;

	/* Single-cluster parts: one set of core + L2C error registers. */
	mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
	l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
	lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
	fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

	panic_plain("Unhandled " CPU_NAME
		" implementation specific error. state=%p esr=%#x far=%p\n"
		"\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
		"\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
		state, esr, (void *)far,
		(void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
		(void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);

#elif defined(HAS_MIGSTS)
	uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;

	/* Also capture migration status and which core type faulted. */
	mpidr = __builtin_arm_rsr64("MPIDR_EL1");
	migsts = __builtin_arm_rsr64(STR(ARM64_REG_MIGSTS_EL1));
	mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
	l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
	lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
	fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

	panic_plain("Unhandled " CPU_NAME
		" implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
		"\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
		"\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
		state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
		(void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
		(void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
#else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
	uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;

	mpidr = __builtin_arm_rsr64("MPIDR_EL1");

	/* P-cores and E-cores expose distinct error-status registers. */
	if (mpidr & MPIDR_PNE) {
		mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
		lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
		fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
	} else {
		mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_MMU_ERR_STS));
		lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_LSU_ERR_STS));
		fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_FED_ERR_STS));
	}

	llc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
	llc_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
	llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));

	panic_plain("Unhandled " CPU_NAME
		" implementation specific error. state=%p esr=%#x far=%p p-core?%d\n"
		"\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
		"\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n",
		state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
		(void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
		(void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
#endif
#else // !defined(APPLE_ARM64_ARCH_FAMILY)
	panic_plain("Unhandled implementation specific error\n");
#endif
}
293 | ||
294 | #if CONFIG_KERNEL_INTEGRITY | |
295 | #pragma clang diagnostic push | |
296 | #pragma clang diagnostic ignored "-Wunused-parameter" | |
/*
 * Decode a kernel-integrity (KERNEL_INTEGRITY_WT) SError and panic with a
 * reason-specific message.
 *
 * On DEVELOPMENT/DEBUG builds the reason code embedded in the low byte of
 * the ESR is decoded into a descriptive panic string; on release builds any
 * WT SError panics with the raw esr/far values.  If the ESR does not match
 * the WT SError signature (or KERNEL_INTEGRITY_WT is not configured) this
 * returns so the caller can continue with the generic platform-error path.
 */
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far) {
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
	if (ESR_WT_SERROR(esr)) {
		/* No breaks: every case panics and does not return. */
		switch (ESR_WT_REASON(esr)) {
		case WT_REASON_INTEGRITY_FAIL:
			panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
		case WT_REASON_BAD_SYSCALL:
			panic_plain("Kernel integrity, bad syscall.");
		case WT_REASON_NOT_LOCKED:
			panic_plain("Kernel integrity, not locked.");
		case WT_REASON_ALREADY_LOCKED:
			panic_plain("Kernel integrity, already locked.");
		case WT_REASON_SW_REQ:
			panic_plain("Kernel integrity, software request.");
		case WT_REASON_PT_INVALID:
			panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
				"walking 0x%016lx.", far);
		case WT_REASON_PT_VIOLATION:
			panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
				far);
		case WT_REASON_REG_VIOLATION:
			panic_plain("Kernel integrity, violation in system register %d.",
				(unsigned) far);
		default:
			panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
		}
	}
#else
	if (ESR_WT_SERROR(esr)) {
		panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
	}
#endif
#endif
}
333 | #pragma clang diagnostic pop | |
334 | #endif | |
335 | ||
336 | static void | |
337 | arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far) | |
338 | { | |
339 | cpu_data_t *cdp = getCpuDatap(); | |
340 | ||
341 | #if CONFIG_KERNEL_INTEGRITY | |
342 | kernel_integrity_error_handler(esr, far); | |
343 | #endif | |
344 | ||
345 | if (cdp->platform_error_handler != (platform_error_handler_t) NULL) | |
346 | (*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, far); | |
347 | else | |
348 | arm64_implementation_specific_error(state, esr, far); | |
349 | } | |
350 | ||
/*
 * Panic, appending a full dump of the given saved register state (x0-x28,
 * fp, lr, sp, pc, cpsr, esr, far) to the panic message.
 *
 * NOTE(review): the state is unconditionally interpreted via saved_state64()
 * even when is_saved_state64() reports false; in that case the dump is still
 * printed but the header is tagged " INVALID".  Confirm saved_state64() is
 * safe to apply to a 32-bit saved state here.
 */
void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
	boolean_t ss_valid;

	ss_valid = is_saved_state64(ss);
	arm_saved_state64_t *state = saved_state64(ss);

	panic_plain("%s (saved state: %p%s)\n"
		"\t  x0: 0x%016llx  x1:  0x%016llx  x2:  0x%016llx  x3:  0x%016llx\n"
		"\t  x4: 0x%016llx  x5:  0x%016llx  x6:  0x%016llx  x7:  0x%016llx\n"
		"\t  x8: 0x%016llx  x9:  0x%016llx  x10: 0x%016llx  x11: 0x%016llx\n"
		"\t  x12: 0x%016llx x13: 0x%016llx  x14: 0x%016llx  x15: 0x%016llx\n"
		"\t  x16: 0x%016llx x17: 0x%016llx  x18: 0x%016llx  x19: 0x%016llx\n"
		"\t  x20: 0x%016llx x21: 0x%016llx  x22: 0x%016llx  x23: 0x%016llx\n"
		"\t  x24: 0x%016llx x25: 0x%016llx  x26: 0x%016llx  x27: 0x%016llx\n"
		"\t  x28: 0x%016llx fp:  0x%016llx  lr:  0x%016llx  sp:  0x%016llx\n"
		"\t  pc:  0x%016llx cpsr: 0x%08x         esr: 0x%08x          far: 0x%016llx\n",
		msg, ss, (ss_valid ? "" : " INVALID"),
		state->x[0], state->x[1], state->x[2], state->x[3],
		state->x[4], state->x[5], state->x[6], state->x[7],
		state->x[8], state->x[9], state->x[10], state->x[11],
		state->x[12], state->x[13], state->x[14], state->x[15],
		state->x[16], state->x[17], state->x[18], state->x[19],
		state->x[20], state->x[21], state->x[22], state->x[23],
		state->x[24], state->x[25], state->x[26], state->x[27],
		state->x[28], state->fp, state->lr, state->sp,
		state->pc, state->cpsr, state->esr, state->far);
}
380 | ||
381 | ||
382 | void | |
383 | sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused) | |
384 | { | |
385 | esr_exception_class_t class = ESR_EC(esr); | |
386 | arm_saved_state_t *state = &context->ss; | |
387 | ||
388 | switch (class) { | |
389 | case ESR_EC_UNCATEGORIZED: | |
390 | { | |
391 | uint32_t instr = *((uint32_t*)get_saved_state_pc(state)); | |
392 | if (IS_ARM_GDB_TRAP(instr)) | |
393 | DebuggerCall(EXC_BREAKPOINT, state); | |
394 | // Intentionally fall through to panic if we return from the debugger | |
395 | } | |
396 | default: | |
397 | panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state); | |
398 | } | |
399 | } | |
400 | ||
/*
 * Second-level handler for synchronous exceptions taken on the normal
 * kernel stack.  Decodes the ESR exception class and dispatches to the
 * appropriate handler.  Many of the dispatched handlers deliver a Mach
 * exception and never return; those paths are marked with
 * assert(0) / "Unreachable".
 */
void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	esr_exception_class_t class = ESR_EC(esr);
	arm_saved_state_t *state = &context->ss;
	vm_offset_t recover = 0;
	thread_t thread = current_thread();

	ASSERT_CONTEXT_SANITY(context);

	/* Don't run exception handler with recover handler set in case of double fault */
	if (thread->recover) {
		recover = thread->recover;
		thread->recover = (vm_offset_t)NULL;
	}

	/* Inherit the interrupt masks from previous context */
	if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state)))
		ml_set_interrupts_enabled(TRUE);

	switch (class) {
	case ESR_EC_SVC_64:
		/* System call from a 64-bit user process. */
		if (!is_saved_state64(state) || !PSR64_IS_USER(get_saved_state_cpsr(state))) {
			panic("Invalid SVC_64 context");
		}

		handle_svc(state);
		break;

	case ESR_EC_DABORT_EL0:
		/* User data abort: delivers an exception, does not return. */
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort);
		assert(0); /* Unreachable */

	case ESR_EC_MSR_TRAP:
		handle_msr_trap(state, ESR_ISS(esr));
		break;

	case ESR_EC_IABORT_EL0:
		/* User instruction-fetch abort: does not return. */
		handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort);
		assert(0); /* Unreachable */

	case ESR_EC_IABORT_EL1:
		/* Kernel instruction-fetch abort is always fatal. */
		panic("Kernel instruction fetch abort: pc=%p iss=0x%x far=%p. Note: the faulting frame may be missing in the backtrace.",
			(void *)get_saved_state_pc(state), ESR_ISS(esr), (void*)far);

	case ESR_EC_PC_ALIGN:
		handle_pc_align(state);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_DABORT_EL1:
		/* Kernel data abort: may return if a recover handler applies. */
		handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort);
		break;

	case ESR_EC_UNCATEGORIZED:
		assert(!ESR_ISS(esr));

		handle_uncategorized(&context->ss, ESR_INSTR_IS_2BYTES(esr));
		/* TODO: Uncomment this after stackshot uses a brk instruction
		 * rather than an undefined instruction, as stackshot is the
		 * only case where we want to return to the first-level handler.
		 */
		//assert(0); /* Unreachable */
		break;

	case ESR_EC_SP_ALIGN:
		handle_sp_align(state);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_BKPT_AARCH32:
		handle_breakpoint(state);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_BRK_AARCH64:
		if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {

			kprintf("Breakpoint instruction exception from kernel. Hanging here (by design).\n");
			for (;;);

			__unreachable_ok_push
			DebuggerCall(EXC_BREAKPOINT, &context->ss);
			break;
			__unreachable_ok_pop
		} else {
			handle_breakpoint(state);
			assert(0); /* Unreachable */
		}
		/* NOTE(review): no break at the end of this case — benign only
		 * because both branches cannot fall out (infinite loop /
		 * handle_breakpoint() does not return); otherwise this would
		 * fall into ESR_EC_BKPT_REG_MATCH_EL0. */

	case ESR_EC_BKPT_REG_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_breakpoint(state);
			assert(0); /* Unreachable */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_BKPT_REG_MATCH_EL1:
		/* Kernel hardware breakpoint: hang for debugger attach. */
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			kprintf("Hardware Breakpoint Debug exception from kernel. Hanging here (by design).\n");
			for (;;);

			__unreachable_ok_push
			DebuggerCall(EXC_BREAKPOINT, &context->ss);
			break;
			__unreachable_ok_pop
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_SW_STEP_DEBUG_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_sw_step_debug(state);
			assert(0); /* Unreachable */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_SW_STEP_DEBUG_EL1:
		/* Kernel single-step: hang for debugger attach. */
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			kprintf("Software Step Debug exception from kernel. Hanging here (by design).\n");
			for (;;);

			__unreachable_ok_push
			DebuggerCall(EXC_BREAKPOINT, &context->ss);
			break;
			__unreachable_ok_pop
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_WATCHPT_MATCH_EL0:
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			handle_watchpoint(far);
			assert(0); /* Unreachable */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_WATCHPT_MATCH_EL1:
		/*
		 * If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
		 * abort. Turn off watchpoints and keep going; we'll turn them back on in return_from_exception..
		 */
		if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
			arm_debug_set(NULL);
			break; /* return to first level handler */
		}
		panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
			class, state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;

	case ESR_EC_TRAP_SIMD_FP:
		handle_simd_trap(state, esr);
		assert(0);
		break;

	case ESR_EC_ILLEGAL_INSTR_SET:
		if (EXCB_ACTION_RERUN !=
			ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
			// instruction is not re-executed
			panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
				state, class, esr, (void *)far, get_saved_state_cpsr(state));
			assert(0);
		}
		// must clear this fault in PSR to re-run
		set_saved_state_cpsr(state, get_saved_state_cpsr(state) & (~PSR64_IL));
		break;

	case ESR_EC_MCR_MRC_CP15_TRAP:
	case ESR_EC_MCRR_MRRC_CP15_TRAP:
	case ESR_EC_MCR_MRC_CP14_TRAP:
	case ESR_EC_LDC_STC_CP14_TRAP:
	case ESR_EC_MCRR_MRRC_CP14_TRAP:
		/* 32-bit coprocessor traps from user mode. */
		handle_user_trapped_instruction32(state, esr);
		assert(0);
		break;

	case ESR_EC_WFI_WFE:
		// Use of WFI or WFE instruction when they have been disabled for EL0
		handle_wf_trap(state);
		assert(0); /* Unreachable */
		break;

	default:
		panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p",
			state, class, esr, (void *)far);
		assert(0); /* Unreachable */
		break;
	}

	/* Restore the recover handler stashed at entry. */
	if (recover)
		thread->recover = recover;
}
607 | ||
608 | /* | |
609 | * Uncategorized exceptions are a catch-all for general execution errors. | |
610 | * ARM64_TODO: For now, we assume this is for undefined instruction exceptions. | |
611 | */ | |
/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 *
 * Fetches the faulting instruction (2 or 4 bytes depending on instrLen2),
 * gives DTrace fasttrap probes a chance to claim it, enters the debugger
 * for kernel GDB traps, and otherwise delivers EXC_BAD_INSTRUCTION or
 * EXC_BREAKPOINT to the thread.  The COPYIN macro expands using the
 * 'state' parameter from this scope to pick kernel- vs user-space copyin.
 */
static void
handle_uncategorized(arm_saved_state_t *state, boolean_t instrLen2)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr;

	/* NOTE(review): COPYIN's return value is ignored; if the copy fails,
	 * 'instr' is used uninitialized below — confirm this is acceptable. */
	if (instrLen2) {
		uint16_t instr16;
		COPYIN(get_saved_state_pc(state), (char *)&instr16, sizeof(instr16));

		instr = instr16;
	} else {
		COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	}

#if CONFIG_DTRACE
	/* Let the DTrace fbt trap hook claim the exception first. */
	if (tempDTraceTrapHook && (tempDTraceTrapHook(exception, state, 0, 0) == KERN_SUCCESS)) {
		return;
	}

	if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
		/*
		 * For a 64bit user process, we care about all 4 bytes of the
		 * instr.
		 */
		if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
			if (dtrace_user_probe(state) == KERN_SUCCESS)
				return;
		}
	} else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
		/*
		 * For a 32bit user process, we check for thumb mode, in
		 * which case we only care about a 2 byte instruction length.
		 * For non-thumb mode, we care about all 4 bytes of the instructin.
		 */
		if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
			if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
			    ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		} else {
			if ((instr == FASTTRAP_ARM32_INSTR) ||
			    (instr == FASTTRAP_ARM32_RET_INSTR)) {
				if (dtrace_user_probe(state) == KERN_SUCCESS) {
					return;
				}
			}
		}
	}

#endif /* CONFIG_DTRACE */

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		if (IS_ARM_GDB_TRAP(instr)) {
			boolean_t interrupt_state;
			vm_offset_t kstackptr;
			exception = EXC_BREAKPOINT;

			interrupt_state = ml_set_interrupts_enabled(FALSE);

			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			kstackptr = (vm_offset_t) current_thread()->machine.kstackptr;
			if (kstackptr) {
				((thread_kernel_state_t) kstackptr)->machine.ss = *state;
			}

			/* Hop into the debugger (typically either due to a
			 * fatal exception, an explicit panic, or a stackshot
			 * request.
			 */
			DebuggerCall(exception, state);

			(void) ml_set_interrupts_enabled(interrupt_state);
			return;
		} else {
			panic("Undefined kernel instruction: pc=%p instr=%x\n", (void*)get_saved_state_pc(state), instr);
		}
	}

	/*
	 * Check for GDB breakpoint via illegal opcode.
	 */
	if (instrLen2) {
		if (IS_THUMB_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr;
		} else {
			codes[1] = instr;
		}
	} else {
		if (IS_ARM_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr;
		} else if (IS_THUMB_GDB_TRAP((instr & 0xFFFF))) {
			/* Thumb GDB trap in the low halfword of a 4-byte fetch. */
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr & 0xFFFF;
		} else if (IS_THUMB_GDB_TRAP((instr >> 16))) {
			/* Thumb GDB trap in the high halfword. */
			exception = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_BREAKPOINT;
			codes[1] = instr >> 16;
		} else {
			codes[1] = instr;
		}
	}

	exception_triage(exception, codes, numcodes);
	assert(0); /* NOTREACHED */
}
729 | ||
730 | static void | |
731 | handle_breakpoint(arm_saved_state_t *state) | |
732 | { | |
733 | exception_type_t exception = EXC_BREAKPOINT; | |
734 | mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT}; | |
735 | mach_msg_type_number_t numcodes = 2; | |
736 | ||
737 | codes[1] = get_saved_state_pc(state); | |
738 | exception_triage(exception, codes, numcodes); | |
739 | assert(0); /* NOTREACHED */ | |
740 | } | |
741 | ||
742 | static void | |
743 | handle_watchpoint(vm_offset_t fault_addr) | |
744 | { | |
745 | exception_type_t exception = EXC_BREAKPOINT; | |
746 | mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG}; | |
747 | mach_msg_type_number_t numcodes = 2; | |
748 | ||
749 | codes[1] = fault_addr; | |
750 | exception_triage(exception, codes, numcodes); | |
751 | assert(0); /* NOTREACHED */ | |
752 | } | |
753 | ||
754 | static void | |
755 | handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover, | |
756 | abort_inspector_t inspect_abort, abort_handler_t handler) | |
757 | { | |
758 | fault_status_t fault_code; | |
759 | vm_prot_t fault_type; | |
760 | ||
761 | inspect_abort(ESR_ISS(esr), &fault_code, &fault_type); | |
762 | handler(state, esr, fault_addr, fault_code, fault_type, recover); | |
763 | } | |
764 | ||
765 | static void | |
766 | inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type) | |
767 | { | |
768 | getCpuDatap()->cpu_stat.instr_ex_cnt++; | |
769 | *fault_code = ISS_IA_FSC(iss); | |
770 | *fault_type = (VM_PROT_READ | VM_PROT_EXECUTE); | |
771 | } | |
772 | ||
773 | static void | |
774 | inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type) | |
775 | { | |
776 | getCpuDatap()->cpu_stat.data_ex_cnt++; | |
777 | *fault_code = ISS_DA_FSC(iss); | |
778 | ||
779 | /* Cache operations report faults as write access. Change these to read access. */ | |
780 | if ((iss & ISS_DA_WNR) && !(iss & ISS_DA_CM)) { | |
781 | *fault_type = (VM_PROT_READ | VM_PROT_WRITE); | |
782 | } else { | |
783 | *fault_type = (VM_PROT_READ); | |
784 | } | |
785 | } | |
786 | ||
787 | static void | |
788 | handle_pc_align(arm_saved_state_t *ss) | |
789 | { | |
790 | exception_type_t exc; | |
791 | mach_exception_data_type_t codes[2]; | |
792 | mach_msg_type_number_t numcodes = 2; | |
793 | ||
794 | if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) { | |
795 | panic_with_thread_kernel_state("PC alignment exception from kernel.", ss); | |
796 | } | |
797 | ||
798 | exc = EXC_BAD_ACCESS; | |
799 | codes[0] = EXC_ARM_DA_ALIGN; | |
800 | codes[1] = get_saved_state_pc(ss); | |
801 | ||
802 | exception_triage(exc, codes, numcodes); | |
803 | assert(0); /* NOTREACHED */ | |
804 | } | |
805 | ||
806 | static void | |
807 | handle_sp_align(arm_saved_state_t *ss) | |
808 | { | |
809 | exception_type_t exc; | |
810 | mach_exception_data_type_t codes[2]; | |
811 | mach_msg_type_number_t numcodes = 2; | |
812 | ||
813 | if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) { | |
814 | panic_with_thread_kernel_state("SP alignment exception from kernel.", ss); | |
815 | } | |
816 | ||
817 | exc = EXC_BAD_ACCESS; | |
818 | codes[0] = EXC_ARM_SP_ALIGN; | |
819 | codes[1] = get_saved_state_sp(ss); | |
820 | ||
821 | exception_triage(exc, codes, numcodes); | |
822 | assert(0); /* NOTREACHED */ | |
823 | } | |
824 | ||
825 | static void | |
826 | handle_wf_trap(arm_saved_state_t *ss) | |
827 | { | |
828 | exception_type_t exc; | |
829 | mach_exception_data_type_t codes[2]; | |
830 | mach_msg_type_number_t numcodes = 2; | |
831 | ||
832 | exc = EXC_BAD_INSTRUCTION; | |
833 | codes[0] = EXC_ARM_UNDEFINED; | |
834 | codes[1] = get_saved_state_sp(ss); | |
835 | ||
836 | exception_triage(exc, codes, numcodes); | |
837 | assert(0); /* NOTREACHED */ | |
838 | } | |
839 | ||
840 | ||
/*
 * handle_sw_step_debug
 *	Hardware single-step debug exception from userspace.  Clears the
 *	single-step state, re-enables interrupts in the thread's saved user
 *	state, and delivers the gdb-style single-step breakpoint exception.
 *	Fatal if taken from kernel mode or if the thread has no DebugData.
 *	Does not return.
 */
static void
handle_sw_step_debug(arm_saved_state_t *state)
{
	thread_t thread = current_thread();
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;

	if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
	}

	// Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
	if (thread->machine.DebugData != NULL) {
		/* Clear MDSCR_EL1.SS (bit 0) in the thread's shadow debug state. */
		thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
	} else {
		panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
	}

	/* Drop PSTATE.SS and unmask IRQ/FIQ in the saved user CPSR. */
	set_saved_state_cpsr((thread->machine.upcb),
	    get_saved_state_cpsr((thread->machine.upcb)) & ~(PSR64_SS | DAIF_IRQF | DAIF_FIQF));

	// Special encoding for gdb single step event on ARM
	exc = EXC_BREAKPOINT;
	codes[0] = 1;
	codes[1] = 0;

	exception_triage(exc, codes, numcodes);
	assert(0); /* NOTREACHED */
}
871 | ||
/*
 * is_vm_fault
 *	Returns TRUE for fault status codes the VM system can potentially
 *	resolve: translation, access-flag, and permission faults at any
 *	translation level.  All other codes return FALSE.
 */
static int
is_vm_fault(fault_status_t status)
{
	switch (status) {
	case FSC_TRANSLATION_FAULT_L0:
	case FSC_TRANSLATION_FAULT_L1:
	case FSC_TRANSLATION_FAULT_L2:
	case FSC_TRANSLATION_FAULT_L3:
	case FSC_ACCESS_FLAG_FAULT_L1:
	case FSC_ACCESS_FLAG_FAULT_L2:
	case FSC_ACCESS_FLAG_FAULT_L3:
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}
891 | ||
#if __ARM_PAN_AVAILABLE__
/*
 * is_permission_fault
 *	Returns TRUE iff the fault status is a permission fault (any
 *	translation level).  Used only by the PAN-fault detection path.
 */
static int
is_permission_fault(fault_status_t status)
{
	switch (status) {
	case FSC_PERMISSION_FAULT_L1:
	case FSC_PERMISSION_FAULT_L2:
	case FSC_PERMISSION_FAULT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}
#endif
906 | ||
907 | static int | |
908 | is_alignment_fault(fault_status_t status) | |
909 | { | |
910 | return (status == FSC_ALIGNMENT_FAULT); | |
911 | } | |
912 | ||
/*
 * is_parity_error
 *	Returns TRUE for synchronous/asynchronous parity (external abort)
 *	fault status codes, including parity errors on translation table
 *	walks at L1-L3.
 */
static int
is_parity_error(fault_status_t status)
{
	switch (status) {
	case FSC_SYNC_PARITY:
	case FSC_ASYNC_PARITY:
	case FSC_SYNC_PARITY_TT_L1:
	case FSC_SYNC_PARITY_TT_L2:
	case FSC_SYNC_PARITY_TT_L3:
		return TRUE;
	default:
		return FALSE;
	}
}
927 | ||
/*
 * handle_user_abort
 *	Resolve an instruction/data abort taken from userspace.  VM faults
 *	are first offered to the pmap fast-fault path (ref/modify), then to
 *	vm_fault(); anything unresolved — along with alignment faults,
 *	parity errors, and unclassified faults — is turned into a Mach
 *	exception delivered to the thread.  Never returns to the caller:
 *	either thread_exception_return() or exception_triage() takes over.
 *
 *	state      - saved register state of the faulting user thread
 *	esr        - exception syndrome value for the abort
 *	fault_addr - faulting virtual address (FAR)
 *	fault_code - fault status decoded by inspect_*_abort()
 *	fault_type - VM_PROT_* access type of the faulting access
 *	recover    - copyio recovery PC, or 0 (only consulted under DTrace)
 */
static void
handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
{
	exception_type_t exc = EXC_BAD_ACCESS;
	mach_exception_data_type_t codes[2];
	mach_msg_type_number_t numcodes = 2;
	thread_t thread = current_thread();

	(void)esr;
	(void)state;

	if (ml_at_interrupt_context())
		panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */

	if (is_vm_fault(fault_code)) {
		kern_return_t result;
		vm_map_t map = thread->map;
		vm_offset_t vm_fault_addr = fault_addr;

		assert(map != kernel_map);

		/* Strip the TBI tag byte from data addresses before consulting the VM. */
		if (!(fault_type & VM_PROT_EXECUTE) && user_tbi_enabled())
			vm_fault_addr = tbi_clear(fault_addr);

#if CONFIG_DTRACE
		if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
			if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
				if (recover) {
					set_saved_state_pc(state, recover);
				} else {
					boolean_t intr = ml_set_interrupts_enabled(FALSE);
					panic_with_thread_kernel_state("copyin/out has no recovery point", state);
					(void) ml_set_interrupts_enabled(intr);
				}
				return;
			} else {
				boolean_t intr = ml_set_interrupts_enabled(FALSE);
				panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
				(void) ml_set_interrupts_enabled(intr);
				return;
			}
		}
#else
		(void)recover;
#endif

#if CONFIG_PGTRACE
		if (pgtrace_enabled) {
			/* Check to see if trace bit is set */
			result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
			if (result == KERN_SUCCESS) return;
		}
#endif

		/* check to see if it is just a pmap ref/modify fault */
		result = arm_fast_fault(map->pmap, trunc_page(vm_fault_addr), fault_type, TRUE);
		if (result != KERN_SUCCESS) {

			{
				/* We have to fault the page in */
				result = vm_fault(map, vm_fault_addr, fault_type,
				    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
				    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
			}
		}
		if (result == KERN_SUCCESS || result == KERN_ABORTED) {
			/* Fault resolved (or thread aborted) — resume userspace. */
			thread_exception_return();
			/* NOTREACHED */
		}

		codes[0] = result;
	} else if (is_alignment_fault(fault_code)) {
		codes[0] = EXC_ARM_DA_ALIGN;
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			/* Hand synchronous parity errors to the platform handler. */
			arm64_platform_error(state, esr, fault_addr);
			thread_exception_return();
			/* NOTREACHED */
		}
#else
		panic("User parity error.");
#endif
	} else {
		codes[0] = KERN_FAILURE;
	}

	codes[1] = fault_addr;
	exception_triage(exc, codes, numcodes);
	assert(0); /* NOTREACHED */
}
1022 | ||
#if __ARM_PAN_AVAILABLE__
/*
 * is_pan_fault
 *	Returns TRUE when a kernel permission fault looks like a PAN
 *	(Privileged Access Never) violation rather than a genuine
 *	permission problem.
 */
static int
is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
{
	// PAN (Privileged Access Never) fault occurs for data read/write in EL1 to
	// virtual address that is readable/writeable from both EL1 and EL0

	// To check for PAN fault, we evaluate if the following conditions are true:
	// 1. This is a permission fault
	// 2. PAN is enabled
	// 3. AT instruction (on which PAN has no effect) on the same faulting address
	// succeeds

	vm_offset_t pa;

	if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
		return FALSE;
	}

	/* Probe with write-preflight for writes, plain translate for reads. */
	if (esr & ISS_DA_WNR) {
		pa = mmu_kvtop_wpreflight(fault_addr);
	} else {
		pa = mmu_kvtop(fault_addr);
	}
	/* A successful translation means the access was blocked only by PAN. */
	return (pa)? TRUE: FALSE;
}
#endif
1050 | ||
/*
 * handle_kernel_abort
 *	Resolve an abort taken from kernel mode.  VM faults are offered to
 *	the pmap fast-fault path and then vm_fault(); on failure the copyio
 *	recovery handler (if any) is invoked, otherwise the fault is fatal.
 *	Alignment faults and unrecovered parity errors panic.
 *
 *	state      - saved register state at the point of the abort
 *	esr        - exception syndrome value for the abort
 *	fault_addr - faulting virtual address (FAR)
 *	fault_code - fault status decoded by inspect_*_abort()
 *	fault_type - VM_PROT_* access type of the faulting access
 *	recover    - copyio recovery PC, or 0 if none
 */
static void
handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
{
	thread_t thread = current_thread();
	(void)esr;

#if CONFIG_DTRACE
	if (is_vm_fault(fault_code) && thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
		if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
			/*
			 * Point to next instruction, or recovery handler if set.
			 */
			if (recover) {
				set_saved_state_pc(state, recover);
			} else {
				set_saved_state_pc(state, get_saved_state_pc(state) + 4);
			}
			return;
		} else {
			boolean_t intr = ml_set_interrupts_enabled(FALSE);
			panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
			(void) ml_set_interrupts_enabled(intr);
			return;
		}
	}
#endif

#if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */
	if (ml_at_interrupt_context())
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
#endif

	if (is_vm_fault(fault_code)) {
		kern_return_t result;
		vm_map_t map;
		int interruptible;

		/* A fault inside the physically-backed static mapping is always fatal. */
		if (fault_addr >= gVirtBase && fault_addr < (gVirtBase+gPhysSize)) {
			panic_with_thread_kernel_state("Unexpected fault in kernel static region\n",state);
		}

		/* Pick the map/interruptibility: kernel addresses use kernel_map. */
		if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
			map = kernel_map;
			interruptible = THREAD_UNINT;
		} else {
			map = thread->map;
			interruptible = THREAD_ABORTSAFE;
		}

#if CONFIG_PGTRACE
		if (pgtrace_enabled) {
			/* Check to see if trace bit is set */
			result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
			if (result == KERN_SUCCESS) return;
		}

		if (ml_at_interrupt_context())
			panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
#endif

		/* check to see if it is just a pmap ref/modify fault */
		result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE);
		if (result == KERN_SUCCESS) return;

		{
			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
			    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
		}

		if (result == KERN_SUCCESS) return;

		/*
		 * If we have a recover handler, invoke it now.
		 */
		if (recover) {
			set_saved_state_pc(state, recover);
			return;
		}

#if __ARM_PAN_AVAILABLE__
		if (is_pan_fault(state, esr, fault_addr, fault_code)) {
			panic_with_thread_kernel_state("Privileged access never abort.", state);
		}
#endif

#if CONFIG_PGTRACE
	} else if (ml_at_interrupt_context()) {
		panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
#endif
	} else if (is_alignment_fault(fault_code)) {
		panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
	} else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
		if (fault_code == FSC_SYNC_PARITY) {
			/* Synchronous parity errors go to the platform handler. */
			arm64_platform_error(state, esr, fault_addr);
			return;
		}
#else
		panic_with_thread_kernel_state("Kernel parity error.", state);
#endif
	} else {
		kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
	}

	panic_with_thread_kernel_state("Kernel data abort.", state);
}
1162 | ||
1163 | extern void syscall_trace(struct arm_saved_state * regs); | |
1164 | ||
/*
 * handle_svc
 *	Dispatch an SVC taken from userspace.  Routes, in order:
 *	the platform syscall trap, fast mach_absolute_time (-3) and
 *	mach_continuous_time (-4) traps, other negative-numbered Mach
 *	traps, and non-negative BSD (unix) syscalls.
 */
static void
handle_svc(arm_saved_state_t *state)
{
	int trap_no = get_saved_state_svc_number(state);
	thread_t thread = current_thread();
	struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
	syscall_trace(state);
#endif

	thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

	if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
		platform_syscall(state);
		panic("Returned from platform_syscall()?");
	}

	mach_kauth_cred_uthread_update();

	if (trap_no < 0) {
		/* Negative trap numbers are Mach traps; -3/-4 get fast inline handling. */
		if (trap_no == -3) {
			handle_mach_absolute_time_trap(state);
			return;
		} else if (trap_no == -4) {
			handle_mach_continuous_time_trap(state);
			return;
		}

		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_mach++;
		mach_syscall(state);
	} else {
		/* Counting perhaps better in the handler, but this is how it's been done */
		thread->syscalls_unix++;
		p = get_bsdthreadtask_info(thread);

		assert(p);

		unix_syscall(state, thread, (struct uthread*)thread->uthread, p);
	}
}
1210 | ||
1211 | static void | |
1212 | handle_mach_absolute_time_trap(arm_saved_state_t *state) | |
1213 | { | |
1214 | uint64_t now = mach_absolute_time(); | |
1215 | saved_state64(state)->x[0] = now; | |
1216 | } | |
1217 | ||
1218 | static void | |
1219 | handle_mach_continuous_time_trap(arm_saved_state_t *state) | |
1220 | { | |
1221 | uint64_t now = mach_continuous_time(); | |
1222 | saved_state64(state)->x[0] = now; | |
1223 | } | |
1224 | ||
1225 | static void | |
1226 | handle_msr_trap(arm_saved_state_t *state, uint32_t iss) | |
1227 | { | |
1228 | exception_type_t exception = EXC_BAD_INSTRUCTION; | |
1229 | mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED}; | |
1230 | mach_msg_type_number_t numcodes = 2; | |
1231 | uint32_t instr; | |
1232 | ||
1233 | (void)iss; | |
1234 | ||
1235 | if (!is_saved_state64(state)) { | |
1236 | panic("MSR/MRS trap (EC 0x%x) from 32-bit state\n", ESR_EC_MSR_TRAP); | |
1237 | } | |
1238 | ||
1239 | if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { | |
1240 | panic("MSR/MRS trap (EC 0x%x) from kernel\n", ESR_EC_MSR_TRAP); | |
1241 | } | |
1242 | ||
1243 | COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr)); | |
1244 | codes[1] = instr; | |
1245 | ||
1246 | exception_triage(exception, codes, numcodes); | |
1247 | } | |
1248 | ||
1249 | static void | |
1250 | handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr) | |
1251 | { | |
1252 | exception_type_t exception = EXC_BAD_INSTRUCTION; | |
1253 | mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED}; | |
1254 | mach_msg_type_number_t numcodes = 2; | |
1255 | uint32_t instr; | |
1256 | ||
1257 | if (is_saved_state64(state)) { | |
1258 | panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr); | |
1259 | } | |
1260 | ||
1261 | if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { | |
1262 | panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr); | |
1263 | } | |
1264 | ||
1265 | COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr)); | |
1266 | codes[1] = instr; | |
1267 | ||
1268 | exception_triage(exception, codes, numcodes); | |
1269 | } | |
1270 | ||
1271 | static void | |
1272 | handle_simd_trap(arm_saved_state_t *state, uint32_t esr) | |
1273 | { | |
1274 | exception_type_t exception = EXC_BAD_INSTRUCTION; | |
1275 | mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED}; | |
1276 | mach_msg_type_number_t numcodes = 2; | |
1277 | uint32_t instr; | |
1278 | ||
1279 | if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { | |
1280 | panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr); | |
1281 | } | |
1282 | ||
1283 | COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr)); | |
1284 | codes[1] = instr; | |
1285 | ||
1286 | exception_triage(exception, codes, numcodes); | |
1287 | } | |
1288 | ||
/*
 * sleh_irq
 *	Second-level IRQ handler: emit the interrupt tracepoint, run the
 *	platform-registered interrupt handler, then fold the interrupt
 *	timestamp into the kernel entropy buffer.
 */
void
sleh_irq(arm_saved_state_t *state)
{
	uint64_t timestamp = 0;
	uint32_t old_entropy_data = 0;
	uint32_t * old_entropy_data_ptr = NULL;
	uint32_t * new_entropy_data_ptr = NULL;
	cpu_data_t * cdp = getCpuDatap();

	sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

	/* Run the registered interrupt handler. */
	cdp->interrupt_handler(cdp->interrupt_target,
	    cdp->interrupt_refCon,
	    cdp->interrupt_nub,
	    cdp->interrupt_source);

	/* We use interrupt timing as an entropy source. */
	timestamp = ml_get_timebase();

	/*
	 * The buffer index is subject to races, but as these races should only
	 * result in multiple CPUs updating the same location, the end result
	 * should be that noise gets written into the entropy buffer. As this
	 * is the entire point of the entropy buffer, we will not worry about
	 * these races for now.
	 */
	old_entropy_data_ptr = EntropyData.index_ptr;
	new_entropy_data_ptr = old_entropy_data_ptr + 1;

	/* Wrap the index pointer back to the start of the circular buffer. */
	if (new_entropy_data_ptr >= &EntropyData.buffer[ENTROPY_BUFFER_SIZE]) {
		new_entropy_data_ptr = EntropyData.buffer;
	}

	EntropyData.index_ptr = new_entropy_data_ptr;

	/* Mix the timestamp data and the old data together. */
	old_entropy_data = *old_entropy_data_ptr;
	*old_entropy_data_ptr = (uint32_t)timestamp ^ __ror(old_entropy_data, 9);

	sleh_interrupt_handler_epilogue();
}
1331 | ||
/*
 * sleh_fiq
 *	Second-level FIQ handler.  FIQs are used for timer interrupts on
 *	this path; classify as a timer when one is pending, reset the
 *	per-CPU decrementer, and run the rtclock interrupt handler.
 */
void
sleh_fiq(arm_saved_state_t *state)
{
	unsigned int type = DBG_INTR_TYPE_UNKNOWN;
	if (ml_get_timer_pending()) {
		type = DBG_INTR_TYPE_TIMER;
	}

	sleh_interrupt_handler_prologue(state, type);

	{
		/*
		 * We don't know that this is a timer, but we don't have insight into
		 * the other interrupts that go down this path.
		 */


		cpu_data_t *cdp = getCpuDatap();

		cdp->cpu_decrementer = -1; /* Large */

		/*
		 * ARM64_TODO: whether we're coming from userland is ignored right now.
		 * We can easily thread it through, but not bothering for the
		 * moment (AArch32 doesn't either).
		 */
		rtclock_intr(TRUE);
	}

	sleh_interrupt_handler_epilogue();
}
1363 | ||
1364 | void | |
1365 | sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far) | |
1366 | { | |
1367 | arm_saved_state_t *state = &context->ss; | |
1368 | ||
1369 | ASSERT_CONTEXT_SANITY(context); | |
1370 | arm64_platform_error(state, esr, far); | |
1371 | } | |
1372 | ||
1373 | void | |
1374 | mach_syscall_trace_exit( | |
1375 | unsigned int retval, | |
1376 | unsigned int call_number) | |
1377 | { | |
1378 | KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, | |
1379 | MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END, | |
1380 | retval, 0, 0, 0, 0); | |
1381 | } | |
1382 | ||
/*
 * thread_syscall_return
 *	Store a syscall return value into x0 of the current thread's saved
 *	user state and return to userspace.  Never returns to the caller.
 */
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
	thread_t thread;
	struct arm_saved_state *state;

	thread = current_thread();
	state = get_user_regs(thread);

	assert(is_saved_state64(state));
	saved_state64(state)->x[0] = error;

#if DEBUG || DEVELOPMENT
	/* Catch a thread_set_allocation_name() left set across the syscall. */
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	if (kdebug_enable) {
		/* Invert syscall number (negative for a mach syscall) */
		mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
	}

	thread_exception_return();
}
1409 | ||
/*
 * syscall_trace
 *	Debug hook called on every SVC (see handle_svc); currently a no-op
 *	with the kprintf left commented out for ad-hoc tracing.
 */
void
syscall_trace(
	struct arm_saved_state * regs __unused)
{
	/* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}
1416 | ||
1417 | static void | |
1418 | sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type) | |
1419 | { | |
1420 | uint64_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state)); | |
1421 | ||
1422 | uint64_t pc = is_user ? get_saved_state_pc(state) : | |
1423 | VM_KERNEL_UNSLIDE(get_saved_state_pc(state)); | |
1424 | ||
1425 | KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, | |
1426 | 0, pc, is_user, type); | |
1427 | ||
1428 | #if CONFIG_TELEMETRY | |
1429 | if (telemetry_needs_record) { | |
1430 | telemetry_mark_curthread((boolean_t)is_user); | |
1431 | } | |
1432 | #endif /* CONFIG_TELEMETRY */ | |
1433 | } | |
1434 | ||
/*
 * sleh_interrupt_handler_epilogue
 *	Common interrupt exit: close the tracepoint opened by
 *	sleh_interrupt_handler_prologue().
 */
static void
sleh_interrupt_handler_epilogue(void)
{
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}
1440 | ||
1441 | void | |
1442 | sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused) | |
1443 | { | |
1444 | thread_t thread = current_thread(); | |
1445 | vm_offset_t kernel_stack_bottom, sp; | |
1446 | ||
1447 | sp = get_saved_state_sp(&context->ss); | |
1448 | kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE; | |
1449 | ||
1450 | if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) { | |
1451 | panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss); | |
1452 | } | |
1453 | ||
1454 | panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss); | |
1455 | } | |
1456 |