/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/caches_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/thread.h>
#include <arm/rtclock.h>
#include <arm/trap.h> /* for IS_ARM_GDB_TRAP() et al */
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm64/monotonic.h>

#include <kern/debug.h>
#include <kern/thread.h>
#include <mach/exception.h>
#include <mach/vm_types.h>
#include <mach/machine/thread_status.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <pexpert/arm/protos.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <kern/policy_internal.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#include <prng/random.h>

#ifndef __arm64__
#error Should only be compiling for arm64.
#endif

#define TEST_CONTEXT32_SANITY(context) \
    (context->ss.ash.flavor == ARM_SAVED_STATE32 && context->ss.ash.count == ARM_SAVED_STATE32_COUNT && \
     context->ns.nsh.flavor == ARM_NEON_SAVED_STATE32 && context->ns.nsh.count == ARM_NEON_SAVED_STATE32_COUNT)

#define TEST_CONTEXT64_SANITY(context) \
    (context->ss.ash.flavor == ARM_SAVED_STATE64 && context->ss.ash.count == ARM_SAVED_STATE64_COUNT && \
     context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64 && context->ns.nsh.count == ARM_NEON_SAVED_STATE64_COUNT)

#define ASSERT_CONTEXT_SANITY(context) \
    assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context))


#define COPYIN(src, dst, size)                           \
    (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ?     \
    copyin_kern(src, dst, size) :                        \
    copyin(src, dst, size)

#define COPYOUT(src, dst, size)                          \
    (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ?     \
    copyout_kern(src, dst, size) :                       \
    copyout(src, dst, size)
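/*
 * COPYIN/COPYOUT consult the *saved* CPSR of the trapping context, so a fault
 * taken while the kernel itself was executing (e.g. during copyio) reads the
 * faulting instruction via copyin_kern() rather than the user copyio path.
 * Both macros assume a local named `state` is in scope at the use site.
 */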

// Expand and then stringify a macro argument; used below to build the
// system-register name strings passed to __builtin_arm_rsr64().
#define STR1(x) #x
#define STR(x) STR1(x)
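// Usage example (illustrative): __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS))
// first expands ARM64_REG_MMU_ERR_STS (defined in arm64/proc_reg.h) and then
// stringifies the result, yielding the register-name string literal that the
// rsr64 intrinsic requires.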

void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike;

void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike;
void sleh_synchronous(arm_context_t *, uint32_t, vm_offset_t);
void sleh_irq(arm_saved_state_t *);
void sleh_fiq(arm_saved_state_t *);
void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far);
void sleh_invalid_stack(arm_context_t *context, uint32_t esr, vm_offset_t far) __dead2;

static void sleh_interrupt_handler_prologue(arm_saved_state_t *, unsigned int type);
static void sleh_interrupt_handler_epilogue(void);

static void handle_svc(arm_saved_state_t *);
static void handle_mach_absolute_time_trap(arm_saved_state_t *);
static void handle_mach_continuous_time_trap(arm_saved_state_t *);

static void handle_msr_trap(arm_saved_state_t *state, uint32_t iss);

extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);

static void handle_uncategorized(arm_saved_state_t *);
static void handle_breakpoint(arm_saved_state_t *) __dead2;

typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *);
static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *);

static int is_vm_fault(fault_status_t);
static int is_translation_fault(fault_status_t);
static int is_alignment_fault(fault_status_t);

typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);
static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t);

static void handle_pc_align(arm_saved_state_t *ss) __dead2;
static void handle_sp_align(arm_saved_state_t *ss) __dead2;
static void handle_sw_step_debug(arm_saved_state_t *ss) __dead2;
static void handle_wf_trap(arm_saved_state_t *ss) __dead2;
static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2;

static void handle_watchpoint(vm_offset_t fault_addr) __dead2;

static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t);

static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2;

static void handle_simd_trap(arm_saved_state_t *, uint32_t esr) __dead2;

extern void mach_kauth_cred_uthread_update(void);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);

struct uthread;
struct proc;

extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
    struct uthread * uthread, struct proc * proc);

extern void
mach_syscall(struct arm_saved_state*);

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs);
extern boolean_t dtrace_tally_fault(user_addr_t);

/*
 * Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so
 * copy and paste the trap instructions over from that file. Need to keep
 * these in sync!
 */
#define FASTTRAP_ARM32_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB32_INSTR 0xdefc
#define FASTTRAP_ARM64_INSTR 0xe7eeee7e

#define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB32_RET_INSTR 0xdefb
#define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif


#if CONFIG_PGTRACE
extern boolean_t pgtrace_enabled;
#endif

#if __ARM_PAN_AVAILABLE__
#ifdef CONFIG_XNUPOST
extern vm_offset_t pan_test_addr;
extern vm_offset_t pan_ro_addr;
extern volatile int pan_exception_level;
extern volatile char pan_fault_value;
#endif
#endif

#if HAS_TWO_STAGE_SPR_LOCK
#ifdef CONFIG_XNUPOST
extern volatile vm_offset_t spr_lock_test_addr;
extern volatile uint32_t spr_lock_exception_esr;
#endif
#endif

#if defined(APPLETYPHOON)
#define CPU_NAME "Typhoon"
#elif defined(APPLETWISTER)
#define CPU_NAME "Twister"
#elif defined(APPLEHURRICANE)
#define CPU_NAME "Hurricane"
#else
#define CPU_NAME "Unknown"
#endif

#if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT))
#define ESR_WT_SERROR(esr) (((esr) & 0xffffff00) == 0xbf575400)
#define ESR_WT_REASON(esr) ((esr) & 0xff)

#define WT_REASON_NONE 0
#define WT_REASON_INTEGRITY_FAIL 1
#define WT_REASON_BAD_SYSCALL 2
#define WT_REASON_NOT_LOCKED 3
#define WT_REASON_ALREADY_LOCKED 4
#define WT_REASON_SW_REQ 5
#define WT_REASON_PT_INVALID 6
#define WT_REASON_PT_VIOLATION 7
#define WT_REASON_REG_VIOLATION 8
#endif
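/*
 * Side note (an observation, not from the source): the 0xbf575400 SError
 * signature embeds the ASCII bytes 'W' (0x57) and 'T' (0x54), matching the
 * KERNEL_INTEGRITY_WT name; the low byte carries the WT_REASON_* code that
 * ESR_WT_REASON() extracts.
 */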


extern vm_offset_t static_memory_end;

static inline unsigned
__ror(unsigned value, unsigned shift)
{
    return ((unsigned)(value) >> (unsigned)(shift)) |
           (unsigned)(value) << ((unsigned)(sizeof(unsigned) * CHAR_BIT) - (unsigned)(shift));
}
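/*
 * Worked example: with 32-bit unsigned, __ror(0x80000001, 1) ==
 * (0x80000001 >> 1) | (0x80000001 << 31) == 0x40000000 | 0x80000000 ==
 * 0xc0000000. Note the expression is undefined for shift == 0, as it would
 * left-shift by the full word width.
 */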

__dead2
static void
arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
#if defined(APPLE_ARM64_ARCH_FAMILY)
    uint64_t fed_err_sts, mmu_err_sts, lsu_err_sts;
#if defined(NO_ECORE)
    uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf;

    mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
    l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
    l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
    l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
    lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
    fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

    panic_plain("Unhandled " CPU_NAME
        " implementation specific error. state=%p esr=%#x far=%p\n"
        "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
        "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
        state, esr, (void *)far,
        (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
        (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);

#elif defined(HAS_MIGSTS)
    uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts;

    mpidr = __builtin_arm_rsr64("MPIDR_EL1");
    migsts = __builtin_arm_rsr64(STR(ARM64_REG_MIGSTS_EL1));
    mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
    l2c_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
    l2c_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
    l2c_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));
    lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
    fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));

    panic_plain("Unhandled " CPU_NAME
        " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n"
        "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
        "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n",
        state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts,
        (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
        (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf);
#else // !defined(NO_ECORE) && !defined(HAS_MIGSTS)
    uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr;
#if defined(HAS_DPC_ERR)
    uint64_t dpc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_DPC_ERR_STS));
#endif // defined(HAS_DPC_ERR)

    mpidr = __builtin_arm_rsr64("MPIDR_EL1");

    if (mpidr & MPIDR_PNE) {
        mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_MMU_ERR_STS));
        lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_LSU_ERR_STS));
        fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS));
    } else {
        mmu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_MMU_ERR_STS));
        lsu_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_LSU_ERR_STS));
        fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_E_FED_ERR_STS));
    }

    llc_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_STS));
    llc_err_adr = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_ADR));
    llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF));

    panic_plain("Unhandled " CPU_NAME
        " implementation specific error. state=%p esr=%#x far=%p p-core?%d"
#if defined(HAS_DPC_ERR)
        " dpc_err_sts:%p"
#endif
        "\n"
        "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n"
        "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n",
        state, esr, (void *)far, !!(mpidr & MPIDR_PNE),
#if defined(HAS_DPC_ERR)
        (void *)dpc_err_sts,
#endif
        (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts,
        (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf);
#endif
#else // !defined(APPLE_ARM64_ARCH_FAMILY)
#pragma unused (state, esr, far)
    panic_plain("Unhandled implementation specific error\n");
#endif
}

#if CONFIG_KERNEL_INTEGRITY
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
static void
kernel_integrity_error_handler(uint32_t esr, vm_offset_t far)
{
#if defined(KERNEL_INTEGRITY_WT)
#if (DEVELOPMENT || DEBUG)
    if (ESR_WT_SERROR(esr)) {
        switch (ESR_WT_REASON(esr)) {
        case WT_REASON_INTEGRITY_FAIL:
            panic_plain("Kernel integrity, violation in frame 0x%016lx.", far);
        case WT_REASON_BAD_SYSCALL:
            panic_plain("Kernel integrity, bad syscall.");
        case WT_REASON_NOT_LOCKED:
            panic_plain("Kernel integrity, not locked.");
        case WT_REASON_ALREADY_LOCKED:
            panic_plain("Kernel integrity, already locked.");
        case WT_REASON_SW_REQ:
            panic_plain("Kernel integrity, software request.");
        case WT_REASON_PT_INVALID:
            panic_plain("Kernel integrity, encountered invalid TTE/PTE while "
                "walking 0x%016lx.", far);
        case WT_REASON_PT_VIOLATION:
            panic_plain("Kernel integrity, violation in mapping 0x%016lx.",
                far);
        case WT_REASON_REG_VIOLATION:
            panic_plain("Kernel integrity, violation in system register %d.",
                (unsigned) far);
        default:
            panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr);
        }
    }
#else
    if (ESR_WT_SERROR(esr)) {
        panic_plain("SError esr: 0x%08x far: 0x%016lx.", esr, far);
    }
#endif
#endif
}
#pragma clang diagnostic pop
#endif

static void
arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
{
    cpu_data_t *cdp = getCpuDatap();

#if CONFIG_KERNEL_INTEGRITY
    kernel_integrity_error_handler(esr, far);
#endif

    if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
        (*(platform_error_handler_t)cdp->platform_error_handler)(cdp->cpu_id, far);
    } else {
        arm64_implementation_specific_error(state, esr, far);
    }
}

void
panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss)
{
    boolean_t ss_valid;

    ss_valid = is_saved_state64(ss);
    arm_saved_state64_t *state = saved_state64(ss);

    panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n"
        "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n"
        "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n"
        "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n"
        "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n"
        "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n"
        "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n"
        "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n"
        "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n"
        "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n",
        msg, state->pc, state->lr, ss, (ss_valid ? "" : " INVALID"),
        state->x[0], state->x[1], state->x[2], state->x[3],
        state->x[4], state->x[5], state->x[6], state->x[7],
        state->x[8], state->x[9], state->x[10], state->x[11],
        state->x[12], state->x[13], state->x[14], state->x[15],
        state->x[16], state->x[17], state->x[18], state->x[19],
        state->x[20], state->x[21], state->x[22], state->x[23],
        state->x[24], state->x[25], state->x[26], state->x[27],
        state->x[28], state->fp, state->lr, state->sp,
        state->pc, state->cpsr, state->esr, state->far);
}

void
sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused)
{
    esr_exception_class_t class = ESR_EC(esr);
    arm_saved_state_t *state = &context->ss;

    switch (class) {
    case ESR_EC_UNCATEGORIZED:
    {
        uint32_t instr = *((uint32_t*)get_saved_state_pc(state));
        if (IS_ARM_GDB_TRAP(instr)) {
            DebuggerCall(EXC_BREAKPOINT, state);
        }
        // Intentionally fall through to panic if we return from the debugger
    }
    default:
        panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state);
    }
}

#if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
static bool
handle_msr_write_from_xnupost(arm_saved_state_t *state, uint32_t esr)
{
    user_addr_t pc = get_saved_state_pc(state);
    if ((spr_lock_test_addr != 0) && (pc == spr_lock_test_addr)) {
        spr_lock_exception_esr = esr;
        set_saved_state_pc(state, pc + 4);
        return true;
    }

    return false;
}
#endif

void
sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
    esr_exception_class_t class = ESR_EC(esr);
    arm_saved_state_t *state = &context->ss;
    vm_offset_t recover = 0;
    thread_t thread = current_thread();
#if MACH_ASSERT
    int preemption_level = get_preemption_level();
#endif

    ASSERT_CONTEXT_SANITY(context);

    if (__improbable(ESR_INSTR_IS_2BYTES(esr))) {
        /*
         * We no longer support 32-bit, which means no 2-byte
         * instructions.
         */
        if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
            panic("Exception on 2-byte instruction, "
                "context=%p, esr=%#x, far=%p",
                context, esr, (void *)far);
        } else {
            panic_with_thread_kernel_state("Exception on 2-byte instruction", state);
        }
    }

    /* Don't run exception handler with recover handler set in case of double fault */
    if (thread->recover) {
        recover = thread->recover;
        thread->recover = (vm_offset_t)NULL;
    }

    /* Inherit the interrupt masks from previous context */
    if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) {
        ml_set_interrupts_enabled(TRUE);
    }

    switch (class) {
    case ESR_EC_SVC_64:
        if (!is_saved_state64(state) || !PSR64_IS_USER(get_saved_state_cpsr(state))) {
            panic("Invalid SVC_64 context");
        }

        handle_svc(state);
        break;

    case ESR_EC_DABORT_EL0:
        handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort);
        thread_exception_return();

    case ESR_EC_MSR_TRAP:
        handle_msr_trap(state, ESR_ISS(esr));
        break;

    case ESR_EC_IABORT_EL0:
        handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort);
        thread_exception_return();

    case ESR_EC_IABORT_EL1:

        panic_with_thread_kernel_state("Kernel instruction fetch abort", state);

    case ESR_EC_PC_ALIGN:
        handle_pc_align(state);
        __builtin_unreachable();

    case ESR_EC_DABORT_EL1:
        handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort);
        break;

    case ESR_EC_UNCATEGORIZED:
        assert(!ESR_ISS(esr));

#if defined(HAS_TWO_STAGE_SPR_LOCK) && defined(CONFIG_XNUPOST)
        if (handle_msr_write_from_xnupost(state, esr)) {
            break;
        }
#endif
        handle_uncategorized(&context->ss);
        break;

    case ESR_EC_SP_ALIGN:
        handle_sp_align(state);
        __builtin_unreachable();

    case ESR_EC_BKPT_AARCH32:
        handle_breakpoint(state);
        __builtin_unreachable();

    case ESR_EC_BRK_AARCH64:
        if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
            panic_with_thread_kernel_state("Break instruction exception from kernel. Panic (by design)", state);
        } else {
            handle_breakpoint(state);
        }
        __builtin_unreachable();

    case ESR_EC_BKPT_REG_MATCH_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_breakpoint(state);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_BKPT_REG_MATCH_EL1:
        panic_with_thread_kernel_state("Hardware Breakpoint Debug exception from kernel. Panic (by design)", state);
        __builtin_unreachable();

    case ESR_EC_SW_STEP_DEBUG_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_sw_step_debug(state);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_SW_STEP_DEBUG_EL1:
        panic_with_thread_kernel_state("Software Step Debug exception from kernel. Panic (by design)", state);
        __builtin_unreachable();

    case ESR_EC_WATCHPT_MATCH_EL0:
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            handle_watchpoint(far);
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_WATCHPT_MATCH_EL1:
        /*
         * If we hit a watchpoint in kernel mode, it is probably in a copyin/copyout
         * that we don't want to abort. Turn off watchpoints and keep going; we'll
         * turn them back on in return_from_exception.
         */
        if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) {
            arm_debug_set(NULL);
            break; /* return to first level handler */
        }
        panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p",
            class, state, class, esr, (void *)far);
        __builtin_unreachable();

    case ESR_EC_TRAP_SIMD_FP:
        handle_simd_trap(state, esr);
        __builtin_unreachable();

    case ESR_EC_ILLEGAL_INSTR_SET:
        if (EXCB_ACTION_RERUN !=
            ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) {
            // instruction is not re-executed
            panic("Illegal instruction set exception. state=%p class=%u esr=%u far=%p spsr=0x%x",
                state, class, esr, (void *)far, get_saved_state_cpsr(state));
        }
        // must clear this fault in PSR to re-run
        mask_saved_state_cpsr(state, 0, PSR64_IL);
        break;

    case ESR_EC_MCR_MRC_CP15_TRAP:
    case ESR_EC_MCRR_MRRC_CP15_TRAP:
    case ESR_EC_MCR_MRC_CP14_TRAP:
    case ESR_EC_LDC_STC_CP14_TRAP:
    case ESR_EC_MCRR_MRRC_CP14_TRAP:
        handle_user_trapped_instruction32(state, esr);
        __builtin_unreachable();

    case ESR_EC_WFI_WFE:
        // Use of WFI or WFE instruction when they have been disabled for EL0
        handle_wf_trap(state);
        __builtin_unreachable();

    case ESR_EC_FLOATING_POINT_64:
        handle_fp_trap(state, esr);
        __builtin_unreachable();


    default:
        panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p",
            state, class, esr, (void *)far);
        __builtin_unreachable();
    }

    if (recover) {
        thread->recover = recover;
    }
#if MACH_ASSERT
    if (preemption_level != get_preemption_level()) {
        panic("synchronous exception changed preemption level from %d to %d", preemption_level, get_preemption_level());
    }
#endif
}

/*
 * Uncategorized exceptions are a catch-all for general execution errors.
 * ARM64_TODO: For now, we assume this is for undefined instruction exceptions.
 */
static void
handle_uncategorized(arm_saved_state_t *state)
{
    exception_type_t exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

#if CONFIG_DTRACE
    if (tempDTraceTrapHook && (tempDTraceTrapHook(exception, state, 0, 0) == KERN_SUCCESS)) {
        return;
    }

    if (PSR64_IS_USER64(get_saved_state_cpsr(state))) {
        /*
         * For a 64bit user process, we care about all 4 bytes of the
         * instr.
         */
        if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) {
            if (dtrace_user_probe(state) == KERN_SUCCESS) {
                return;
            }
        }
    } else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) {
        /*
         * For a 32-bit user process, we check for thumb mode, in
         * which case we only care about a 2-byte instruction length.
         * For non-thumb mode, we care about all 4 bytes of the instruction.
         */
        if (get_saved_state_cpsr(state) & PSR64_MODE_USER32_THUMB) {
            if (((uint16_t)instr == FASTTRAP_THUMB32_INSTR) ||
                ((uint16_t)instr == FASTTRAP_THUMB32_RET_INSTR)) {
                if (dtrace_user_probe(state) == KERN_SUCCESS) {
                    return;
                }
            }
        } else {
            if ((instr == FASTTRAP_ARM32_INSTR) ||
                (instr == FASTTRAP_ARM32_RET_INSTR)) {
                if (dtrace_user_probe(state) == KERN_SUCCESS) {
                    return;
                }
            }
        }
    }

#endif /* CONFIG_DTRACE */

    if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
        if (IS_ARM_GDB_TRAP(instr)) {
            boolean_t interrupt_state;
            vm_offset_t kstackptr;
            exception = EXC_BREAKPOINT;

            interrupt_state = ml_set_interrupts_enabled(FALSE);

            /* Save off the context here (so that the debug logic
             * can see the original state of this thread).
             */
            kstackptr = (vm_offset_t) current_thread()->machine.kstackptr;
            if (kstackptr) {
                copy_signed_thread_state(&((thread_kernel_state_t) kstackptr)->machine.ss, state);
            }

            /* Hop into the debugger (typically either due to a
             * fatal exception, an explicit panic, or a stackshot
             * request).
             */
            DebuggerCall(exception, state);

            (void) ml_set_interrupts_enabled(interrupt_state);
            return;
        } else {
            panic("Undefined kernel instruction: pc=%p instr=%x\n", (void*)get_saved_state_pc(state), instr);
        }
    }

    /*
     * Check for GDB breakpoint via illegal opcode.
     */
    if (IS_ARM_GDB_TRAP(instr)) {
        exception = EXC_BREAKPOINT;
        codes[0] = EXC_ARM_BREAKPOINT;
        codes[1] = instr;
    } else {
        codes[1] = instr;
    }

    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_breakpoint(arm_saved_state_t *state)
{
    exception_type_t exception = EXC_BREAKPOINT;
    mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT};
    mach_msg_type_number_t numcodes = 2;

    codes[1] = get_saved_state_pc(state);
    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_watchpoint(vm_offset_t fault_addr)
{
    exception_type_t exception = EXC_BREAKPOINT;
    mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG};
    mach_msg_type_number_t numcodes = 2;

    codes[1] = fault_addr;
    exception_triage(exception, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover,
    abort_inspector_t inspect_abort, abort_handler_t handler)
{
    fault_status_t fault_code;
    vm_prot_t fault_type;

    inspect_abort(ESR_ISS(esr), &fault_code, &fault_type);
    handler(state, esr, fault_addr, fault_code, fault_type, recover);
}

static void
inspect_instruction_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
    getCpuDatap()->cpu_stat.instr_ex_cnt++;
    *fault_code = ISS_IA_FSC(iss);
    *fault_type = (VM_PROT_READ | VM_PROT_EXECUTE);
}

static void
inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_type)
{
    getCpuDatap()->cpu_stat.data_ex_cnt++;
    *fault_code = ISS_DA_FSC(iss);

    /* Cache operations report faults as write access. Change these to read access. */
    if ((iss & ISS_DA_WNR) && !(iss & ISS_DA_CM)) {
        *fault_type = (VM_PROT_READ | VM_PROT_WRITE);
    } else {
        *fault_type = (VM_PROT_READ);
    }
}

static void
handle_pc_align(arm_saved_state_t *ss)
{
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
        panic_with_thread_kernel_state("PC alignment exception from kernel.", ss);
    }

    exc = EXC_BAD_ACCESS;
    codes[0] = EXC_ARM_DA_ALIGN;
    codes[1] = get_saved_state_pc(ss);

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_sp_align(arm_saved_state_t *ss)
{
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(ss))) {
        panic_with_thread_kernel_state("SP alignment exception from kernel.", ss);
    }

    exc = EXC_BAD_ACCESS;
    codes[0] = EXC_ARM_SP_ALIGN;
    codes[1] = get_saved_state_sp(ss);

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_wf_trap(arm_saved_state_t *state)
{
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));

    exc = EXC_BAD_INSTRUCTION;
    codes[0] = EXC_ARM_UNDEFINED;
    codes[1] = instr;

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static void
handle_fp_trap(arm_saved_state_t *state, uint32_t esr)
{
    exception_type_t exc = EXC_ARITHMETIC;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;
    uint32_t instr = 0;

    /* The floating point trap flags are only valid if TFV is set. */
    if (!(esr & ISS_FP_TFV)) {
        codes[0] = EXC_ARM_FP_UNDEFINED;
    } else if (esr & ISS_FP_UFF) {
        codes[0] = EXC_ARM_FP_UF;
    } else if (esr & ISS_FP_OFF) {
        codes[0] = EXC_ARM_FP_OF;
    } else if (esr & ISS_FP_IOF) {
        codes[0] = EXC_ARM_FP_IO;
    } else if (esr & ISS_FP_DZF) {
        codes[0] = EXC_ARM_FP_DZ;
    } else if (esr & ISS_FP_IDF) {
        codes[0] = EXC_ARM_FP_ID;
    } else if (esr & ISS_FP_IXF) {
        codes[0] = EXC_ARM_FP_IX;
    } else {
        panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr);
    }

    COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
    codes[1] = instr;

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}
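/*
 * Note: the ISS_FP_* bits tested above correspond to the IEEE-754 exception
 * classes (invalid-operation, divide-by-zero, overflow, underflow, inexact)
 * plus ARM's input-denormal; when TFV is clear they carry no information,
 * hence the EXC_ARM_FP_UNDEFINED fallback.
 */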


static void
handle_sw_step_debug(arm_saved_state_t *state)
{
    thread_t thread = current_thread();
    exception_type_t exc;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;

    if (!PSR64_IS_USER(get_saved_state_cpsr(state))) {
        panic_with_thread_kernel_state("SW_STEP_DEBUG exception from kernel.", state);
    }

    // Disable single step and unmask interrupts (in the saved state, anticipating next exception return)
    if (thread->machine.DebugData != NULL) {
        thread->machine.DebugData->uds.ds64.mdscr_el1 &= ~0x1;
    } else {
        panic_with_thread_kernel_state("SW_STEP_DEBUG exception thread DebugData is NULL.", state);
    }

    mask_saved_state_cpsr(thread->machine.upcb, 0, PSR64_SS | DAIF_IRQF | DAIF_FIQF);

    // Special encoding for gdb single step event on ARM
    exc = EXC_BREAKPOINT;
    codes[0] = 1;
    codes[1] = 0;

    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

static int
is_vm_fault(fault_status_t status)
{
    switch (status) {
    case FSC_TRANSLATION_FAULT_L0:
    case FSC_TRANSLATION_FAULT_L1:
    case FSC_TRANSLATION_FAULT_L2:
    case FSC_TRANSLATION_FAULT_L3:
    case FSC_ACCESS_FLAG_FAULT_L1:
    case FSC_ACCESS_FLAG_FAULT_L2:
    case FSC_ACCESS_FLAG_FAULT_L3:
    case FSC_PERMISSION_FAULT_L1:
    case FSC_PERMISSION_FAULT_L2:
    case FSC_PERMISSION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}

static int
is_translation_fault(fault_status_t status)
{
    switch (status) {
    case FSC_TRANSLATION_FAULT_L0:
    case FSC_TRANSLATION_FAULT_L1:
    case FSC_TRANSLATION_FAULT_L2:
    case FSC_TRANSLATION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}

#if __ARM_PAN_AVAILABLE__
static int
is_permission_fault(fault_status_t status)
{
    switch (status) {
    case FSC_PERMISSION_FAULT_L1:
    case FSC_PERMISSION_FAULT_L2:
    case FSC_PERMISSION_FAULT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}
#endif

static int
is_alignment_fault(fault_status_t status)
{
    return status == FSC_ALIGNMENT_FAULT;
}

static int
is_parity_error(fault_status_t status)
{
    switch (status) {
    case FSC_SYNC_PARITY:
    case FSC_ASYNC_PARITY:
    case FSC_SYNC_PARITY_TT_L1:
    case FSC_SYNC_PARITY_TT_L2:
    case FSC_SYNC_PARITY_TT_L3:
        return TRUE;
    default:
        return FALSE;
    }
}

static void
set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover)
{
#if defined(HAS_APPLE_PAC)
    thread_t thread = current_thread();
    const uintptr_t disc = ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER);
    const char *panic_msg = "Illegal thread->recover value %p";

    MANIPULATE_SIGNED_THREAD_STATE(iss,
        // recover = (vm_offset_t)ptrauth_auth_data((void *)recover, ptrauth_key_function_pointer,
        //     ptrauth_blend_discriminator(&thread->recover, PAC_DISCRIMINATOR_RECOVER));
        "mov x1, %[recover]  \n"
        "mov x6, %[disc]     \n"
        "autia x1, x6        \n"
        // if (recover != (vm_offset_t)ptrauth_strip((void *)recover, ptrauth_key_function_pointer)) {
        "mov x6, x1          \n"
        "xpaci x6            \n"
        "cmp x1, x6          \n"
        "beq 1f              \n"
        //     panic("Illegal thread->recover value %p", (void *)recover);
        "mov x0, %[panic_msg] \n"
        "bl _panic           \n"
        // }
        "1:                  \n"
        "str x1, [x0, %[SS64_PC]] \n",
        [recover] "r"(recover),
        [disc] "r"(disc),
        [panic_msg] "r"(panic_msg)
        );
#else
    set_saved_state_pc(iss, recover);
#endif
}
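/*
 * Why the asm above: on HAS_APPLE_PAC configurations the saved PC lives in
 * signed thread state, so the recovery address is authenticated (autia, then
 * compared against its stripped form, panicking on a forged pointer) and
 * stored within the signing critical section instead of through a plain
 * set_saved_state_pc().
 */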

static void
handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
{
    exception_type_t exc = EXC_BAD_ACCESS;
    mach_exception_data_type_t codes[2];
    mach_msg_type_number_t numcodes = 2;
    thread_t thread = current_thread();

    (void)esr;
    (void)state;

    if (ml_at_interrupt_context()) {
        panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state);
    }

    thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */

    if (is_vm_fault(fault_code)) {
        kern_return_t result = KERN_FAILURE;
        vm_map_t map = thread->map;
        vm_offset_t vm_fault_addr = fault_addr;

        assert(map != kernel_map);

        if (!(fault_type & VM_PROT_EXECUTE) && user_tbi_enabled()) {
            vm_fault_addr = tbi_clear(fault_addr);
        }

#if CONFIG_DTRACE
        if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
            if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
                if (recover) {
                    set_saved_state_pc_to_recovery_handler(state, recover);
                } else {
                    ml_set_interrupts_enabled(FALSE);
                    panic_with_thread_kernel_state("copyin/out has no recovery point", state);
                }
                return;
            } else {
                ml_set_interrupts_enabled(FALSE);
                panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state);
            }
        }
#else
        (void)recover;
#endif

#if CONFIG_PGTRACE
        if (pgtrace_enabled) {
            /* Check to see if trace bit is set */
            result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
            if (result == KERN_SUCCESS) {
                return;
            }
        }
#endif

        /* check to see if it is just a pmap ref/modify fault */

        if ((result != KERN_SUCCESS) && !is_translation_fault(fault_code)) {
            result = arm_fast_fault(map->pmap, trunc_page(vm_fault_addr), fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE);
        }
        if (result != KERN_SUCCESS) {
            {
                /* We have to fault the page in */
                result = vm_fault(map, vm_fault_addr, fault_type,
                    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
                    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
            }
        }
        if (result == KERN_SUCCESS || result == KERN_ABORTED) {
            return;
        }

        /*
         * vm_fault() should never return KERN_FAILURE for page faults from user space.
         * If it does, we're leaking preemption disables somewhere in the kernel.
         */
        if (__improbable(result == KERN_FAILURE)) {
            panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
        }

        codes[0] = result;
    } else if (is_alignment_fault(fault_code)) {
        codes[0] = EXC_ARM_DA_ALIGN;
    } else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
        if (fault_code == FSC_SYNC_PARITY) {
            arm64_platform_error(state, esr, fault_addr);
            return;
        }
#else
        panic("User parity error.");
#endif
    } else {
        codes[0] = KERN_FAILURE;
    }

    codes[1] = fault_addr;
    exception_triage(exc, codes, numcodes);
    __builtin_unreachable();
}

#if __ARM_PAN_AVAILABLE__
static int
is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fault_status_t fault_code)
{
    // A PAN (Privileged Access Never) fault occurs on a data read/write from EL1
    // to a virtual address that is also readable/writable from EL0.

    // To check for PAN fault, we evaluate if the following conditions are true:
    // 1. This is a permission fault
    // 2. PAN is enabled
    // 3. AT instruction (on which PAN has no effect) on the same faulting address
    //    succeeds

    vm_offset_t pa;

    if (!(is_permission_fault(fault_code) && get_saved_state_cpsr(state) & PSR64_PAN)) {
        return FALSE;
    }

    if (esr & ISS_DA_WNR) {
        pa = mmu_kvtop_wpreflight(fault_addr);
    } else {
        pa = mmu_kvtop(fault_addr);
    }
    return (pa) ? TRUE : FALSE;
}
#endif

static void
handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr,
    fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover)
{
    thread_t thread = current_thread();
    (void)esr;

#if CONFIG_DTRACE
    if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
        if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
            /*
             * Point to next instruction, or recovery handler if set.
             */
            if (recover) {
                set_saved_state_pc_to_recovery_handler(state, recover);
            } else {
                add_saved_state_pc(state, 4);
            }
            return;
        } else {
            ml_set_interrupts_enabled(FALSE);
            panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state);
        }
    }
#endif

#if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */
    if (ml_at_interrupt_context()) {
        panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
    }
#endif

    if (is_vm_fault(fault_code)) {
        kern_return_t result = KERN_FAILURE;
        vm_map_t map;
        int interruptible;

        /*
         * Ensure no faults in the physical aperture. This could happen if
         * a page table is incorrectly allocated from the read only region
         * when running with KTRR.
         */


#if __ARM_PAN_AVAILABLE__ && defined(CONFIG_XNUPOST)
        if (is_permission_fault(fault_code) && !(get_saved_state_cpsr(state) & PSR64_PAN) &&
            (pan_ro_addr != 0) && (fault_addr == pan_ro_addr)) {
            ++pan_exception_level;
            // On an exception taken from a PAN-disabled context, verify
            // that PAN is re-enabled for the exception handler and that
            // accessing the test address produces a PAN fault.
            pan_fault_value = *(char *)pan_test_addr;
            __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
            add_saved_state_pc(state, 4);
            return;
        }
#endif

        if (fault_addr >= gVirtBase && fault_addr < static_memory_end) {
            panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state);
        }

        if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) {
            map = kernel_map;
            interruptible = THREAD_UNINT;
        } else {
            map = thread->map;
            interruptible = THREAD_ABORTSAFE;
        }

#if CONFIG_PGTRACE
        if (pgtrace_enabled) {
            /* Check to see if trace bit is set */
            result = pmap_pgtrace_fault(map->pmap, fault_addr, state);
            if (result == KERN_SUCCESS) {
                return;
            }
        }

        if (ml_at_interrupt_context()) {
            panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
        }
#endif

        /* check to see if it is just a pmap ref/modify fault */
        if (!is_translation_fault(fault_code)) {
            result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE);
            if (result == KERN_SUCCESS) {
                return;
            }
        }

        if (result != KERN_PROTECTION_FAILURE) {
            /*
             * We have to "fault" the page in.
             */
            result = vm_fault(map, fault_addr, fault_type,
                /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible,
                /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
        }

        if (result == KERN_SUCCESS) {
            return;
        }

        /*
         * If we have a recover handler, invoke it now.
         */
        if (recover) {
            set_saved_state_pc_to_recovery_handler(state, recover);
            return;
        }

#if __ARM_PAN_AVAILABLE__
        if (is_pan_fault(state, esr, fault_addr, fault_code)) {
#ifdef CONFIG_XNUPOST
            if ((pan_test_addr != 0) && (fault_addr == pan_test_addr)) {
                ++pan_exception_level;
                // read the user-accessible value to make sure
                // pan is enabled and produces a 2nd fault from
                // the exception handler
                if (pan_exception_level == 1) {
                    pan_fault_value = *(char *)pan_test_addr;
                    __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
                }
                // this fault address is used for PAN test
                // disable PAN and rerun
                mask_saved_state_cpsr(state, 0, PSR64_PAN);
                return;
            }
#endif
            panic_with_thread_kernel_state("Privileged access never abort.", state);
        }
#endif

#if CONFIG_PGTRACE
    } else if (ml_at_interrupt_context()) {
        panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state);
#endif
    } else if (is_alignment_fault(fault_code)) {
        if (recover) {
            set_saved_state_pc_to_recovery_handler(state, recover);
            return;
        }
        panic_with_thread_kernel_state("Unaligned kernel data abort.", state);
    } else if (is_parity_error(fault_code)) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
        if (fault_code == FSC_SYNC_PARITY) {
            arm64_platform_error(state, esr, fault_addr);
            return;
        }
#else
        panic_with_thread_kernel_state("Kernel parity error.", state);
#endif
    } else {
        kprintf("Unclassified kernel abort (fault_code=0x%x)\n", fault_code);
    }

    panic_with_thread_kernel_state("Kernel data abort.", state);
}

extern void syscall_trace(struct arm_saved_state * regs);

static void
handle_svc(arm_saved_state_t *state)
{
    int trap_no = get_saved_state_svc_number(state);
    thread_t thread = current_thread();
    struct proc *p;

#define handle_svc_kprintf(x...) /* kprintf("handle_svc: " x) */

#define TRACE_SYSCALL 1
#if TRACE_SYSCALL
    syscall_trace(state);
#endif

    thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling SVC from userspace */

    if (trap_no == (int)PLATFORM_SYSCALL_TRAP_NO) {
        platform_syscall(state);
        panic("Returned from platform_syscall()?");
    }

    mach_kauth_cred_uthread_update();

    if (trap_no < 0) {
        if (trap_no == -3) {
            handle_mach_absolute_time_trap(state);
            return;
        } else if (trap_no == -4) {
            handle_mach_continuous_time_trap(state);
            return;
        }

        /* Counting perhaps better in the handler, but this is how it's been done */
        thread->syscalls_mach++;
        mach_syscall(state);
    } else {
        /* Counting perhaps better in the handler, but this is how it's been done */
        thread->syscalls_unix++;
        p = get_bsdthreadtask_info(thread);

        assert(p);

        unix_syscall(state, thread, (struct uthread*)thread->uthread, p);
    }
}
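/*
 * Dispatch summary for handle_svc(): negative trap numbers select Mach traps,
 * with -3 and -4 short-circuited to the mach_absolute_time() /
 * mach_continuous_time() fast paths below; non-negative numbers take the BSD
 * unix_syscall() path.
 */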
1357 | ||
1358 | static void | |
1359 | handle_mach_absolute_time_trap(arm_saved_state_t *state) | |
1360 | { | |
1361 | uint64_t now = mach_absolute_time(); | |
1362 | saved_state64(state)->x[0] = now; | |
1363 | } | |
1364 | ||
1365 | static void | |
1366 | handle_mach_continuous_time_trap(arm_saved_state_t *state) | |
1367 | { | |
1368 | uint64_t now = mach_continuous_time(); | |
1369 | saved_state64(state)->x[0] = now; | |
1370 | } | |
1371 | ||
static void
handle_msr_trap(arm_saved_state_t *state, uint32_t iss)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	(void)iss;

	if (!is_saved_state64(state)) {
		panic("MSR/MRS trap (EC 0x%x) from 32-bit state\n", ESR_EC_MSR_TRAP);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("MSR/MRS trap (EC 0x%x) from kernel\n", ESR_EC_MSR_TRAP);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
}

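/*
 * handle_user_trapped_instruction32: an instruction trapped while executing
 * in 32-bit userspace; delivered as EXC_BAD_INSTRUCTION with the opcode.
 */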
static void
handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr;

	if (is_saved_state64(state)) {
		panic("ESR (0x%x) for instruction trapped from U32, but saved state is 64-bit.", esr);
	}

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for instruction trapped from U32, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

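/*
 * handle_simd_trap: a trapped SIMD/FP instruction from userspace; delivered
 * as EXC_BAD_INSTRUCTION with the opcode.
 */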
static void
handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t numcodes = 2;
	uint32_t instr = 0;

	if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) {
		panic("ESR (0x%x) for SIMD trap from userland, actually came from kernel?", esr);
	}

	COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr));
	codes[1] = instr;

	exception_triage(exception, codes, numcodes);
	__builtin_unreachable();
}

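/*
 * sleh_irq: second-level IRQ handler.  Runs the registered platform
 * interrupt handler, then mixes the interrupt timestamp into the kernel
 * entropy buffer.
 */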
void
sleh_irq(arm_saved_state_t *state)
{
	uint64_t timestamp = 0;
	uint32_t old_entropy_data = 0;
	uint32_t old_entropy_sample_count = 0;
	size_t entropy_index = 0;
	uint32_t * entropy_data_ptr = NULL;
	cpu_data_t * cdp = getCpuDatap();
#if MACH_ASSERT
	int preemption_level = get_preemption_level();
#endif

	sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER);

	/* Run the registered interrupt handler. */
	cdp->interrupt_handler(cdp->interrupt_target,
	    cdp->interrupt_refCon,
	    cdp->interrupt_nub,
	    cdp->interrupt_source);

	/* We use interrupt timing as an entropy source. */
	timestamp = ml_get_timebase();

	/*
	 * The buffer index is subject to races, but as these races should only
	 * result in multiple CPUs updating the same location, the end result
	 * should be that noise gets written into the entropy buffer.  As this
	 * is the entire point of the entropy buffer, we will not worry about
	 * these races for now.
	 */
	old_entropy_sample_count = EntropyData.sample_count;
	EntropyData.sample_count += 1;

	entropy_index = old_entropy_sample_count & ENTROPY_BUFFER_INDEX_MASK;
	entropy_data_ptr = EntropyData.buffer + entropy_index;

	/* Mix the timestamp data and the old data together. */
	old_entropy_data = *entropy_data_ptr;
	*entropy_data_ptr = (uint32_t)timestamp ^ __ror(old_entropy_data, 9);

	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, get_preemption_level());
	}
#endif
}

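/*
 * sleh_fiq: second-level FIQ handler.  FIQs carry the timer and, when
 * MONOTONIC_FIQ is configured, PMU (PMI) interrupts.
 */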
void
sleh_fiq(arm_saved_state_t *state)
{
	unsigned int type = DBG_INTR_TYPE_UNKNOWN;
#if MACH_ASSERT
	int preemption_level = get_preemption_level();
#endif

#if MONOTONIC_FIQ
	uint64_t pmcr0 = 0, upmsr = 0;
#endif /* MONOTONIC_FIQ */

#if MONOTONIC_FIQ
	if (mt_pmi_pending(&pmcr0, &upmsr)) {
		type = DBG_INTR_TYPE_PMI;
	} else
#endif /* MONOTONIC_FIQ */
	if (ml_get_timer_pending()) {
		type = DBG_INTR_TYPE_TIMER;
	}

	sleh_interrupt_handler_prologue(state, type);

#if MONOTONIC_FIQ
	if (type == DBG_INTR_TYPE_PMI) {
		mt_fiq(getCpuDatap(), pmcr0, upmsr);
	} else
#endif /* MONOTONIC_FIQ */
	{
		/*
		 * We don't know that this is a timer, but we don't have insight into
		 * the other interrupts that go down this path.
		 */

		cpu_data_t *cdp = getCpuDatap();

		cdp->cpu_decrementer = -1; /* Large */

		/*
		 * ARM64_TODO: whether we're coming from userland is ignored right now.
		 * We can easily thread it through, but not bothering for the
		 * moment (AArch32 doesn't either).
		 */
		rtclock_intr(TRUE);
	}

	sleh_interrupt_handler_epilogue();
#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, get_preemption_level());
	}
#endif
}

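/*
 * sleh_serror: asynchronous SError exception; handed to the platform
 * error handler after sanity-checking the saved context.
 */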
void
sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far)
{
	arm_saved_state_t *state = &context->ss;
#if MACH_ASSERT
	int preemption_level = get_preemption_level();
#endif

	ASSERT_CONTEXT_SANITY(context);
	arm64_platform_error(state, esr, far);
#if MACH_ASSERT
	if (preemption_level != get_preemption_level()) {
		panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level());
	}
#endif
}

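/* Emit the kdebug end event for a Mach trap along with its return value. */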
void
mach_syscall_trace_exit(unsigned int retval,
    unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) |
	    DBG_FUNC_END, retval, 0, 0, 0, 0);
}

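/*
 * thread_syscall_return: deposit the return value in x0 of the saved user
 * state and return to userspace via thread_exception_return().
 */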
__attribute__((noreturn))
void
thread_syscall_return(kern_return_t error)
{
	thread_t thread;
	struct arm_saved_state *state;

	thread = current_thread();
	state = get_user_regs(thread);

	assert(is_saved_state64(state));
	saved_state64(state)->x[0] = error;

#if MACH_ASSERT
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(thread)->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* MACH_ASSERT */

	if (kdebug_enable) {
		/* Invert syscall number (negative for a mach syscall) */
		mach_syscall_trace_exit(error, (-1) * get_saved_state_svc_number(state));
	}

	thread_exception_return();
}

void
syscall_trace(
	struct arm_saved_state * regs __unused)
{
	/* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */
}

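/*
 * Shared interrupt bookkeeping: bracket every interrupt with kdebug
 * events, mark the interrupted thread for telemetry on entry, and give
 * kperf a sample point on exit.
 */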
static void
sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type)
{
	uint64_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state));

	uint64_t pc = is_user ? get_saved_state_pc(state) :
	    VM_KERNEL_UNSLIDE(get_saved_state_pc(state));

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, pc, is_user, type);

#if CONFIG_TELEMETRY
	if (telemetry_needs_record) {
		telemetry_mark_curthread((boolean_t)is_user, FALSE);
	}
#endif /* CONFIG_TELEMETRY */
}

static void
sleh_interrupt_handler_epilogue(void)
{
#if KPERF
	kperf_interrupt();
#endif /* KPERF */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}

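/*
 * sleh_invalid_stack: the exception vector detected a kernel SP outside the
 * thread's kernel stack.  An SP within one page below the stack bottom is
 * reported as a probable overflow; anything else as probable corruption.
 */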
void
sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t far __unused)
{
	thread_t thread = current_thread();
	vm_offset_t kernel_stack_bottom, sp;

	sp = get_saved_state_sp(&context->ss);
	kernel_stack_bottom = round_page(thread->machine.kstackptr) - KERNEL_STACK_SIZE;

	if ((sp < kernel_stack_bottom) && (sp >= (kernel_stack_bottom - PAGE_SIZE))) {
		panic_with_thread_kernel_state("Invalid kernel stack pointer (probable overflow).", &context->ss);
	}

	panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss);
}