/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/debug.h>
#include <mach_kdp.h>
#include <machine/endian.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_types.h>
#include <mach/mach_traps.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/message.h>
#include <mach/machine/thread_status.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <arm/trap.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/setjmp.h>
#include <arm/proc_reg.h>

/*
 * External function prototypes.
 */
#include <kern/syscall_sw.h>
#include <kern/host.h>
#include <kern/processor.h>

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs, unsigned int instr);
extern boolean_t dtrace_tally_fault(user_addr_t);

/* Traps for userland processing. We can't include bsd/sys/fasttrap_isa.h, so the trap
 * instructions are copied over from that file. They must be kept in sync! */
#define FASTTRAP_ARM_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB_INSTR 0xdefc

#define FASTTRAP_ARM_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB_RET_INSTR 0xdefb

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif

#define COPYIN(dst, src, size) \
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \
	copyin_kern(dst, src, size) \
	: \
	copyin(dst, src, size)

#define COPYOUT(src, dst, size) \
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \
	copyout_kern(src, dst, size) \
	: \
	copyout(src, dst, size)

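/*
 * These macros rely on a local `regs` saved-state pointer in the calling
 * function. When the aborted context was not user mode (e.g. a fault taken
 * during a copyio operation), regs->pc is a kernel address, so the
 * kernel-to-kernel copy routines are used instead. Typical use, as in the
 * handlers below:
 *
 *	uint32_t instr = 0;
 *	if (COPYIN((user_addr_t)(regs->pc), (char *)&instr,
 *	    (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) {
 *		goto exit;
 *	}
 */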
/* Second-level exception handlers forward declarations */
void sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *);
void sleh_abort(struct arm_saved_state *, int);
static kern_return_t sleh_alignment(struct arm_saved_state *);
static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs);

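/*
 * sleh_alignment_count counts alignment faults taken; when
 * trap_on_alignment_fault is non-zero, every Nth alignment fault is reported
 * to the faulting task instead of being emulated (see sleh_alignment below).
 */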
int sleh_alignment_count = 0;
int trap_on_alignment_fault = 0;

/*
 * Routine:	sleh_undef
 * Function:	Second level exception handler for undefined instruction exceptions
 */

void
sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __unused)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t code[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t codeCnt = 2;
	thread_t thread = current_thread();
	vm_offset_t recover;

	recover = thread->recover;
	thread->recover = 0;

	getCpuDatap()->cpu_stat.undef_ex_cnt++;

	/* Inherit the interrupt masks from the previous context */
	if (!(regs->cpsr & PSR_INTMASK)) {
		ml_set_interrupts_enabled(TRUE);
	}

#if CONFIG_DTRACE
	if (tempDTraceTrapHook) {
		if (tempDTraceTrapHook(exception, regs, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			goto exit;
		}
	}

	/* Check to see if we've hit a userland probe */
	if ((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) {
		if (regs->cpsr & PSR_TF) {
			uint16_t instr = 0;

			if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
				goto exit;
			}

			if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) {
					/* If it succeeds, we are done... */
					goto exit;
				}
			}
		} else {
			uint32_t instr = 0;

			if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) {
				goto exit;
			}

			if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) {
					/* If it succeeds, we are done... */
					goto exit;
				}
			}
		}
	}
#endif /* CONFIG_DTRACE */

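	/*
	 * Classify the undefined instruction. PSR_TF set means the thread was
	 * executing in Thumb state, so fetch a 16-bit halfword first and widen
	 * to 32 bits if it is the first half of a Thumb-2 encoding; otherwise
	 * fetch a single 32-bit ARM word.
	 */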
	if (regs->cpsr & PSR_TF) {
		unsigned short instr = 0;

		if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) {
			goto exit;
		}

		if (IS_THUMB32(instr)) {
			unsigned int instr32;

			instr32 = (instr << 16);

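			/*
			 * A 32-bit Thumb-2 instruction occupies two consecutive
			 * halfwords: the halfword already fetched forms the
			 * high 16 bits, and the following halfword is fetched
			 * next and merged into the low 16 bits.
			 */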
			if (COPYIN((user_addr_t)(((unsigned short *) (regs->pc)) + 1), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) {
				goto exit;
			}

			instr32 |= instr;
			code[1] = instr32;

#if __ARM_VFP__
			if (IS_THUMB_VFP(instr32)) {
				/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
				if (!get_vfp_enabled()) {
					panic("VFP was disabled (thumb); VFP should always be enabled");
				}
			}
#endif
		} else {
			/* I don't believe we have any 16 bit VFP instructions, so just set code[1]. */
			code[1] = instr;

			if (IS_THUMB_GDB_TRAP(instr)) {
				exception = EXC_BREAKPOINT;
				code[0] = EXC_ARM_BREAKPOINT;
			}
		}
	} else {
		uint32_t instr = 0;

		if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) {
			goto exit;
		}

		code[1] = instr;
#if __ARM_VFP__
		if (IS_ARM_VFP(instr)) {
			/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
			if (!get_vfp_enabled()) {
				panic("VFP was disabled (arm); VFP should always be enabled");
			}
		}
#endif

		if (IS_ARM_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			code[0] = EXC_ARM_BREAKPOINT;
		}
	}

	if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		boolean_t intr;

		intr = ml_set_interrupts_enabled(FALSE);

		if (exception == EXC_BREAKPOINT) {
			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			vm_offset_t kstackptr = current_thread()->machine.kstackptr;
			copy_signed_thread_state((arm_saved_state_t *)kstackptr, regs);

			DebuggerCall(exception, regs);
			(void) ml_set_interrupts_enabled(intr);
			goto exit;
		}
		panic_with_thread_kernel_state("undefined kernel instruction", regs);

		(void) ml_set_interrupts_enabled(intr);
	} else {
		exception_triage(exception, code, codeCnt);
		/* NOTREACHED */
	}

exit:
	if (recover) {
		thread->recover = recover;
	}
}

/*
 * Routine:	sleh_abort
 * Function:	Second level exception handler for aborts (prefetch/data)
 */

void
sleh_abort(struct arm_saved_state * regs, int type)
{
	int status;
	int debug_status = 0;
	int spsr;
	int exc = EXC_BAD_ACCESS;
	mach_exception_data_type_t codes[2];
	vm_map_t map;
	vm_map_address_t vaddr;
	vm_map_address_t fault_addr;
	vm_prot_t fault_type;
	kern_return_t result;
	vm_offset_t recover;
	thread_t thread = current_thread();
	boolean_t intr;

	recover = thread->recover;
	thread->recover = 0;

	status = regs->fsr & FSR_MASK;
	spsr = regs->cpsr;

	/* The DFSR/IFSR.ExT bit indicates an "IMPLEMENTATION DEFINED" classification.
	 * Allow a platform-level error handler to decode it.
	 */
	if ((regs->fsr) & FSR_EXT) {
		cpu_data_t *cdp = getCpuDatap();

		if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
			(*(platform_error_handler_t)cdp->platform_error_handler)(cdp->cpu_id, 0);
			/* If a platform error handler is registered, expect it to panic, not fall through */
			panic("Unexpected return from platform_error_handler");
		}
	}

	/* Done with asynchronous handling; re-enable asynchronous aborts here so that subsequent aborts are taken as early as possible. */
	reenable_async_aborts();

	if (ml_at_interrupt_context()) {
#if CONFIG_DTRACE
		if (!(thread->t_dtrace_inprobe))
#endif /* CONFIG_DTRACE */
		{
			panic_with_thread_kernel_state("sleh_abort at interrupt context", regs);
		}
	}

	fault_addr = vaddr = regs->far;

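	/*
	 * Prefetch aborts always require execute permission; for data aborts,
	 * fault_type is refined below from the DFSR write bit and the faulting
	 * instruction.
	 */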
	if (type == T_DATA_ABT) {
		getCpuDatap()->cpu_stat.data_ex_cnt++;
	} else { /* T_PREFETCH_ABT */
		getCpuDatap()->cpu_stat.instr_ex_cnt++;
		fault_type = VM_PROT_READ | VM_PROT_EXECUTE;
	}

	if (status == FSR_DEBUG) {
		debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK;
	}

	/* Inherit the interrupt masks from the previous context */
	if (!(spsr & PSR_INTMASK)) {
		ml_set_interrupts_enabled(TRUE);
	}

	if (type == T_DATA_ABT) {
		/*
		 * Now that interrupts are reenabled, we can perform any needed
		 * copyin operations.
		 *
		 * Because we have reenabled interrupts, any instruction copy
		 * must be a copyin, even on UP systems.
		 */

		if (regs->fsr & DFSR_WRITE) {
			fault_type = (VM_PROT_READ | VM_PROT_WRITE);
			/* Cache operations report faults as write access; change these to read access. */
			/* Cache operations are invoked from arm mode for now. */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins = 0;

				if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
					goto exit;
				}

				if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins)) {
					fault_type = VM_PROT_READ;
				}
			}
		} else {
			fault_type = VM_PROT_READ;
			/*
			 * The DFSR "write" bit is not set when a swp
			 * instruction is encountered (even when it is a write
			 * fault).
			 */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins = 0;

				if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
					goto exit;
				}

				if ((ins & ARM_SWP_MASK) == ARM_SWP) {
					fault_type = VM_PROT_WRITE;
				}
			}
		}
	}

	if ((spsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		/* Fault in kernel mode */

		if ((status == FSR_DEBUG)
		    && ((debug_status == ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT) || (debug_status == ARM_DBGDSCR_MOE_SYNC_WATCHPOINT))
		    && (recover != 0) && (getCpuDatap()->cpu_user_debug != 0)) {
			/* If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
			 * abort. Turn off watchpoints and keep going; we'll turn them back on in load_and_go_user.
			 */
			arm_debug_set(NULL);
			goto exit;
		}

		if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) {
			intr = ml_set_interrupts_enabled(FALSE);
			if (status == FSR_DEBUG) {
				DebuggerCall(EXC_BREAKPOINT, regs);
				(void) ml_set_interrupts_enabled(intr);
				goto exit;
			}
			panic_with_thread_kernel_state("prefetch abort in kernel mode", regs);

			(void) ml_set_interrupts_enabled(intr);
		} else if (TEST_FSR_VMFAULT(status)) {
#if CONFIG_DTRACE
			if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
				if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
					/* Point to next instruction */
					regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t*) (regs->pc)))) ? 2 : 4;
					goto exit;
				} else {
					intr = ml_set_interrupts_enabled(FALSE);
					panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", regs);

					(void) ml_set_interrupts_enabled(intr);

					goto exit;
				}
			}
#endif

			if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL) {
				map = kernel_map;
			} else {
				map = thread->map;
			}

			if (!TEST_FSR_TRANSLATION_FAULT(status)) {
				/* check to see if it is just a pmap ref/modify fault */
				result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (status == FSR_PACCESS), FALSE);
				if (result == KERN_SUCCESS) {
					goto exit;
				}
			}

			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr,
			    fault_type,
			    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
			    (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);

			if (result == KERN_SUCCESS) {
				goto exit;
			} else {
				/*
				 * If we have a recover handler, invoke it now.
				 */
				if (recover != 0) {
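					/*
					 * Bit 0 of the recovery address encodes
					 * the Thumb state of the recovery
					 * handler, as in an interworking branch
					 * target: clear it to form the PC and
					 * shift it into CPSR.T.
					 */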
					regs->pc = (register_t) (recover & ~0x1);
					regs->cpsr = (regs->cpsr & ~PSR_TF) | ((recover & 0x1) << PSR_TFb);
					goto exit;
				}
			}
		} else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
			result = sleh_alignment(regs);
			if (result == KERN_SUCCESS) {
				goto exit;
			} else {
				intr = ml_set_interrupts_enabled(FALSE);

				panic_with_thread_kernel_state("unaligned kernel data access", regs);

				(void) ml_set_interrupts_enabled(intr);

				goto exit;
			}
		}
		intr = ml_set_interrupts_enabled(FALSE);

		panic_plain("kernel abort type %d at pc 0x%08x, lr 0x%08x: fault_type=0x%x, fault_addr=0x%x\n"
		    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
		    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
		    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
		    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
		    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
		    type, regs->pc, regs->lr, fault_type, fault_addr,
		    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
		    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
		    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
		    regs->r[12], regs->sp, regs->lr, regs->pc,
		    regs->cpsr, regs->fsr, regs->far);
	}
	/* Fault in user mode */

	if (TEST_FSR_VMFAULT(status)) {
		map = thread->map;

#if CONFIG_DTRACE
		if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
			if (dtrace_tally_fault(fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
				if (recover) {
					regs->pc = recover;
				} else {
					intr = ml_set_interrupts_enabled(FALSE);

					panic_with_thread_kernel_state("copyin/out has no recovery point", regs);

					(void) ml_set_interrupts_enabled(intr);
				}
				goto exit;
			} else {
				intr = ml_set_interrupts_enabled(FALSE);

				panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", regs);

				(void) ml_set_interrupts_enabled(intr);

				goto exit;
			}
		}
#endif

		if (!TEST_FSR_TRANSLATION_FAULT(status)) {
			/* check to see if it is just a pmap ref/modify fault */
			result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (status == FSR_PACCESS), TRUE);
			if (result == KERN_SUCCESS) {
				goto exception_return;
			}
		}

		/*
		 * We have to "fault" the page in.
		 */
		result = vm_fault(map, fault_addr, fault_type,
		    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
		    THREAD_ABORTSAFE, NULL, 0);
		if (result == KERN_SUCCESS || result == KERN_ABORTED) {
			goto exception_return;
		}

		/*
		 * KERN_FAILURE here means preemption was disabled when we called vm_fault.
		 * That should never happen for a page fault from user space.
		 */
		if (__improbable(result == KERN_FAILURE)) {
			panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
		}

		codes[0] = result;
	} else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
		if (sleh_alignment(regs) == KERN_SUCCESS) {
			goto exception_return;
		}
		codes[0] = EXC_ARM_DA_ALIGN;
	} else if (status == FSR_DEBUG) {
		exc = EXC_BREAKPOINT;
		codes[0] = EXC_ARM_DA_DEBUG;
	} else if ((status == FSR_SDOM) || (status == FSR_PDOM)) {
		panic_with_thread_kernel_state("Unexpected domain fault", regs);
	} else {
		codes[0] = KERN_FAILURE;
	}

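	/* codes[0] carries the reason for the fault; codes[1] carries the faulting address. */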
	codes[1] = vaddr;
	exception_triage(exc, codes, 2);
	/* NOTREACHED */

exception_return:
	if (recover) {
		thread->recover = recover;
	}
	thread_exception_return();
	/* NOTREACHED */

exit:
	if (recover) {
		thread->recover = recover;
	}
	return;
}


/*
 * Routine:	sleh_alignment
 * Function:	Second level exception handler for alignment data faults
 */

static kern_return_t
sleh_alignment(struct arm_saved_state * regs)
{
	unsigned int status;
	unsigned int ins = 0;
	unsigned int rd_index;
	unsigned int base_index;
	unsigned int paddr;
	void *src;
	unsigned int reg_list;
	unsigned int pre;
	unsigned int up;
	unsigned int write_back;
	kern_return_t rc = KERN_SUCCESS;

	getCpuDatap()->cpu_stat.unaligned_cnt++;

	/* Do not try to emulate in modified execution states */
	if (regs->cpsr & (PSR_EF | PSR_JF)) {
		return KERN_NOT_SUPPORTED;
	}

	/* Disallow emulation of kernel instructions */
	if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		return KERN_NOT_SUPPORTED;
	}

#define ALIGN_THRESHOLD 1024
	if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) ==
	    (ALIGN_THRESHOLD - 1)) {
		kprintf("sleh_alignment: %d more alignment faults: %d total\n",
		    ALIGN_THRESHOLD, sleh_alignment_count);
	}

	if ((trap_on_alignment_fault != 0)
	    && (sleh_alignment_count % trap_on_alignment_fault == 0)) {
		return KERN_NOT_SUPPORTED;
	}

	status = regs->fsr;
	paddr = regs->far;

	if (regs->cpsr & PSR_TF) {
		unsigned short ins16 = 0;

		/* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
		if (COPYIN((user_addr_t)(regs->pc), (char *)&ins16, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
			/* Failed to fetch instruction, return success to re-drive the exception */
			return KERN_SUCCESS;
		}
#else
		ins16 = *(unsigned short *) (regs->pc);
#endif

	/*
	 * Map multi-word Thumb loads and stores to their ARM
	 * equivalents.
	 * Don't worry about single-word instructions, since those are
	 * handled in hardware.
	 */

	reg_list = ins16 & 0xff;
	if (reg_list == 0) {
		return KERN_NOT_SUPPORTED;
	}

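	/*
	 * Synthesize the equivalent ARM block-transfer encoding: 0xE8800000 is
	 * an unconditional STMIA with no writeback; bit 20 selects load vs.
	 * store and bit 21 requests base writeback (Thumb LDMIA only writes
	 * back when the base register is not in the list). PUSH and POP map to
	 * STMDB/LDMIA on SP (0xE92D0000 / 0xE8BD0000), with LR or PC folded in
	 * from bit 8 of the Thumb encoding.
	 */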
	if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) ||
	    ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) {
		base_index = (ins16 >> 8) & 0x7;
		ins = 0xE8800000 | (base_index << 16) | reg_list;
		if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) {
			ins |= (1 << 20);
		}
		if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) ||
		    !(reg_list & (1 << base_index))) {
			ins |= (1 << 21);
		}
	} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) {
		unsigned int r = (ins16 >> 8) & 1;
		ins = 0xE8BD0000 | (r << 15) | reg_list;
	} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_PUSH) {
		unsigned int r = (ins16 >> 8) & 1;
		ins = 0xE92D0000 | (r << 14) | reg_list;
	} else {
		return KERN_NOT_SUPPORTED;
	}
	} else {
		/* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
		if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
			/* Failed to fetch instruction, return success to re-drive the exception */
			return KERN_SUCCESS;
		}
#else
		ins = *(unsigned int *) (regs->pc);
#endif
	}

	/* Don't try to emulate unconditional instructions */
	if ((ins & 0xF0000000) == 0xF0000000) {
		return KERN_NOT_SUPPORTED;
	}

	pre = (ins >> 24) & 1;
	up = (ins >> 23) & 1;
	reg_list = ins & 0xffff;
	write_back = (ins >> 21) & 1;
	base_index = (ins >> 16) & 0xf;

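	/*
	 * Worked example (illustrative): "stmdb r0!, {r4-r7}" encodes as
	 * 0xE92000F0, giving pre = 1, up = 0, write_back = 1, base_index = 0,
	 * and reg_list = 0x00F0.
	 */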
	if ((ins & ARM_BLK_MASK) == ARM_STM) {	/* STM or LDM */
		int reg_count = 0;
		int waddr;

		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index)) {
				reg_count++;
			}
		}

		paddr = regs->r[base_index];

		switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) {
		/* Increment after */
		case ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			break;

		/* Increment before */
		case ARM_POST_INDEXING | ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			paddr += 4;
			break;

		/* Decrement after */
		case 0:
			waddr = paddr - reg_count * 4;
			paddr = waddr + 4;
			break;

		/* Decrement before */
		case ARM_POST_INDEXING:
			waddr = paddr - reg_count * 4;
			paddr = waddr;
			break;

		default:
			waddr = 0;
		}
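
		/*
		 * paddr now points at the lowest-addressed word of the
		 * transfer (block transfers place the lowest-numbered register
		 * at the lowest address); waddr is the value written back to
		 * the base register on success.
		 */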
		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index)) {
				src = &regs->r[rd_index];

				if ((ins & (1 << 20)) == 0) { /* STM */
					rc = COPYOUT(src, paddr, 4);
				} else { /* LDM */
					rc = COPYIN(paddr, src, 4);
				}

				if (rc != KERN_SUCCESS) {
					break;
				}

				paddr += 4;
			}
		}

		paddr = waddr;
	} else {
		rc = 1;
	}

	if (rc == KERN_SUCCESS) {
		if (regs->cpsr & PSR_TF) {
			regs->pc += 2;
		} else {
			regs->pc += 4;
		}

		if (write_back) {
			regs->r[base_index] = paddr;
		}
	}
	return rc;
}


#ifndef NO_KDEBUG
/* XXX quell warnings */
void syscall_trace(struct arm_saved_state * regs);
void syscall_trace_exit(unsigned int, unsigned int);
void mach_syscall_trace(struct arm_saved_state * regs, unsigned int call_number);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
void interrupt_trace(struct arm_saved_state * regs);
void interrupt_trace_exit(void);

/* called from the fleh_swi handler, if TRACE_SYSCALL is enabled */
void
syscall_trace(
	struct arm_saved_state * regs)
{
	kprintf("syscall: %d\n", regs->r[12]);
}

void
syscall_trace_exit(
	unsigned int r0,
	unsigned int r1)
{
	kprintf("syscall exit: 0x%x 0x%x\n", r0, r1);
}

void
mach_syscall_trace(
	struct arm_saved_state * regs,
	unsigned int call_number)
{
	int i, argc;
	int kdarg[3] = {0, 0, 0};

	argc = mach_trap_table[call_number].mach_trap_arg_count;

	if (argc > 3) {
		argc = 3;
	}

	for (i = 0; i < argc; i++) {
		kdarg[i] = (int) regs->r[i];
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    kdarg[0], kdarg[1], kdarg[2], 0, 0);
}

void
mach_syscall_trace_exit(
	unsigned int retval,
	unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);
}

void
interrupt_trace(
	struct arm_saved_state * regs)
{
#define UMODE(rp)	(((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc),
	    UMODE(regs), 0, 0);
}

void
interrupt_trace_exit(
	void)
{
#if KPERF
	kperf_interrupt();
#endif /* KPERF */
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}
#endif

/* XXX quell warnings */
void interrupt_stats(void);

/* This is called from locore.s directly. We only update per-processor interrupt counters in this function. */
void
interrupt_stats(void)
{
	SCHED_STATS_INTERRUPT(current_processor());
}

__dead2
static void
panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs)
{
	panic_plain("%s at pc 0x%08x, lr 0x%08x (saved state:%p)\n"
	    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
	    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
	    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
	    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
	    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
	    msg, regs->pc, regs->lr, regs,
	    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
	    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
	    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
	    regs->r[12], regs->sp, regs->lr, regs->pc,
	    regs->cpsr, regs->fsr, regs->far);
}