apple/xnu.git: osfmk/arm/trap.c (xnu-4570.1.46)
/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/debug.h>
#include <mach_kdp.h>
#include <machine/endian.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_types.h>
#include <mach/mach_traps.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/message.h>
#include <mach/machine/thread_status.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>

#include <sys/kdebug.h>

#include <arm/trap.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/setjmp.h>
#include <arm/proc_reg.h>

/*
 * External function prototypes.
 */
#include <kern/syscall_sw.h>
#include <kern/host.h>
#include <kern/processor.h>


#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs, unsigned int instr);
extern boolean_t dtrace_tally_fault(user_addr_t);

/* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions
   over from that file. Need to keep these in sync! */
#define FASTTRAP_ARM_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB_INSTR 0xdefc

#define FASTTRAP_ARM_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB_RET_INSTR 0xdefb

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif

#define COPYIN(dst, src, size) \
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \
	copyin_kern(dst, src, size) \
	: \
	copyin(dst, src, size)

#define COPYOUT(src, dst, size) \
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \
	copyout_kern(src, dst, size) \
	: \
	copyout(src, dst, size)
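/*
 * COPYIN/COPYOUT select the copy routine based on the mode the exception was
 * taken from (via the saved CPSR in the enclosing `regs'): aborts raised
 * while already in a kernel mode, e.g. inside copyin()/copyout() themselves,
 * use the kernel variants, while aborts taken from user mode go through the
 * normal user copy path.
 */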

/* Second-level exception handlers forward declarations */
void sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *);
void sleh_abort(struct arm_saved_state *, int);
static kern_return_t sleh_alignment(struct arm_saved_state *);
static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs);


volatile perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */

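/*
 * sleh_alignment_count counts alignment faults taken so far; if
 * trap_on_alignment_fault is set to a nonzero value N, every Nth alignment
 * fault is reported to the task as an exception instead of being emulated
 * (see the modulus check in sleh_alignment()).
 */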
int sleh_alignment_count = 0;
int trap_on_alignment_fault = 0;

/*
 * Routine: sleh_undef
 * Function: Second level exception handler for undefined exception
 */

void
sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __unused)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t code[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t codeCnt = 2;
	thread_t thread = current_thread();
	vm_offset_t recover;

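	/*
	 * Stash and clear the copyio recovery point: thread->recover holds the
	 * resume address set up by copyin()/copyout().  It is cleared here so a
	 * nested fault inside this handler cannot reuse it, and restored on the
	 * exit path below.
	 */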
	recover = thread->recover;
	thread->recover = 0;

	getCpuDatap()->cpu_stat.undef_ex_cnt++;

	/* Inherit the interrupt masks from previous */
	if (!(regs->cpsr & PSR_INTMASK))
		ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (tempDTraceTrapHook) {
		if (tempDTraceTrapHook(exception, regs, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			goto exit;
		}
	}

	/* Check to see if we've hit a userland probe */
	if ((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) {
		if (regs->cpsr & PSR_TF) {
			uint16_t instr;

			if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS)
				goto exit;

			if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
					/* If it succeeds, we are done... */
					goto exit;
			}
		} else {
			uint32_t instr;

			if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
				goto exit;

			if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
					/* If it succeeds, we are done... */
					goto exit;
			}
		}
	}
#endif /* CONFIG_DTRACE */


	if (regs->cpsr & PSR_TF) {
		unsigned short instr;

		if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
			goto exit;

		if (IS_THUMB32(instr)) {
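			/*
			 * The halfword already fetched is the high half of a
			 * 32-bit Thumb-2 encoding; fetch the second halfword and
			 * merge them so code[1] (and the VFP check below) see the
			 * complete instruction.
			 */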
			unsigned int instr32;

			instr32 = (instr<<16);

			if (COPYIN((user_addr_t)(((unsigned short *) (regs->pc))+1), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
				goto exit;

			instr32 |= instr;
			code[1] = instr32;

#if __ARM_VFP__
			if (IS_THUMB_VFP(instr32)) {
				/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
				if (!get_vfp_enabled())
					panic("VFP was disabled (thumb); VFP should always be enabled");
			}
#endif
		} else {
			/* I don't believe we have any 16 bit VFP instructions, so just set code[1]. */
			code[1] = instr;

			if (IS_THUMB_GDB_TRAP(instr)) {
				exception = EXC_BREAKPOINT;
				code[0] = EXC_ARM_BREAKPOINT;
			}
		}
	} else {
		uint32_t instr;

		if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
			goto exit;

		code[1] = instr;
#if __ARM_VFP__
		if (IS_ARM_VFP(instr)) {
			/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
			if (!get_vfp_enabled())
				panic("VFP was disabled (arm); VFP should always be enabled");
		}
#endif

		if (IS_ARM_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			code[0] = EXC_ARM_BREAKPOINT;
		}
	}

	if (!((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)) {
		boolean_t intr;

		intr = ml_set_interrupts_enabled(FALSE);

		if (exception == EXC_BREAKPOINT) {
			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			vm_offset_t kstackptr = current_thread()->machine.kstackptr;
			*((arm_saved_state_t *) kstackptr) = *regs;

			DebuggerCall(exception, regs);
			(void) ml_set_interrupts_enabled(intr);
			goto exit;
		}
		panic_context(exception, (void *)regs, "undefined kernel instruction\n"
			"r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
			"r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
			"r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
			"r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
			"cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
			regs->r[0], regs->r[1], regs->r[2], regs->r[3],
			regs->r[4], regs->r[5], regs->r[6], regs->r[7],
			regs->r[8], regs->r[9], regs->r[10], regs->r[11],
			regs->r[12], regs->sp, regs->lr, regs->pc,
			regs->cpsr, regs->fsr, regs->far);

		(void) ml_set_interrupts_enabled(intr);

	} else {
		exception_triage(exception, code, codeCnt);
		/* NOTREACHED */
	}

exit:
	if (recover)
		thread->recover = recover;
}

/*
 * Routine: sleh_abort
 * Function: Second level exception handler for abort (Prefetch/Data)
 */

void
sleh_abort(struct arm_saved_state * regs, int type)
{
	int status;
	int debug_status = 0;
	int spsr;
	int exc;
	mach_exception_data_type_t codes[2];
	vm_map_t map;
	vm_map_address_t vaddr;
	vm_map_address_t fault_addr;
	vm_prot_t fault_type;
	kern_return_t result;
	vm_offset_t recover;
	thread_t thread = current_thread();
	boolean_t intr;

	recover = thread->recover;
	thread->recover = 0;

	status = regs->fsr & FSR_MASK;
	spsr = regs->cpsr;

	/* The DFSR/IFSR.ExT bit indicates an "IMPLEMENTATION DEFINED" classification.
	 * Allow a platform-level error handler to decode it.
	 */
	if ((regs->fsr) & FSR_EXT) {
		cpu_data_t *cdp = getCpuDatap();

		if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
			(*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, 0);
			/* If a platform error handler is registered, expect it to panic, not fall through */
			panic("Unexpected return from platform_error_handler");
		}
	}

	/* Done with asynchronous handling; re-enable here so that subsequent aborts are taken as early as possible. */
	reenable_async_aborts();

	if (ml_at_interrupt_context())
		panic_with_thread_kernel_state("sleh_abort at interrupt context", regs);

	fault_addr = vaddr = regs->far;

	if (type == T_DATA_ABT) {
		getCpuDatap()->cpu_stat.data_ex_cnt++;
	} else { /* T_PREFETCH_ABT */
		getCpuDatap()->cpu_stat.instr_ex_cnt++;
		fault_type = VM_PROT_READ | VM_PROT_EXECUTE;
	}

	if (status == FSR_DEBUG)
		debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK;

	/* Inherit the interrupt masks from previous */
	if (!(spsr & PSR_INTMASK))
		ml_set_interrupts_enabled(TRUE);

	if (type == T_DATA_ABT) {
		/*
		 * Now that interrupts are reenabled, we can perform any needed
		 * copyin operations.
		 *
		 * Because we have reenabled interrupts, any instruction copy
		 * must be a copyin, even on UP systems.
		 */

		if (regs->fsr & DFSR_WRITE) {
			fault_type = (VM_PROT_READ | VM_PROT_WRITE);
			/* Cache operations report faults as write access, change these to read access */
			/* Cache operations are invoked from arm mode for now */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins;

				if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
					goto exit;

				if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins))
					fault_type = VM_PROT_READ;
			}
		} else {
			fault_type = VM_PROT_READ;
			/*
			 * The DFSR does not get the "write" bit set
			 * when a swp instruction is encountered (even when it
			 * is a write fault).
			 */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins;

				if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
					goto exit;

				if ((ins & ARM_SWP_MASK) == ARM_SWP)
					fault_type = VM_PROT_WRITE;
			}
		}
	}

	if ((spsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		/* Fault in kernel mode */

		if ((status == FSR_DEBUG)
		    && ((debug_status == ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT) || (debug_status == ARM_DBGDSCR_MOE_SYNC_WATCHPOINT))
		    && (recover != 0) && (getCpuDatap()->cpu_user_debug != 0)) {
			/* If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
			 * abort. Turn off watchpoints and keep going; we'll turn them back on in load_and_go_user.
			 */
			arm_debug_set(NULL);
			goto exit;
		}

		if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) {

			intr = ml_set_interrupts_enabled(FALSE);
			if (status == FSR_DEBUG) {
				DebuggerCall(EXC_BREAKPOINT, regs);
				(void) ml_set_interrupts_enabled(intr);
				goto exit;
			}
			panic_context(EXC_BAD_ACCESS, (void*)regs, "sleh_abort: prefetch abort in kernel mode: fault_addr=0x%x\n"
				"r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
				"r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
				"r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
				"r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
				"cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
				fault_addr,
				regs->r[0], regs->r[1], regs->r[2], regs->r[3],
				regs->r[4], regs->r[5], regs->r[6], regs->r[7],
				regs->r[8], regs->r[9], regs->r[10], regs->r[11],
				regs->r[12], regs->sp, regs->lr, regs->pc,
				regs->cpsr, regs->fsr, regs->far);

			(void) ml_set_interrupts_enabled(intr);

		} else if (TEST_FSR_VMFAULT(status)) {

#if CONFIG_DTRACE
			if (thread->options & TH_OPT_DTRACE) {	/* Executing under dtrace_probe? */
				if (dtrace_tally_fault(fault_addr)) {	/* Should a fault under dtrace be ignored? */
					/* Point to next instruction */
					regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t*) (regs->pc)))) ? 2 : 4;
					goto exit;
				} else {
					intr = ml_set_interrupts_enabled(FALSE);
					panic_context(EXC_BAD_ACCESS, (void *)regs, "Unexpected page fault under dtrace_probe\n"
419 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
420 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
421 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
422 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
423 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
424 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
425 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
426 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
427 regs->r[12], regs->sp, regs->lr, regs->pc,
428 regs->cpsr, regs->fsr, regs->far);
429
430 (void) ml_set_interrupts_enabled(intr);
431
432 goto exit;
433 }
434 }
435 #endif
436
437 if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL)
438 map = kernel_map;
439 else
440 map = thread->map;
441
442 /* check to see if it is just a pmap ref/modify fault */
443 result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE);
444 if (result == KERN_SUCCESS)
445 goto exit;
446
447 /*
448 * We have to "fault" the page in.
449 */
450 result = vm_fault(map, fault_addr,
451 fault_type,
452 FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
453 (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);
454
455 if (result == KERN_SUCCESS) {
456 goto exit;
457 } else {
458 /*
459 * If we have a recover handler, invoke it now.
460 */
461 if (recover != 0) {
462 regs->pc = (register_t) (recover & ~0x1);
463 regs->cpsr = (regs->cpsr & ~PSR_TF) | ((recover & 0x1) << PSR_TFb);
464 goto exit;
465 }
466 }
467 } else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
468 result = sleh_alignment(regs);
469 if (result == KERN_SUCCESS) {
470 goto exit;
471 } else {
472 intr = ml_set_interrupts_enabled(FALSE);
473
474 panic_context(EXC_BAD_ACCESS, (void *)regs, "unaligned kernel data access: pc=0x%08x fault_addr=0x%x\n"
475 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
476 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
477 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
478 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
479 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
480 regs->pc, fault_addr,
481 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
482 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
483 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
484 regs->r[12], regs->sp, regs->lr, regs->pc,
485 regs->cpsr, regs->fsr, regs->far);
486
487 (void) ml_set_interrupts_enabled(intr);
488
489 goto exit;
490 }
491
492 }
493 intr = ml_set_interrupts_enabled(FALSE);
494
495 panic_context(EXC_BAD_ACCESS, (void *)regs, "kernel abort type %d: fault_type=0x%x, fault_addr=0x%x\n"
496 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
497 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
498 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
499 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
500 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
501 type, fault_type, fault_addr,
502 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
503 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
504 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
505 regs->r[12], regs->sp, regs->lr, regs->pc,
506 regs->cpsr, regs->fsr, regs->far);
507
508 (void) ml_set_interrupts_enabled(intr);
509
510 goto exit;
511 }
512 /* Fault in user mode */
513
514 if (TEST_FSR_VMFAULT(status)) {
515 map = thread->map;
516
517 #if CONFIG_DTRACE
518 if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
519 if (dtrace_tally_fault(fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
520 if (recover) {
521 regs->pc = recover;
522 } else {
523 intr = ml_set_interrupts_enabled(FALSE);
524
					panic_context(EXC_BAD_ACCESS, (void *)regs, "copyin/out has no recovery point\n"
526 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
527 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
528 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
529 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
530 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
531 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
532 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
533 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
534 regs->r[12], regs->sp, regs->lr, regs->pc,
535 regs->cpsr, regs->fsr, regs->far);
536
537 (void) ml_set_interrupts_enabled(intr);
538 }
539 goto exit;
540 } else {
541 intr = ml_set_interrupts_enabled(FALSE);
542
				panic_context(EXC_BAD_ACCESS, (void*)regs, "Unexpected UMW page fault under dtrace_probe\n"
544 "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
545 "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
546 "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
547 "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
548 "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
549 regs->r[0], regs->r[1], regs->r[2], regs->r[3],
550 regs->r[4], regs->r[5], regs->r[6], regs->r[7],
551 regs->r[8], regs->r[9], regs->r[10], regs->r[11],
552 regs->r[12], regs->sp, regs->lr, regs->pc,
553 regs->cpsr, regs->fsr, regs->far);
554
555 (void) ml_set_interrupts_enabled(intr);
556
557 goto exit;
558 }
559 }
560 #endif
561
562 /* check to see if it is just a pmap ref/modify fault */
563 result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, TRUE);
564 if (result != KERN_SUCCESS) {
565 /*
566 * We have to "fault" the page in.
567 */
568 result = vm_fault(map, fault_addr, fault_type,
569 FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
570 THREAD_ABORTSAFE, NULL, 0);
571 }
572 if (result == KERN_SUCCESS || result == KERN_ABORTED) {
573 goto exception_return;
574 }
575 exc = EXC_BAD_ACCESS;
576 codes[0] = result;
577 } else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
578 if (sleh_alignment(regs) == KERN_SUCCESS) {
579 goto exception_return;
580 }
581 exc = EXC_BAD_ACCESS;
582 codes[0] = EXC_ARM_DA_ALIGN;
583 } else if (status == FSR_DEBUG) {
584 exc = EXC_BREAKPOINT;
585 codes[0] = EXC_ARM_DA_DEBUG;
586 } else if ((status == FSR_SDOM) || (status == FSR_PDOM)) {
587 exc = EXC_BAD_ACCESS;
588 codes[0] = KERN_INVALID_ADDRESS;
589 } else {
590 exc = EXC_BAD_ACCESS;
591 codes[0] = KERN_FAILURE;
592 }
593
594 codes[1] = vaddr;
595 exception_triage(exc, codes, 2);
596 /* NOTREACHED */
597
598 exception_return:
599 if (recover)
600 thread->recover = recover;
601 thread_exception_return();
602 /* NOTREACHED */
603
604 exit:
605 if (recover)
606 thread->recover = recover;
607 return;
608 }
609
610
611 /*
612 * Routine: sleh_alignment
613 * Function: Second level exception handler for alignment data fault
614 */
615
616 static kern_return_t
617 sleh_alignment(struct arm_saved_state * regs)
618 {
619 unsigned int status;
620 unsigned int ins;
621 unsigned int rd_index;
622 unsigned int base_index;
623 unsigned int paddr;
624 void *src;
625 unsigned int reg_list;
626 unsigned int pre;
627 unsigned int up;
628 unsigned int write_back;
629 kern_return_t rc = KERN_SUCCESS;
630
631 getCpuDatap()->cpu_stat.unaligned_cnt++;
632
633 /* Do not try to emulate in modified execution states */
634 if (regs->cpsr & (PSR_EF | PSR_JF))
635 return KERN_NOT_SUPPORTED;
636
637 /* Disallow emulation of kernel instructions */
638 if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE)
639 return KERN_NOT_SUPPORTED;
640
641
642 #define ALIGN_THRESHOLD 1024
643 if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) ==
644 (ALIGN_THRESHOLD - 1))
645 kprintf("sleh_alignment: %d more alignment faults: %d total\n",
646 ALIGN_THRESHOLD, sleh_alignment_count);
647
648 if ((trap_on_alignment_fault != 0)
649 && (sleh_alignment_count % trap_on_alignment_fault == 0))
650 return KERN_NOT_SUPPORTED;
651
652 status = regs->fsr;
653 paddr = regs->far;
654
655 if (regs->cpsr & PSR_TF) {
656 unsigned short ins16;
657
658 /* Get aborted instruction */
659 #if __ARM_SMP__ || __ARM_USER_PROTECT__
660 if(COPYIN((user_addr_t)(regs->pc), (char *)&ins16,(vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
661 /* Failed to fetch instruction, return success to re-drive the exception */
662 return KERN_SUCCESS;
663 }
664 #else
665 ins16 = *(unsigned short *) (regs->pc);
666 #endif
667
668 /*
669 * Map multi-word Thumb loads and stores to their ARM
670 * equivalents.
671 * Don't worry about single-word instructions, since those are
672 * handled in hardware.
673 */
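		/*
		 * For example (a sketch of the mapping below, not an exhaustive
		 * decode): the Thumb instruction "LDMIA r0!, {r1-r3}" (0xC80E)
		 * is rewritten as the ARM encoding 0xE8B0000E, i.e. an LDM with
		 * base r0, write-back, and register list {r1,r2,r3}, which the
		 * ARM emulation path further down then carries out.
		 */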

		reg_list = ins16 & 0xff;
		if (reg_list == 0)
			return KERN_NOT_SUPPORTED;

		if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) ||
		    ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) {
			base_index = (ins16 >> 8) & 0x7;
			ins = 0xE8800000 | (base_index << 16) | reg_list;
			if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA)
				ins |= (1 << 20);
			if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) ||
			    !(reg_list & (1 << base_index)))
				ins |= (1 << 21);
		} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) {
			unsigned int r = (ins16 >> 8) & 1;
			ins = 0xE8BD0000 | (r << 15) | reg_list;
		} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_PUSH) {
			unsigned int r = (ins16 >> 8) & 1;
			ins = 0xE92D0000 | (r << 14) | reg_list;
		} else {
			return KERN_NOT_SUPPORTED;
		}
	} else {
		/* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
		if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
			/* Failed to fetch instruction, return success to re-drive the exception */
			return KERN_SUCCESS;
		}
#else
		ins = *(unsigned int *) (regs->pc);
#endif
	}

	/* Don't try to emulate unconditional instructions */
	if ((ins & 0xF0000000) == 0xF0000000)
		return KERN_NOT_SUPPORTED;

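	/*
	 * Decode the ARM block-transfer fields: P (bit 24) pre- vs. post-indexed
	 * addressing, U (bit 23) increment vs. decrement, W (bit 21) base
	 * write-back, bits 19:16 the base register, and bits 15:0 the transfer
	 * register list.  The L bit (bit 20, load vs. store) is tested directly
	 * in the copy loop below.
	 */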
	pre = (ins >> 24) & 1;
	up = (ins >> 23) & 1;
	reg_list = ins & 0xffff;
	write_back = (ins >> 21) & 1;
	base_index = (ins >> 16) & 0xf;

	if ((ins & ARM_BLK_MASK) == ARM_STM) {	/* STM or LDM */
		int reg_count = 0;
		int waddr;

		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index))
				reg_count++;
		}

		paddr = regs->r[base_index];

		switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) {
		/* Increment after */
		case ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			break;

		/* Increment before */
		case ARM_POST_INDEXING | ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			paddr += 4;
			break;

		/* Decrement after */
		case 0:
			waddr = paddr - reg_count * 4;
			paddr = waddr + 4;
			break;

		/* Decrement before */
		case ARM_POST_INDEXING:
			waddr = paddr - reg_count * 4;
			paddr = waddr;
			break;

		default:
			waddr = 0;
		}

		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index)) {
				src = &regs->r[rd_index];

				if ((ins & (1 << 20)) == 0)	/* STM */
					rc = COPYOUT(src, paddr, 4);
				else	/* LDM */
					rc = COPYIN(paddr, src, 4);

				if (rc != KERN_SUCCESS)
					break;

				paddr += 4;
			}
		}

		paddr = waddr;
	} else {
		rc = 1;
	}

	if (rc == KERN_SUCCESS) {
		if (regs->cpsr & PSR_TF)
			regs->pc += 2;
		else
			regs->pc += 4;

		if (write_back)
			regs->r[base_index] = paddr;
	}
	return (rc);
}


#ifndef NO_KDEBUG
/* XXX quell warnings */
void syscall_trace(struct arm_saved_state * regs);
void syscall_trace_exit(unsigned int, unsigned int);
void mach_syscall_trace(struct arm_saved_state * regs, unsigned int call_number);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
void interrupt_trace(struct arm_saved_state * regs);
void interrupt_trace_exit(void);

/* called from the fleh_swi handler, if TRACE_SYSCALL is enabled */
void
syscall_trace(
	struct arm_saved_state * regs)
{
	kprintf("syscall: %d\n", regs->r[12]);
}

void
syscall_trace_exit(
	unsigned int r0,
	unsigned int r1)
{
	kprintf("syscall exit: 0x%x 0x%x\n", r0, r1);
}

void
mach_syscall_trace(
	struct arm_saved_state * regs,
	unsigned int call_number)
{
	int i, argc;
	int kdarg[3] = {0, 0, 0};

	argc = mach_trap_table[call_number].mach_trap_arg_count;

	if (argc > 3)
		argc = 3;

	for (i = 0; i < argc; i++)
		kdarg[i] = (int) regs->r[i];

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
		kdarg[0], kdarg[1], kdarg[2], 0, 0);

}

void
mach_syscall_trace_exit(
	unsigned int retval,
	unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
		retval, 0, 0, 0, 0);
}

void
interrupt_trace(
	struct arm_saved_state * regs)
{
#define UMODE(rp) (((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
		0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc),
		UMODE(regs), 0, 0);
}

void
interrupt_trace_exit(
	void)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
		0, 0, 0, 0, 0);
}
#endif

/* XXX quell warnings */
void interrupt_stats(void);

/* This is called from locore.s directly. We only update per-processor interrupt counters in this function */
void
interrupt_stats(void)
{
	SCHED_STATS_INTERRUPT(current_processor());
}

static void
panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs)
{
	panic_context(0, (void*)regs, "%s (saved state:%p)\n"
		"r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
		"r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
		"r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
		"r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
		"cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
		msg, regs,
		regs->r[0], regs->r[1], regs->r[2], regs->r[3],
		regs->r[4], regs->r[5], regs->r[6], regs->r[7],
		regs->r[8], regs->r[9], regs->r[10], regs->r[11],
		regs->r[12], regs->sp, regs->lr, regs->pc,
		regs->cpsr, regs->fsr, regs->far);

}