/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/debug.h>
#include <mach_kdp.h>
#include <machine/endian.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_types.h>
#include <mach/mach_traps.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/message.h>
#include <mach/machine/thread_status.h>

#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <arm/trap.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/setjmp.h>
#include <arm/proc_reg.h>

/*
 * External function prototypes.
 */
#include <kern/syscall_sw.h>
#include <kern/host.h>
#include <kern/processor.h>


#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs, unsigned int instr);
extern boolean_t dtrace_tally_fault(user_addr_t);

/* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions
   over from that file. Need to keep these in sync! */
#define FASTTRAP_ARM_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB_INSTR 0xdefc

#define FASTTRAP_ARM_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB_RET_INSTR 0xdefb

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#endif

#define COPYIN(dst, src, size)                              \
    ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?       \
        copyin_kern(dst, src, size)                         \
    :                                                       \
        copyin(dst, src, size)

#define COPYOUT(src, dst, size)                             \
    ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?       \
        copyout_kern(src, dst, size)                        \
    :                                                       \
        copyout(src, dst, size)
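
/*
 * COPYIN/COPYOUT choose between the kernel- and user-space copy routines
 * based on the mode recorded in the saved CPSR at the expansion site: aborts
 * taken from a privileged mode use copyin_kern()/copyout_kern(), while
 * faults from user mode go through the regular copyin()/copyout() paths.
 * A 'regs' pointer to the saved state must be in scope wherever these
 * macros are used.
 */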

/* Second-level exception handlers forward declarations */
void sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *);
void sleh_abort(struct arm_saved_state *, int);
static kern_return_t sleh_alignment(struct arm_saved_state *);
static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs);

int sleh_alignment_count = 0;
int trap_on_alignment_fault = 0;

/*
 * Routine: sleh_undef
 * Function: Second level exception handler for undefined exception
 */

void
sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __unused)
{
    exception_type_t           exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t code[2] = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t     codeCnt = 2;
    thread_t                   thread = current_thread();
    vm_offset_t                recover;

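    /* Save and clear the thread's recovery point up front; it is restored on the way out. */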
    recover = thread->recover;
    thread->recover = 0;

    getCpuDatap()->cpu_stat.undef_ex_cnt++;

    /* Inherit the interrupt masks from previous */
    if (!(regs->cpsr & PSR_INTMASK))
        ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
    if (tempDTraceTrapHook) {
        if (tempDTraceTrapHook(exception, regs, 0, 0) == KERN_SUCCESS) {
            /*
             * If it succeeds, we are done...
             */
            goto exit;
        }
    }

    /* Check to see if we've hit a userland probe */
    if ((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) {
        if (regs->cpsr & PSR_TF) {
            uint16_t instr = 0;

            if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS)
                goto exit;

            if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) {
                if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
                    /* If it succeeds, we are done... */
                    goto exit;
            }
        } else {
            uint32_t instr = 0;

            if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
                goto exit;

            if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) {
                if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
                    /* If it succeeds, we are done... */
                    goto exit;
            }
        }
    }
#endif /* CONFIG_DTRACE */


    if (regs->cpsr & PSR_TF) {
        unsigned short instr = 0;

        if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
            goto exit;

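        /*
         * A 32-bit Thumb-2 instruction occupies two consecutive halfwords;
         * fetch the second halfword and assemble the full encoding so the
         * exception code reports the complete instruction.
         */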
        if (IS_THUMB32(instr)) {
            unsigned int instr32;

            instr32 = (instr << 16);

            if (COPYIN((user_addr_t)(((unsigned short *)(regs->pc)) + 1), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
                goto exit;

            instr32 |= instr;
            code[1] = instr32;

#if __ARM_VFP__
            if (IS_THUMB_VFP(instr32)) {
                /* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
                if (!get_vfp_enabled())
                    panic("VFP was disabled (thumb); VFP should always be enabled");
            }
#endif
        } else {
            /* I don't believe we have any 16 bit VFP instructions, so just set code[1]. */
            code[1] = instr;

            if (IS_THUMB_GDB_TRAP(instr)) {
                exception = EXC_BREAKPOINT;
                code[0] = EXC_ARM_BREAKPOINT;
            }
        }
    } else {
        uint32_t instr = 0;

        if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
            goto exit;

        code[1] = instr;
#if __ARM_VFP__
        if (IS_ARM_VFP(instr)) {
            /* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
            if (!get_vfp_enabled())
                panic("VFP was disabled (arm); VFP should always be enabled");
        }
#endif

        if (IS_ARM_GDB_TRAP(instr)) {
            exception = EXC_BREAKPOINT;
            code[0] = EXC_ARM_BREAKPOINT;
        }
    }

    if (!((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)) {
        boolean_t intr;

        intr = ml_set_interrupts_enabled(FALSE);

        if (exception == EXC_BREAKPOINT) {
            /* Save off the context here (so that the debug logic
             * can see the original state of this thread).
             */
            vm_offset_t kstackptr = current_thread()->machine.kstackptr;
            *((arm_saved_state_t *) kstackptr) = *regs;

            DebuggerCall(exception, regs);
            (void) ml_set_interrupts_enabled(intr);
            goto exit;
        }
        panic_with_thread_kernel_state("undefined kernel instruction", regs);

        (void) ml_set_interrupts_enabled(intr);

    } else {
        exception_triage(exception, code, codeCnt);
        /* NOTREACHED */
    }

exit:
    if (recover)
        thread->recover = recover;
}

/*
 * Routine: sleh_abort
 * Function: Second level exception handler for prefetch and data aborts
 */

void
sleh_abort(struct arm_saved_state * regs, int type)
{
    int              status;
    int              debug_status = 0;
    int              spsr;
    int              exc;
    mach_exception_data_type_t codes[2];
    vm_map_t         map;
    vm_map_address_t vaddr;
    vm_map_address_t fault_addr;
    vm_prot_t        fault_type;
    kern_return_t    result;
    vm_offset_t      recover;
    thread_t         thread = current_thread();
    boolean_t        intr;

    recover = thread->recover;
    thread->recover = 0;

    status = regs->fsr & FSR_MASK;
    spsr = regs->cpsr;

    /* The DFSR/IFSR.ExT bit indicates an "IMPLEMENTATION DEFINED" classification.
     * Allow a platform-level error handler to decode it.
     */
    if ((regs->fsr) & FSR_EXT) {
        cpu_data_t *cdp = getCpuDatap();

        if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
            (*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, 0);
            /* If a platform error handler is registered, expect it to panic, not fall through */
            panic("Unexpected return from platform_error_handler");
        }
    }

    /* Done with asynchronous handling; re-enable here so that subsequent aborts are taken as early as possible. */
    reenable_async_aborts();

    if (ml_at_interrupt_context()) {
#if CONFIG_DTRACE
        if (!(thread->options & TH_OPT_DTRACE))
#endif /* CONFIG_DTRACE */
        {
            panic_with_thread_kernel_state("sleh_abort at interrupt context", regs);
        }
    }

    fault_addr = vaddr = regs->far;

    if (type == T_DATA_ABT) {
        getCpuDatap()->cpu_stat.data_ex_cnt++;
    } else { /* T_PREFETCH_ABT */
        getCpuDatap()->cpu_stat.instr_ex_cnt++;
        fault_type = VM_PROT_READ | VM_PROT_EXECUTE;
    }

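    /*
     * For debug faults, capture the DBGDSCR method-of-entry bits so that
     * kernel-mode watchpoint hits can be recognized further down.
     */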
    if (status == FSR_DEBUG)
        debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK;

    /* Inherit the interrupt masks from previous */
    if (!(spsr & PSR_INTMASK))
        ml_set_interrupts_enabled(TRUE);

    if (type == T_DATA_ABT) {
        /*
         * Now that interrupts are reenabled, we can perform any needed
         * copyin operations.
         *
         * Because we have reenabled interrupts, any instruction copy
         * must be a copyin, even on UP systems.
         */

        if (regs->fsr & DFSR_WRITE) {
            fault_type = (VM_PROT_READ | VM_PROT_WRITE);
            /* Cache operations report faults as write access, change these to read access */
            /* Cache operations are invoked from arm mode for now */
            if (!(regs->cpsr & PSR_TF)) {
                unsigned int ins = 0;

                if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
                    goto exit;

                if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins))
                    fault_type = VM_PROT_READ;
            }
        } else {
            fault_type = VM_PROT_READ;
            /*
             * DFSR does not get the "write" bit set
             * when a swp instruction is encountered (even when it is
             * a write fault).
             */
            if (!(regs->cpsr & PSR_TF)) {
                unsigned int ins = 0;

                if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
                    goto exit;

                if ((ins & ARM_SWP_MASK) == ARM_SWP)
                    fault_type = VM_PROT_WRITE;
            }
        }
    }

    if ((spsr & PSR_MODE_MASK) != PSR_USER_MODE) {
        /* Fault in kernel mode */

        if ((status == FSR_DEBUG)
            && ((debug_status == ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT) || (debug_status == ARM_DBGDSCR_MOE_SYNC_WATCHPOINT))
            && (recover != 0) && (getCpuDatap()->cpu_user_debug != 0)) {
            /* If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
             * abort. Turn off watchpoints and keep going; we'll turn them back on in load_and_go_user.
             */
            arm_debug_set(NULL);
            goto exit;
        }

        if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) {

            intr = ml_set_interrupts_enabled(FALSE);
            if (status == FSR_DEBUG) {
                DebuggerCall(EXC_BREAKPOINT, regs);
                (void) ml_set_interrupts_enabled(intr);
                goto exit;
            }
            panic_with_thread_kernel_state("prefetch abort in kernel mode", regs);

            (void) ml_set_interrupts_enabled(intr);

        } else if (TEST_FSR_VMFAULT(status)) {

#if CONFIG_DTRACE
            if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
                if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
                    /* Point to next instruction */
                    regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t*) (regs->pc)))) ? 2 : 4;
                    goto exit;
                } else {
                    intr = ml_set_interrupts_enabled(FALSE);
                    panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", regs);

                    (void) ml_set_interrupts_enabled(intr);

                    goto exit;
                }
            }
#endif

            if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL)
                map = kernel_map;
            else
                map = thread->map;

            if (!TEST_FSR_TRANSLATION_FAULT(status)) {
                /* check to see if it is just a pmap ref/modify fault */
                result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE);
                if (result == KERN_SUCCESS)
                    goto exit;
            }

            /*
             * We have to "fault" the page in.
             */
            result = vm_fault(map, fault_addr,
                              fault_type,
                              FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
                              (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);

            if (result == KERN_SUCCESS) {
                goto exit;
            } else {
                /*
                 * If we have a recover handler, invoke it now.
                 */
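                /* Bit 0 of the recovery address encodes the Thumb state of the resume point. */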
                if (recover != 0) {
                    regs->pc = (register_t) (recover & ~0x1);
                    regs->cpsr = (regs->cpsr & ~PSR_TF) | ((recover & 0x1) << PSR_TFb);
                    goto exit;
                }
            }
        } else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
            result = sleh_alignment(regs);
            if (result == KERN_SUCCESS) {
                goto exit;
            } else {
                intr = ml_set_interrupts_enabled(FALSE);

                panic_with_thread_kernel_state("unaligned kernel data access", regs);

                (void) ml_set_interrupts_enabled(intr);

                goto exit;
            }

        }
        intr = ml_set_interrupts_enabled(FALSE);

        panic_plain("kernel abort type %d: fault_type=0x%x, fault_addr=0x%x\n"
                    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
                    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
                    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
                    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                    type, fault_type, fault_addr,
                    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
                    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
                    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
                    regs->r[12], regs->sp, regs->lr, regs->pc,
                    regs->cpsr, regs->fsr, regs->far);

        (void) ml_set_interrupts_enabled(intr);

        goto exit;
    }
    /* Fault in user mode */

    if (TEST_FSR_VMFAULT(status)) {
        map = thread->map;

#if CONFIG_DTRACE
        if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
            if (dtrace_tally_fault(fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
                if (recover) {
                    regs->pc = recover;
                } else {
                    intr = ml_set_interrupts_enabled(FALSE);

                    panic_with_thread_kernel_state("copyin/out has no recovery point", regs);

                    (void) ml_set_interrupts_enabled(intr);
                }
                goto exit;
            } else {
                intr = ml_set_interrupts_enabled(FALSE);

                panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", regs);

                (void) ml_set_interrupts_enabled(intr);

                goto exit;
            }
        }
#endif

        if (!TEST_FSR_TRANSLATION_FAULT(status)) {
            /* check to see if it is just a pmap ref/modify fault */
            result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, TRUE);
            if (result == KERN_SUCCESS)
                goto exception_return;
        }

        /*
         * We have to "fault" the page in.
         */
        result = vm_fault(map, fault_addr, fault_type,
                          FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
                          THREAD_ABORTSAFE, NULL, 0);
        if (result == KERN_SUCCESS || result == KERN_ABORTED) {
            goto exception_return;
        }
        exc = EXC_BAD_ACCESS;
        codes[0] = result;
    } else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
        if (sleh_alignment(regs) == KERN_SUCCESS) {
            goto exception_return;
        }
        exc = EXC_BAD_ACCESS;
        codes[0] = EXC_ARM_DA_ALIGN;
    } else if (status == FSR_DEBUG) {
        exc = EXC_BREAKPOINT;
        codes[0] = EXC_ARM_DA_DEBUG;
    } else if ((status == FSR_SDOM) || (status == FSR_PDOM)) {
        exc = EXC_BAD_ACCESS;
        codes[0] = KERN_INVALID_ADDRESS;
    } else {
        exc = EXC_BAD_ACCESS;
        codes[0] = KERN_FAILURE;
    }

    codes[1] = vaddr;
    exception_triage(exc, codes, 2);
    /* NOTREACHED */

exception_return:
    if (recover)
        thread->recover = recover;
    thread_exception_return();
    /* NOTREACHED */

exit:
    if (recover)
        thread->recover = recover;
    return;
}


/*
 * Routine: sleh_alignment
 * Function: Second level exception handler for alignment data fault
 */

static kern_return_t
sleh_alignment(struct arm_saved_state * regs)
{
    unsigned int   status;
    unsigned int   ins = 0;
    unsigned int   rd_index;
    unsigned int   base_index;
    unsigned int   paddr;
    void          *src;
    unsigned int   reg_list;
    unsigned int   pre;
    unsigned int   up;
    unsigned int   write_back;
    kern_return_t  rc = KERN_SUCCESS;

    getCpuDatap()->cpu_stat.unaligned_cnt++;

    /* Do not try to emulate in modified execution states */
    if (regs->cpsr & (PSR_EF | PSR_JF))
        return KERN_NOT_SUPPORTED;

    /* Disallow emulation of kernel instructions */
    if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE)
        return KERN_NOT_SUPPORTED;


#define ALIGN_THRESHOLD 1024
    if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) ==
        (ALIGN_THRESHOLD - 1))
        kprintf("sleh_alignment: %d more alignment faults: %d total\n",
                ALIGN_THRESHOLD, sleh_alignment_count);

    if ((trap_on_alignment_fault != 0)
        && (sleh_alignment_count % trap_on_alignment_fault == 0))
        return KERN_NOT_SUPPORTED;

    status = regs->fsr;
    paddr = regs->far;

    if (regs->cpsr & PSR_TF) {
        unsigned short ins16 = 0;

        /* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
        if (COPYIN((user_addr_t)(regs->pc), (char *)&ins16, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
            /* Failed to fetch instruction, return success to re-drive the exception */
            return KERN_SUCCESS;
        }
#else
        ins16 = *(unsigned short *) (regs->pc);
#endif

        /*
         * Map multi-word Thumb loads and stores to their ARM
         * equivalents.
         * Don't worry about single-word instructions, since those are
         * handled in hardware.
         */

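        /*
         * For instance, a Thumb "PUSH {r4-r7, lr}" (0xb5f0) is rewritten
         * below as its ARM equivalent "STMDB sp!, {r4-r7, lr}" (0xe92d40f0)
         * so that the common ARM LDM/STM emulation path can handle it.
         */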
        reg_list = ins16 & 0xff;
        if (reg_list == 0)
            return KERN_NOT_SUPPORTED;

        if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) ||
            ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) {
            base_index = (ins16 >> 8) & 0x7;
            ins = 0xE8800000 | (base_index << 16) | reg_list;
            if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA)
                ins |= (1 << 20);
            if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) ||
                !(reg_list & (1 << base_index)))
                ins |= (1 << 21);
        } else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) {
            unsigned int r = (ins16 >> 8) & 1;
            ins = 0xE8BD0000 | (r << 15) | reg_list;
        } else if ((ins16 & THUMB_PUSH_MASK) == THUMB_PUSH) {
            unsigned int r = (ins16 >> 8) & 1;
            ins = 0xE92D0000 | (r << 14) | reg_list;
        } else {
            return KERN_NOT_SUPPORTED;
        }
    } else {
        /* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
        if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
            /* Failed to fetch instruction, return success to re-drive the exception */
            return KERN_SUCCESS;
        }
#else
        ins = *(unsigned int *) (regs->pc);
#endif
    }

    /* Don't try to emulate unconditional instructions */
    if ((ins & 0xF0000000) == 0xF0000000)
        return KERN_NOT_SUPPORTED;

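    /*
     * Decode the ARM LDM/STM fields: the P (pre-index), U (up/increment) and
     * W (write-back) bits, the register list, and the base register number.
     */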
    pre = (ins >> 24) & 1;
    up = (ins >> 23) & 1;
    reg_list = ins & 0xffff;
    write_back = (ins >> 21) & 1;
    base_index = (ins >> 16) & 0xf;

    if ((ins & ARM_BLK_MASK) == ARM_STM) {  /* STM or LDM */
        int reg_count = 0;
        int waddr;

        for (rd_index = 0; rd_index < 16; rd_index++) {
            if (reg_list & (1 << rd_index))
                reg_count++;
        }

        paddr = regs->r[base_index];

        switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) {
        /* Increment after */
        case ARM_INCREMENT:
            waddr = paddr + reg_count * 4;
            break;

        /* Increment before */
        case ARM_POST_INDEXING | ARM_INCREMENT:
            waddr = paddr + reg_count * 4;
            paddr += 4;
            break;

        /* Decrement after */
        case 0:
            waddr = paddr - reg_count * 4;
            paddr = waddr + 4;
            break;

        /* Decrement before */
        case ARM_POST_INDEXING:
            waddr = paddr - reg_count * 4;
            paddr = waddr;
            break;

        default:
            waddr = 0;
        }

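        /*
         * Walk the register list in ascending order, transferring one word
         * at a time.  For example, "LDMIA r0!, {r1-r4}" starts the copy at
         * the original r0 and leaves waddr = r0 + 16 for the write-back.
         */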
        for (rd_index = 0; rd_index < 16; rd_index++) {
            if (reg_list & (1 << rd_index)) {
                src = &regs->r[rd_index];

                if ((ins & (1 << 20)) == 0)     /* STM */
                    rc = COPYOUT(src, paddr, 4);
                else                            /* LDM */
                    rc = COPYIN(paddr, src, 4);

                if (rc != KERN_SUCCESS)
                    break;

                paddr += 4;
            }
        }

        paddr = waddr;
    } else {
        rc = 1;
    }

    if (rc == KERN_SUCCESS) {
        if (regs->cpsr & PSR_TF)
            regs->pc += 2;
        else
            regs->pc += 4;

        if (write_back)
            regs->r[base_index] = paddr;
    }
    return (rc);
}


#ifndef NO_KDEBUG
/* XXX quell warnings */
void syscall_trace(struct arm_saved_state * regs);
void syscall_trace_exit(unsigned int, unsigned int);
void mach_syscall_trace(struct arm_saved_state * regs, unsigned int call_number);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
void interrupt_trace(struct arm_saved_state * regs);
void interrupt_trace_exit(void);

/* called from the fleh_swi handler, if TRACE_SYSCALL is enabled */
void
syscall_trace(
    struct arm_saved_state * regs)
{
    kprintf("syscall: %d\n", regs->r[12]);
}

void
syscall_trace_exit(
    unsigned int r0,
    unsigned int r1)
{
    kprintf("syscall exit: 0x%x 0x%x\n", r0, r1);
}

void
mach_syscall_trace(
    struct arm_saved_state * regs,
    unsigned int call_number)
{
    int i, argc;
    int kdarg[3] = {0, 0, 0};

    argc = mach_trap_table[call_number].mach_trap_arg_count;

    if (argc > 3)
        argc = 3;

    for (i = 0; i < argc; i++)
        kdarg[i] = (int) regs->r[i];

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        kdarg[0], kdarg[1], kdarg[2], 0, 0);

}

void
mach_syscall_trace_exit(
    unsigned int retval,
    unsigned int call_number)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);
}

void
interrupt_trace(
    struct arm_saved_state * regs)
{
#define UMODE(rp)   (((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
        0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc),
        UMODE(regs), 0, 0);
}

void
interrupt_trace_exit(
    void)
{
#if KPERF
    kperf_interrupt();
#endif /* KPERF */
    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}
#endif

/* XXX quell warnings */
void interrupt_stats(void);

/* This is called from locore.s directly. We only update per-processor interrupt counters in this function */
void
interrupt_stats(void)
{
    SCHED_STATS_INTERRUPT(current_processor());
}

static void
panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs)
{
    panic_plain("%s (saved state:%p)\n"
                "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
                "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
                "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
                "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                msg, regs,
                regs->r[0], regs->r[1], regs->r[2], regs->r[3],
                regs->r[4], regs->r[5], regs->r[6], regs->r[7],
                regs->r[8], regs->r[9], regs->r[10], regs->r[11],
                regs->r[12], regs->sp, regs->lr, regs->pc,
                regs->cpsr, regs->fsr, regs->far);

}