/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/debug.h>

#include <machine/endian.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_types.h>
#include <mach/mach_traps.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/message.h>
#include <mach/machine/thread_status.h>

#include <vm/vm_page.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>

#include <sys/kdebug.h>

#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/setjmp.h>
#include <arm/proc_reg.h>

/*
 * External function prototypes.
 */
#include <kern/syscall_sw.h>
#include <kern/host.h>
#include <kern/processor.h>
extern kern_return_t dtrace_user_probe(arm_saved_state_t *regs, unsigned int instr);
extern boolean_t dtrace_tally_fault(user_addr_t);

/* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions
   over from that file. Need to keep these in sync! */
#define FASTTRAP_ARM_INSTR	0xe7ffdefc
#define FASTTRAP_THUMB_INSTR	0xdefc

#define FASTTRAP_ARM_RET_INSTR	0xe7ffdefb
#define FASTTRAP_THUMB_RET_INSTR	0xdefb
/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL;	/* Pointer to DTrace fbt trap hook routine */
#define COPYIN(dst, src, size)					\
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?	\
	    copyin_kern(dst, src, size)				\
	    :							\
	    copyin(dst, src, size)

#define COPYOUT(src, dst, size)					\
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?	\
	    copyout_kern(src, dst, size)			\
	    :							\
	    copyout(src, dst, size)
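
/*
 * Illustrative note (not in the original source): COPYIN/COPYOUT capture a
 * variable named `regs` from the enclosing scope and use the saved CPSR mode
 * to pick the kernel-space or user-space copy routine.  For example, in a
 * handler where `regs` holds the aborted state,
 *
 *	uint32_t instr;
 *	if (COPYIN((user_addr_t)(regs->pc), (char *)&instr,
 *	    (vm_size_t)sizeof(instr)) != KERN_SUCCESS)
 *		return;
 *
 * fetches the faulting instruction with copyin_kern() for a kernel-mode abort
 * and with copyin() otherwise.
 */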
/*
 * Second-level exception handlers forward declarations
 */
void		sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *);
void		sleh_abort(struct arm_saved_state *, int);
static kern_return_t sleh_alignment(struct arm_saved_state *);
static void	panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs);

int sleh_alignment_count = 0;
int trap_on_alignment_fault = 0;
/*
 * Routine:	sleh_undef
 * Function:	Second level exception handler for undefined exception
 */
void
sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __unused)
{
	exception_type_t		exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t	code[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t		codeCnt = 2;
	thread_t			thread = current_thread();
	vm_offset_t			recover;

	recover = thread->recover;

	getCpuDatap()->cpu_stat.undef_ex_cnt++;
	/* Inherit the interrupt masks from previous */
	if (!(regs->cpsr & PSR_INTMASK))
		ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (tempDTraceTrapHook) {
		if (tempDTraceTrapHook(exception, regs, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			goto exit;
		}
	}
	/* Check to see if we've hit a userland probe */
	if ((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) {
		if (regs->cpsr & PSR_TF) {
			uint16_t instr;

			if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS)
				goto exit;

			if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
					/* If it succeeds, we are done... */
					goto exit;
			}
		} else {
			uint32_t instr;

			if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
				goto exit;

			if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
					/* If it succeeds, we are done... */
					goto exit;
			}
		}
	}
#endif /* CONFIG_DTRACE */
	if (regs->cpsr & PSR_TF) {
		unsigned short instr;

		if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
			goto exit;

		if (IS_THUMB32(instr)) {
			unsigned int instr32;

			instr32 = (instr << 16);

			if (COPYIN((user_addr_t)(((unsigned short *)(regs->pc)) + 1), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
				goto exit;

			instr32 |= instr;
			code[1] = instr32;

			if (IS_THUMB_VFP(instr32)) {
				/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
				if (!get_vfp_enabled())
					panic("VFP was disabled (thumb); VFP should always be enabled");
			}
		} else {
			/* I don't believe we have any 16 bit VFP instructions, so just set code[1]. */
			code[1] = instr;

			if (IS_THUMB_GDB_TRAP(instr)) {
				exception = EXC_BREAKPOINT;
				code[0] = EXC_ARM_BREAKPOINT;
			}
		}
	} else {
		uint32_t instr;

		if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
			goto exit;

		code[1] = instr;

		if (IS_ARM_VFP(instr)) {
			/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
			if (!get_vfp_enabled())
				panic("VFP was disabled (arm); VFP should always be enabled");
		}

		if (IS_ARM_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			code[0] = EXC_ARM_BREAKPOINT;
		}
	}
	if (!((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)) {
		boolean_t intr;

		intr = ml_set_interrupts_enabled(FALSE);

		if (exception == EXC_BREAKPOINT) {
			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			vm_offset_t kstackptr = current_thread()->machine.kstackptr;
			*((arm_saved_state_t *) kstackptr) = *regs;

			DebuggerCall(exception, regs);
			(void) ml_set_interrupts_enabled(intr);
			goto exit;
		}
		panic_context(exception, (void *)regs, "undefined kernel instruction\n"
		    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
		    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
		    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
		    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
		    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
		    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
		    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
		    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
		    regs->r[12], regs->sp, regs->lr, regs->pc,
		    regs->cpsr, regs->fsr, regs->far);

		(void) ml_set_interrupts_enabled(intr);
	} else {
		exception_triage(exception, code, codeCnt);
	}

exit:
	thread->recover = recover;
}
/*
 * Routine:	sleh_abort
 * Function:	Second level exception handler for abort (Pref/Data)
 */
void
sleh_abort(struct arm_saved_state * regs, int type)
{
	int		status;
	int		debug_status = 0;
	int		spsr;
	exception_type_t exc;
	mach_exception_data_type_t codes[2];
	vm_map_t	map;
	vm_map_address_t vaddr;
	vm_map_address_t fault_addr;
	vm_prot_t	fault_type;
	kern_return_t	result;
	vm_offset_t	recover;
	thread_t	thread = current_thread();
	boolean_t	intr;

	recover = thread->recover;

	status = regs->fsr & FSR_MASK;
	spsr = regs->cpsr;
	/* The DFSR/IFSR.ExT bit indicates "IMPLEMENTATION DEFINED" classification.
	 * Allow a platform-level error handler to decode it.
	 */
	if ((regs->fsr) & FSR_EXT) {
		cpu_data_t	*cdp = getCpuDatap();

		if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
			(*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, 0);
			/* If a platform error handler is registered, expect it to panic, not fall through */
			panic("Unexpected return from platform_error_handler");
		}
	}

	/* Done with asynchronous handling; re-enable here so that subsequent aborts are taken as early as possible. */
	reenable_async_aborts();

	if (ml_at_interrupt_context())
		panic_with_thread_kernel_state("sleh_abort at interrupt context", regs);
	fault_addr = vaddr = regs->far;

	if (type == T_DATA_ABT) {
		getCpuDatap()->cpu_stat.data_ex_cnt++;
	} else { /* T_PREFETCH_ABT */
		getCpuDatap()->cpu_stat.instr_ex_cnt++;
		fault_type = VM_PROT_READ | VM_PROT_EXECUTE;
	}

	if (status == FSR_DEBUG)
		debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK;

	/* Inherit the interrupt masks from previous */
	if (!(spsr & PSR_INTMASK))
		ml_set_interrupts_enabled(TRUE);
	if (type == T_DATA_ABT) {
		/*
		 * Now that interrupts are reenabled, we can perform any needed
		 * copyin operations.
		 *
		 * Because we have reenabled interrupts, any instruction copy
		 * must be a copyin, even on UP systems.
		 */

		if (regs->fsr & DFSR_WRITE) {
			fault_type = (VM_PROT_READ | VM_PROT_WRITE);
			/* Cache operations report faults as write access, change these to read access */
			/* Cache operations are invoked from arm mode for now */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins;

				if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
					goto exit;

				if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins))
					fault_type = VM_PROT_READ;
			}
		} else {
			fault_type = VM_PROT_READ;
			/*
			 * DFSR is not getting the "write" bit set
			 * when a swp instruction is encountered (even when it is
			 * a write fault).
			 */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins;

				if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
					goto exit;

				if ((ins & ARM_SWP_MASK) == ARM_SWP)
					fault_type = VM_PROT_WRITE;
			}
		}
	}
	if ((spsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		/* Fault in kernel mode */

		if ((status == FSR_DEBUG)
		    && ((debug_status == ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT) || (debug_status == ARM_DBGDSCR_MOE_SYNC_WATCHPOINT))
		    && (recover != 0) && (getCpuDatap()->cpu_user_debug != 0)) {
			/* If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
			 * abort.  Turn off watchpoints and keep going; we'll turn them back on in load_and_go_user.
			 */
			arm_debug_set(NULL);
			goto exit;
		}
		if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) {
			intr = ml_set_interrupts_enabled(FALSE);
			if (status == FSR_DEBUG) {
				DebuggerCall(EXC_BREAKPOINT, regs);
				(void) ml_set_interrupts_enabled(intr);
				goto exit;
			}

			panic_context(EXC_BAD_ACCESS, (void *)regs, "sleh_abort: prefetch abort in kernel mode: fault_addr=0x%x\n"
			    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
			    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
			    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
			    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
			    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
			    fault_addr,
			    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
			    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
			    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
			    regs->r[12], regs->sp, regs->lr, regs->pc,
			    regs->cpsr, regs->fsr, regs->far);

			(void) ml_set_interrupts_enabled(intr);
		} else if (TEST_FSR_VMFAULT(status)) {

			if (thread->options & TH_OPT_DTRACE) {	/* Executing under dtrace_probe? */
				if (dtrace_tally_fault(fault_addr)) {	/* Should a fault under dtrace be ignored? */
					/* Point to next instruction */
					regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t *)(regs->pc)))) ? 2 : 4;
					goto exception_return;
				}

				intr = ml_set_interrupts_enabled(FALSE);

				panic_context(EXC_BAD_ACCESS, (void *)regs, "Unexpected page fault under dtrace_probe"
				    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
				    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
				    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
				    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
				    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
				    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
				    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
				    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
				    regs->r[12], regs->sp, regs->lr, regs->pc,
				    regs->cpsr, regs->fsr, regs->far);

				(void) ml_set_interrupts_enabled(intr);
			}
			if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL)
				map = kernel_map;
			else
				map = thread->map;

			/* check to see if it is just a pmap ref/modify fault */
			result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE);
			if (result == KERN_SUCCESS)
				goto exit;

			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
			    (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);

			if (result == KERN_SUCCESS) {
				goto exit;
			} else if (recover) {
				/*
				 * If we have a recover handler, invoke it now.
				 */
				regs->pc = (register_t) (recover & ~0x1);
				regs->cpsr = (regs->cpsr & ~PSR_TF) | ((recover & 0x1) << PSR_TFb);
				goto exit;
			}
		} else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
			result = sleh_alignment(regs);
			if (result == KERN_SUCCESS) {
				goto exit;
			} else {
				intr = ml_set_interrupts_enabled(FALSE);

				panic_context(EXC_BAD_ACCESS, (void *)regs, "unaligned kernel data access: pc=0x%08x fault_addr=0x%x\n"
				    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
				    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
				    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
				    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
				    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
				    regs->pc, fault_addr,
				    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
				    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
				    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
				    regs->r[12], regs->sp, regs->lr, regs->pc,
				    regs->cpsr, regs->fsr, regs->far);

				(void) ml_set_interrupts_enabled(intr);
			}
		}
		intr = ml_set_interrupts_enabled(FALSE);

		panic_context(EXC_BAD_ACCESS, (void *)regs, "kernel abort type %d: fault_type=0x%x, fault_addr=0x%x\n"
		    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
		    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
		    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
		    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
		    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
		    type, fault_type, fault_addr,
		    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
		    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
		    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
		    regs->r[12], regs->sp, regs->lr, regs->pc,
		    regs->cpsr, regs->fsr, regs->far);

		(void) ml_set_interrupts_enabled(intr);
	}
	/* Fault in user mode */

	if (TEST_FSR_VMFAULT(status)) {
		map = thread->map;

		if (thread->options & TH_OPT_DTRACE) {	/* Executing under dtrace_probe? */
			if (dtrace_tally_fault(fault_addr)) {	/* Should a user mode fault under dtrace be ignored? */
				if (recover) {
					regs->pc = recover;
				} else {
					intr = ml_set_interrupts_enabled(FALSE);

					panic_context(EXC_BAD_ACCESS, (void *)regs, "copyin/out has no recovery point"
					    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
					    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
					    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
					    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
					    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
					    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
					    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
					    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
					    regs->r[12], regs->sp, regs->lr, regs->pc,
					    regs->cpsr, regs->fsr, regs->far);

					(void) ml_set_interrupts_enabled(intr);
				}
				goto exception_return;
			}
			intr = ml_set_interrupts_enabled(FALSE);

			panic_context(EXC_BAD_ACCESS, (void *)regs, "Unexpected UMW page fault under dtrace_probe"
			    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
			    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
			    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
			    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
			    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
			    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
			    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
			    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
			    regs->r[12], regs->sp, regs->lr, regs->pc,
			    regs->cpsr, regs->fsr, regs->far);

			(void) ml_set_interrupts_enabled(intr);
		}
		/* check to see if it is just a pmap ref/modify fault */
		result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, TRUE);
		if (result != KERN_SUCCESS) {
			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
			    THREAD_ABORTSAFE, NULL, 0);
		}
		if (result == KERN_SUCCESS || result == KERN_ABORTED) {
			goto exception_return;
		}
		exc = EXC_BAD_ACCESS;
		codes[0] = result;
	} else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
		if (sleh_alignment(regs) == KERN_SUCCESS) {
			goto exception_return;
		}
		exc = EXC_BAD_ACCESS;
		codes[0] = EXC_ARM_DA_ALIGN;
	} else if (status == FSR_DEBUG) {
		exc = EXC_BREAKPOINT;
		codes[0] = EXC_ARM_DA_DEBUG;
	} else if ((status == FSR_SDOM) || (status == FSR_PDOM)) {
		exc = EXC_BAD_ACCESS;
		codes[0] = KERN_INVALID_ADDRESS;
	} else {
		exc = EXC_BAD_ACCESS;
		codes[0] = KERN_FAILURE;
	}

	codes[1] = vaddr;
	exception_triage(exc, codes, 2);
exception_return:
	thread->recover = recover;
	thread_exception_return();
	/* NOTREACHED */

exit:
	thread->recover = recover;
}
/*
 * Routine:	sleh_alignment
 * Function:	Second level exception handler for alignment data fault
 */
static kern_return_t
sleh_alignment(struct arm_saved_state * regs)
{
	unsigned int	ins;
	unsigned int	rd_index;
	unsigned int	base_index;
	unsigned int	paddr;
	void		*src;
	unsigned int	reg_list;
	unsigned int	pre;
	unsigned int	up;
	unsigned int	write_back;
	kern_return_t	rc = KERN_SUCCESS;

	getCpuDatap()->cpu_stat.unaligned_cnt++;
	/* Do not try to emulate in modified execution states */
	if (regs->cpsr & (PSR_EF | PSR_JF))
		return KERN_NOT_SUPPORTED;

	/* Disallow emulation of kernel instructions */
	if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE)
		return KERN_NOT_SUPPORTED;

#define ALIGN_THRESHOLD 1024
	if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) ==
	    (ALIGN_THRESHOLD - 1))
		kprintf("sleh_alignment: %d more alignment faults: %d total\n",
			ALIGN_THRESHOLD, sleh_alignment_count);

	if ((trap_on_alignment_fault != 0)
	    && (sleh_alignment_count % trap_on_alignment_fault == 0))
		return KERN_NOT_SUPPORTED;
	if (regs->cpsr & PSR_TF) {
		unsigned short ins16;

		/* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
		if (COPYIN((user_addr_t)(regs->pc), (char *)&ins16, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
			/* Failed to fetch instruction, return success to re-drive the exception */
			return KERN_SUCCESS;
		}
#else
		ins16 = *(unsigned short *) (regs->pc);
#endif
		/*
		 * Map multi-word Thumb loads and stores to their ARM
		 * equivalents.
		 * Don't worry about single-word instructions, since those are
		 * handled in hardware.
		 * (A worked example of this mapping follows the else-if chain below.)
		 */

		reg_list = ins16 & 0xff;
		if (reg_list == 0)
			return KERN_NOT_SUPPORTED;

		if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) ||
		    ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) {
			base_index = (ins16 >> 8) & 0x7;
			ins = 0xE8800000 | (base_index << 16) | reg_list;
			if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA)
				ins |= (1 << 20);	/* set the L (load) bit */
			if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) ||
			    !(reg_list & (1 << base_index)))
				ins |= (1 << 21);	/* set the W (write-back) bit */
		} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) {
			unsigned int r = (ins16 >> 8) & 1;
			ins = 0xE8BD0000 | (r << 15) | reg_list;
		} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_PUSH) {
			unsigned int r = (ins16 >> 8) & 1;
			ins = 0xE92D0000 | (r << 14) | reg_list;
		} else {
			return KERN_NOT_SUPPORTED;
		}
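
		/*
		 * Worked example (illustrative, not in the original source): the
		 * Thumb instruction PUSH {r0, lr} encodes as 0xB501 (R = 1,
		 * reg_list = 0x01).  The mapping above rewrites it as 0xE92D4001,
		 * the ARM encoding of STMDB sp!, {r0, lr}, so the generic LDM/STM
		 * emulation below only has to handle the ARM form.
		 */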
	} else {
		/* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
		if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
			/* Failed to fetch instruction, return success to re-drive the exception */
			return KERN_SUCCESS;
		}
#else
		ins = *(unsigned int *) (regs->pc);
#endif
	}

	/* Don't try to emulate unconditional instructions */
	if ((ins & 0xF0000000) == 0xF0000000)
		return KERN_NOT_SUPPORTED;
	pre = (ins >> 24) & 1;
	up = (ins >> 23) & 1;
	reg_list = ins & 0xffff;
	write_back = (ins >> 21) & 1;
	base_index = (ins >> 16) & 0xf;

	if ((ins & ARM_BLK_MASK) == ARM_STM) {	/* STM or LDM */
		int reg_count = 0;
		int waddr;
		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index))
				reg_count++;
		}

		paddr = regs->r[base_index];
		switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) {
			/* Increment after */
		case ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			break;

			/* Increment before */
		case ARM_POST_INDEXING | ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			paddr += 4;
			break;

			/* Decrement after */
		case 0:
			waddr = paddr - reg_count * 4;
			paddr = waddr + 4;
			break;

			/* Decrement before */
		case ARM_POST_INDEXING:
			waddr = paddr - reg_count * 4;
			paddr = waddr;
			break;
		}
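
		/*
		 * Illustrative note (not in the original source): after the switch,
		 * paddr points at the lowest address touched by the transfer, and
		 * the copy loop below walks upward 4 bytes at a time, lowest-numbered
		 * register first, matching LDM/STM memory ordering.  For example,
		 * STMDB sp!, {r0, lr} with sp == 0x1000 (reg_count == 2) copies r0
		 * to 0xFF8 and lr to 0xFFC.
		 */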
		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index)) {
				src = &regs->r[rd_index];

				if ((ins & (1 << 20)) == 0)	/* STM */
					rc = COPYOUT(src, paddr, 4);
				else	/* LDM */
					rc = COPYIN(paddr, src, 4);

				if (rc != KERN_SUCCESS)
					break;

				paddr += 4;
			}
		}
	} else {
		return KERN_NOT_SUPPORTED;
	}
	if (rc == KERN_SUCCESS) {
		if (regs->cpsr & PSR_TF)
			regs->pc += 2;
		else
			regs->pc += 4;

		if (write_back)
			regs->r[base_index] = paddr;
	}

	return rc;
}
/* XXX quell warnings */
void	syscall_trace(struct arm_saved_state * regs);
void	syscall_trace_exit(unsigned int, unsigned int);
void	mach_syscall_trace(struct arm_saved_state * regs, unsigned int call_number);
void	mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
void	interrupt_trace(struct arm_saved_state * regs);
void	interrupt_trace_exit(void);
/* called from the fleh_swi handler, if TRACE_SYSCALL is enabled */
void
syscall_trace(
	      struct arm_saved_state * regs)
{
	kprintf("syscall: %d\n", regs->r[12]);
}

void
syscall_trace_exit(
		   unsigned int r0,
		   unsigned int r1)
{
	kprintf("syscall exit: 0x%x 0x%x\n", r0, r1);
}
void
mach_syscall_trace(
		   struct arm_saved_state * regs,
		   unsigned int call_number)
{
	int i, argc;
	int kdarg[3] = {0, 0, 0};

	argc = mach_trap_table[call_number].mach_trap_arg_count;

	if (argc > 3)
		argc = 3;

	for (i = 0; i < argc; i++)
		kdarg[i] = (int) regs->r[i];

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
		kdarg[0], kdarg[1], kdarg[2], 0, 0);
}
void
mach_syscall_trace_exit(
			unsigned int retval,
			unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
		retval, 0, 0, 0, 0);
}
void
interrupt_trace(
		struct arm_saved_state * regs)
{
#define	UMODE(rp)	(((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
		0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc),
		0, 0, 0);
}
void
interrupt_trace_exit(
		     void)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
		0, 0, 0, 0, 0);
}
/* XXX quell warnings */
void interrupt_stats(void);

/* This is called from locore.s directly. We only update per-processor interrupt counters in this function */
void
interrupt_stats(void)
{
	SCHED_STATS_INTERRUPT(current_processor());
}
static void
panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs)
{
	panic_context(0, (void *)regs, "%s (saved state:%p)\n"
	    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
	    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
	    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
	    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
	    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
	    msg, regs,
	    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
	    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
	    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
	    regs->r[12], regs->sp, regs->lr, regs->pc,
	    regs->cpsr, regs->fsr, regs->far);
}