/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/debug.h>

#include <machine/endian.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_types.h>
#include <mach/mach_traps.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/message.h>
#include <mach/machine/thread_status.h>

#include <vm/vm_page.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>

#include <sys/kdebug.h>

#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/setjmp.h>
#include <arm/proc_reg.h>
/*
 * External function prototypes.
 */
#include <kern/syscall_sw.h>
#include <kern/host.h>
#include <kern/processor.h>
extern kern_return_t dtrace_user_probe(arm_saved_state_t *regs, unsigned int instr);
extern boolean_t dtrace_tally_fault(user_addr_t);

/* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions
   over from that file. Need to keep these in sync! */
#define FASTTRAP_ARM_INSTR 0xe7ffdefc
#define FASTTRAP_THUMB_INSTR 0xdefc

#define FASTTRAP_ARM_RET_INSTR 0xe7ffdefb
#define FASTTRAP_THUMB_RET_INSTR 0xdefb

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
#define COPYIN(dst, src, size)                                  \
    ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?           \
        copyin_kern(dst, src, size)                             \
        :                                                       \
        copyin(dst, src, size)

#define COPYOUT(src, dst, size)                                 \
    ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?           \
        copyout_kern(src, dst, size)                            \
        :                                                       \
        copyout(src, dst, size)
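/*
 * Both macros dispatch on the mode recorded in the saved CPSR of the
 * enclosing `regs` context: a fault taken while the CPU was in a
 * privileged mode uses the copyin_kern/copyout_kern variants (presumably
 * because the aborted pc and operands are kernel addresses), while a
 * fault taken from user mode uses the ordinary user copy routines.
 */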
/* Second-level exception handlers forward declarations */
void sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *);
void sleh_abort(struct arm_saved_state *, int);
static kern_return_t sleh_alignment(struct arm_saved_state *);
static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs);

volatile perfCallback perfTrapHook = NULL;    /* Pointer to CHUD trap hook routine */

int sleh_alignment_count = 0;
int trap_on_alignment_fault = 0;
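/*
 * sleh_alignment_count tracks every alignment fault we attempt to emulate;
 * sleh_alignment() below prints a summary every ALIGN_THRESHOLD faults.
 * If trap_on_alignment_fault is non-zero, every Nth emulation attempt is
 * refused so the fault is delivered to the task instead of being hidden.
 */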
/*
 * Routine: sleh_undef
 * Function: Second level exception handler for undefined exception
 */
void
sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __unused)
{
    exception_type_t exception = EXC_BAD_INSTRUCTION;
    mach_exception_data_type_t code[2] = {EXC_ARM_UNDEFINED};
    mach_msg_type_number_t codeCnt = 2;
    thread_t thread = current_thread();
    vm_offset_t recover;

    recover = thread->recover;
    thread->recover = 0;

    getCpuDatap()->cpu_stat.undef_ex_cnt++;
    /* Inherit the interrupt masks from previous */
    if (!(regs->cpsr & PSR_INTMASK))
        ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
    if (tempDTraceTrapHook) {
        if (tempDTraceTrapHook(exception, regs, 0, 0) == KERN_SUCCESS) {
            /*
             * If it succeeds, we are done...
             */
            goto exit;
        }
    }

    /* Check to see if we've hit a userland probe */
    if ((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) {
        if (regs->cpsr & PSR_TF) {
            uint16_t instr;

            if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS)
                goto exit;

            if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) {
                if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
                    /* If it succeeds, we are done... */
                    goto exit;
            }
        } else {
            uint32_t instr;

            if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
                goto exit;

            if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) {
                if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
                    /* If it succeeds, we are done... */
                    goto exit;
            }
        }
    }
#endif /* CONFIG_DTRACE */
    if (regs->cpsr & PSR_TF) {
        unsigned short instr;

        if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
            goto exit;

        if (IS_THUMB32(instr)) {
            unsigned int instr32;

            instr32 = (instr << 16);

            if (COPYIN((user_addr_t)(((unsigned short *)(regs->pc)) + 1), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
                goto exit;

            instr32 |= instr;
            code[1] = instr32;

            if (IS_THUMB_VFP(instr32)) {
                /* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
                if (!get_vfp_enabled())
                    panic("VFP was disabled (thumb); VFP should always be enabled");
            }
        } else {
            /* I don't believe we have any 16 bit VFP instructions, so just set code[1]. */
            code[1] = instr;

            if (IS_THUMB_GDB_TRAP(instr)) {
                exception = EXC_BREAKPOINT;
                code[0] = EXC_ARM_BREAKPOINT;
            }
        }
    } else {
        uint32_t instr;

        if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
            goto exit;

        code[1] = instr;

        if (IS_ARM_VFP(instr)) {
            /* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
            if (!get_vfp_enabled())
                panic("VFP was disabled (arm); VFP should always be enabled");
        }

        if (IS_ARM_GDB_TRAP(instr)) {
            exception = EXC_BREAKPOINT;
            code[0] = EXC_ARM_BREAKPOINT;
        }
    }
    if (!((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)) {
        boolean_t intr;

        intr = ml_set_interrupts_enabled(FALSE);

        if (exception == EXC_BREAKPOINT) {
            /* Save off the context here (so that the debug logic
             * can see the original state of this thread).
             */
            vm_offset_t kstackptr = current_thread()->machine.kstackptr;
            *((arm_saved_state_t *) kstackptr) = *regs;

            DebuggerCall(exception, regs);
            (void) ml_set_interrupts_enabled(intr);
            goto exit;
        }
        panic_context(exception, (void *)regs, "undefined kernel instruction\n"
                      "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
                      "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
                      "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                      "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
                      "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                      regs->r[0], regs->r[1], regs->r[2], regs->r[3],
                      regs->r[4], regs->r[5], regs->r[6], regs->r[7],
                      regs->r[8], regs->r[9], regs->r[10], regs->r[11],
                      regs->r[12], regs->sp, regs->lr, regs->pc,
                      regs->cpsr, regs->fsr, regs->far);

        (void) ml_set_interrupts_enabled(intr);
    } else {
        exception_triage(exception, code, codeCnt);
        /* NOTREACHED */
    }

exit:
    if (recover)
        thread->recover = recover;
}
/*
 * Routine: sleh_abort
 * Function: Second level exception handler for abort (Prefetch/Data)
 */
void
sleh_abort(struct arm_saved_state * regs, int type)
{
    int status;
    int debug_status = 0;
    int spsr;
    int exc;
    mach_exception_data_type_t codes[2];
    vm_map_t map;
    vm_map_address_t vaddr;
    vm_map_address_t fault_addr;
    vm_prot_t fault_type;
    kern_return_t result;
    vm_offset_t recover;
    thread_t thread = current_thread();
    boolean_t intr;

    recover = thread->recover;
    thread->recover = 0;

    status = regs->fsr & FSR_MASK;
    spsr = regs->cpsr;
    /* The DFSR/IFSR.ExT bit indicates "IMPLEMENTATION DEFINED" classification.
     * Allow a platform-level error handler to decode it.
     */
    if ((regs->fsr) & FSR_EXT) {
        cpu_data_t *cdp = getCpuDatap();

        if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
            (*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, 0);
            /* If a platform error handler is registered, expect it to panic, not fall through */
            panic("Unexpected return from platform_error_handler");
        }
    }

    /* Done with asynchronous handling; re-enable here so that subsequent aborts are taken as early as possible. */
    reenable_async_aborts();

    if (ml_at_interrupt_context())
        panic_with_thread_kernel_state("sleh_abort at interrupt context", regs);
    fault_addr = vaddr = regs->far;

    if (type == T_DATA_ABT) {
        getCpuDatap()->cpu_stat.data_ex_cnt++;
    } else { /* T_PREFETCH_ABT */
        getCpuDatap()->cpu_stat.instr_ex_cnt++;
        fault_type = VM_PROT_READ | VM_PROT_EXECUTE;
    }

    if (status == FSR_DEBUG)
        debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK;
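    /*
     * DBGDSCR.MOE ("method of entry") records why the debug exception was
     * taken; the kernel-mode path below uses it to distinguish watchpoint
     * hits (which may be tolerated during copyin/copyout) from other debug
     * events.
     */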
    /* Inherit the interrupt masks from previous */
    if (!(spsr & PSR_INTMASK))
        ml_set_interrupts_enabled(TRUE);
    if (type == T_DATA_ABT) {
        /*
         * Now that interrupts are reenabled, we can perform any needed
         * copyin operations.
         *
         * Because we have reenabled interrupts, any instruction copy
         * must be a copyin, even on UP systems.
         */

        if (regs->fsr & DFSR_WRITE) {
            fault_type = (VM_PROT_READ | VM_PROT_WRITE);
            /* Cache operations report faults as write access, change these to read access */
            /* Cache operations are invoked from arm mode for now */
            if (!(regs->cpsr & PSR_TF)) {
                unsigned int ins;

                if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
                    goto exit;

                if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins))
                    fault_type = VM_PROT_READ;
            }
        } else {
            fault_type = VM_PROT_READ;
            /*
             * DFSR is not getting the "write" bit set
             * when a swp instruction is encountered (even when it is
             * a write fault).
             */
            if (!(regs->cpsr & PSR_TF)) {
                unsigned int ins;

                if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
                    goto exit;

                if ((ins & ARM_SWP_MASK) == ARM_SWP)
                    fault_type = VM_PROT_WRITE;
            }
        }
    }
    if ((spsr & PSR_MODE_MASK) != PSR_USER_MODE) {
        /* Fault in kernel mode */

        if ((status == FSR_DEBUG)
            && ((debug_status == ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT) || (debug_status == ARM_DBGDSCR_MOE_SYNC_WATCHPOINT))
            && (recover != 0) && (getCpuDatap()->cpu_user_debug != 0)) {
            /* If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
             * abort. Turn off watchpoints and keep going; we'll turn them back on in load_and_go_user.
             */
            arm_debug_set(NULL);
            goto exit;
        }

        if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) {

            intr = ml_set_interrupts_enabled(FALSE);
            if (status == FSR_DEBUG) {
                DebuggerCall(EXC_BREAKPOINT, regs);
                (void) ml_set_interrupts_enabled(intr);
                goto exit;
            }
            panic_context(EXC_BAD_ACCESS, (void *)regs, "sleh_abort: prefetch abort in kernel mode: fault_addr=0x%x\n"
                          "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
                          "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
                          "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                          "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
                          "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                          fault_addr,
                          regs->r[0], regs->r[1], regs->r[2], regs->r[3],
                          regs->r[4], regs->r[5], regs->r[6], regs->r[7],
                          regs->r[8], regs->r[9], regs->r[10], regs->r[11],
                          regs->r[12], regs->sp, regs->lr, regs->pc,
                          regs->cpsr, regs->fsr, regs->far);

            (void) ml_set_interrupts_enabled(intr);
        } else if (TEST_FSR_VMFAULT(status)) {

            if (thread->options & TH_OPT_DTRACE) {  /* Executing under dtrace_probe? */
                if (dtrace_tally_fault(fault_addr)) {   /* Should a fault under dtrace be ignored? */
                    /* Point to next instruction */
                    regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t *)(regs->pc)))) ? 2 : 4;
                    goto exit;
                } else {
                    intr = ml_set_interrupts_enabled(FALSE);

                    panic_context(EXC_BAD_ACCESS, (void *)regs, "Unexpected page fault under dtrace_probe"
                                  "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
                                  "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
                                  "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                                  "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
                                  "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                                  regs->r[0], regs->r[1], regs->r[2], regs->r[3],
                                  regs->r[4], regs->r[5], regs->r[6], regs->r[7],
                                  regs->r[8], regs->r[9], regs->r[10], regs->r[11],
                                  regs->r[12], regs->sp, regs->lr, regs->pc,
                                  regs->cpsr, regs->fsr, regs->far);

                    (void) ml_set_interrupts_enabled(intr);

                    goto exit;
                }
            }

            if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL)
                map = kernel_map;
            else
                map = thread->map;

            /* check to see if it is just a pmap ref/modify fault */
            result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE);
            if (result == KERN_SUCCESS)
                goto exit;

            /*
             *  We have to "fault" the page in.
             */
            result = vm_fault(map, fault_addr, fault_type,
                              FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
                              (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);

            if (result == KERN_SUCCESS) {
                goto exit;
            } else {
                /*
                 *  If we have a recover handler, invoke it now.
                 */
                if (recover != 0) {
                    regs->pc = (register_t) (recover & ~0x1);
                    regs->cpsr = (regs->cpsr & ~PSR_TF) | ((recover & 0x1) << PSR_TFb);
                    goto exit;
                }
            }
        } else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
            result = sleh_alignment(regs);
            if (result == KERN_SUCCESS) {
                goto exit;
            } else {
                intr = ml_set_interrupts_enabled(FALSE);

                panic_context(EXC_BAD_ACCESS, (void *)regs, "unaligned kernel data access: pc=0x%08x fault_addr=0x%x\n"
                              "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
                              "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
                              "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                              "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
                              "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                              regs->pc, fault_addr,
                              regs->r[0], regs->r[1], regs->r[2], regs->r[3],
                              regs->r[4], regs->r[5], regs->r[6], regs->r[7],
                              regs->r[8], regs->r[9], regs->r[10], regs->r[11],
                              regs->r[12], regs->sp, regs->lr, regs->pc,
                              regs->cpsr, regs->fsr, regs->far);

                (void) ml_set_interrupts_enabled(intr);

                goto exit;
            }
        }
        intr = ml_set_interrupts_enabled(FALSE);

        panic_context(EXC_BAD_ACCESS, (void *)regs, "kernel abort type %d: fault_type=0x%x, fault_addr=0x%x\n"
                      "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
                      "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
                      "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                      "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
                      "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                      type, fault_type, fault_addr,
                      regs->r[0], regs->r[1], regs->r[2], regs->r[3],
                      regs->r[4], regs->r[5], regs->r[6], regs->r[7],
                      regs->r[8], regs->r[9], regs->r[10], regs->r[11],
                      regs->r[12], regs->sp, regs->lr, regs->pc,
                      regs->cpsr, regs->fsr, regs->far);

        (void) ml_set_interrupts_enabled(intr);

        goto exit;
    } else {
        /* Fault in user mode */

        if (TEST_FSR_VMFAULT(status)) {
            map = thread->map;

            if (thread->options & TH_OPT_DTRACE) {  /* Executing under dtrace_probe? */
                if (dtrace_tally_fault(fault_addr)) {   /* Should a user mode fault under dtrace be ignored? */
                    if (recover) {
                        regs->pc = recover;
                    } else {
                        intr = ml_set_interrupts_enabled(FALSE);

                        panic_context(EXC_BAD_ACCESS, (void *)regs, "copyin/out has no recovery point"
                                      "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
                                      "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
                                      "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                                      "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
                                      "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                                      regs->r[0], regs->r[1], regs->r[2], regs->r[3],
                                      regs->r[4], regs->r[5], regs->r[6], regs->r[7],
                                      regs->r[8], regs->r[9], regs->r[10], regs->r[11],
                                      regs->r[12], regs->sp, regs->lr, regs->pc,
                                      regs->cpsr, regs->fsr, regs->far);

                        (void) ml_set_interrupts_enabled(intr);
                    }
                    goto exception_return;
                } else {
                    intr = ml_set_interrupts_enabled(FALSE);

                    panic_context(EXC_BAD_ACCESS, (void *)regs, "Unexpected UMW page fault under dtrace_probe"
                                  "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
                                  "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
                                  "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                                  "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
                                  "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                                  regs->r[0], regs->r[1], regs->r[2], regs->r[3],
                                  regs->r[4], regs->r[5], regs->r[6], regs->r[7],
                                  regs->r[8], regs->r[9], regs->r[10], regs->r[11],
                                  regs->r[12], regs->sp, regs->lr, regs->pc,
                                  regs->cpsr, regs->fsr, regs->far);

                    (void) ml_set_interrupts_enabled(intr);
                }
            }
            /* check to see if it is just a pmap ref/modify fault */
            result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, TRUE);
            if (result != KERN_SUCCESS) {
                /*
                 *  We have to "fault" the page in.
                 */
                result = vm_fault(map, fault_addr, fault_type,
                                  FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
                                  THREAD_ABORTSAFE, NULL, 0);
            }
            if (result == KERN_SUCCESS || result == KERN_ABORTED) {
                goto exception_return;
            }
            exc = EXC_BAD_ACCESS;
            codes[0] = result;
        } else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
            if (sleh_alignment(regs) == KERN_SUCCESS) {
                goto exception_return;
            }
            exc = EXC_BAD_ACCESS;
            codes[0] = EXC_ARM_DA_ALIGN;
        } else if (status == FSR_DEBUG) {
            exc = EXC_BREAKPOINT;
            codes[0] = EXC_ARM_DA_DEBUG;
        } else if ((status == FSR_SDOM) || (status == FSR_PDOM)) {
            exc = EXC_BAD_ACCESS;
            codes[0] = KERN_INVALID_ADDRESS;
        } else {
            exc = EXC_BAD_ACCESS;
            codes[0] = KERN_FAILURE;
        }

        codes[1] = vaddr;
        exception_triage(exc, codes, 2);
        /* NOTREACHED */
    }

exception_return:
    if (recover)
        thread->recover = recover;
    thread_exception_return();
    /* NOTREACHED */

exit:
    if (recover)
        thread->recover = recover;
    return;
}
/*
 * Routine: sleh_alignment
 * Function: Second level exception handler for alignment data fault
 */
static kern_return_t
sleh_alignment(struct arm_saved_state * regs)
{
    unsigned int ins;
    unsigned int rd_index;
    unsigned int base_index;
    unsigned int paddr;
    void *src;
    unsigned int reg_list;
    unsigned int pre;
    unsigned int up;
    unsigned int reg_count;
    unsigned int waddr;
    unsigned int write_back;
    kern_return_t rc = KERN_SUCCESS;

    getCpuDatap()->cpu_stat.unaligned_cnt++;

    /* Do not try to emulate in modified execution states */
    if (regs->cpsr & (PSR_EF | PSR_JF))
        return KERN_NOT_SUPPORTED;

    /* Disallow emulation of kernel instructions */
    if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE)
        return KERN_NOT_SUPPORTED;

#define ALIGN_THRESHOLD 1024
    if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) ==
        (ALIGN_THRESHOLD - 1))
        kprintf("sleh_alignment: %d more alignment faults: %d total\n",
                ALIGN_THRESHOLD, sleh_alignment_count);

    if ((trap_on_alignment_fault != 0)
        && (sleh_alignment_count % trap_on_alignment_fault == 0))
        return KERN_NOT_SUPPORTED;
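    /*
     * A KERN_NOT_SUPPORTED return propagates back to sleh_abort(), which
     * then delivers EXC_BAD_ACCESS / EXC_ARM_DA_ALIGN to the faulting task
     * instead of emulating the access.
     */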
    if (regs->cpsr & PSR_TF) {
        unsigned short ins16;

        /* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
        if (COPYIN((user_addr_t)(regs->pc), (char *)&ins16, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
            /* Failed to fetch instruction, return success to re-drive the exception */
            return KERN_SUCCESS;
        }
#else
        ins16 = *(unsigned short *) (regs->pc);
#endif

        /*
         * Map multi-word Thumb loads and stores to their ARM
         * counterparts.
         * Don't worry about single-word instructions, since those are
         * handled in hardware.
         */
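        /*
         * For example, the Thumb encoding PUSH {r0-r7, lr} (0xB5FF) maps to
         * the ARM encoding 0xE92D40FF (STMDB sp!, {r0-r7, lr}):
         * 0xE92D0000 | (1 << 14) | 0xFF.
         */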
        reg_list = ins16 & 0xff;
        if (reg_list == 0)
            return KERN_NOT_SUPPORTED;

        if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) ||
            ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) {
            base_index = (ins16 >> 8) & 0x7;
            ins = 0xE8800000 | (base_index << 16) | reg_list;
            if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA)
                ins |= (1 << 20);
            if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) ||
                !(reg_list & (1 << base_index)))
                ins |= (1 << 21);
        } else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) {
            unsigned int r = (ins16 >> 8) & 1;
            ins = 0xE8BD0000 | (r << 15) | reg_list;
        } else if ((ins16 & THUMB_PUSH_MASK) == THUMB_PUSH) {
            unsigned int r = (ins16 >> 8) & 1;
            ins = 0xE92D0000 | (r << 14) | reg_list;
        } else {
            return KERN_NOT_SUPPORTED;
        }
    } else {
        /* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
        if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
            /* Failed to fetch instruction, return success to re-drive the exception */
            return KERN_SUCCESS;
        }
#else
        ins = *(unsigned int *) (regs->pc);
#endif
    }

    /* Don't try to emulate unconditional instructions */
    if ((ins & 0xF0000000) == 0xF0000000)
        return KERN_NOT_SUPPORTED;
    pre = (ins >> 24) & 1;
    up = (ins >> 23) & 1;
    reg_list = ins & 0xffff;
    write_back = (ins >> 21) & 1;
    base_index = (ins >> 16) & 0xf;
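    /*
     * Field layout of the (possibly synthesized) ARM LDM/STM encoding:
     * bit 24 = P (pre/post indexing), bit 23 = U (increment/decrement),
     * bit 21 = W (base write-back), bit 20 = L (load vs. store),
     * bits 19:16 = base register, bits 15:0 = register list.
     */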
    if ((ins & ARM_BLK_MASK) == ARM_STM) {  /* STM or LDM */
        reg_count = 0;
        waddr = 0;

        for (rd_index = 0; rd_index < 16; rd_index++) {
            if (reg_list & (1 << rd_index))
                reg_count++;
        }

        paddr = regs->r[base_index];

        switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) {
            /* Increment after */
        case ARM_INCREMENT:
            waddr = paddr + reg_count * 4;
            break;

            /* Increment before */
        case ARM_POST_INDEXING | ARM_INCREMENT:
            waddr = paddr + reg_count * 4;
            paddr += 4;
            break;

            /* Decrement after */
        case 0:
            waddr = paddr - reg_count * 4;
            paddr = waddr + 4;
            break;

            /* Decrement before */
        case ARM_POST_INDEXING:
            waddr = paddr - reg_count * 4;
            paddr = waddr;
            break;
        }
        for (rd_index = 0; rd_index < 16; rd_index++) {
            if (reg_list & (1 << rd_index)) {
                src = &regs->r[rd_index];

                if ((ins & (1 << 20)) == 0)     /* STM */
                    rc = COPYOUT(src, paddr, 4);
                else                            /* LDM */
                    rc = COPYIN(paddr, src, 4);

                if (rc != KERN_SUCCESS)
                    break;

                paddr += 4;
            }
        }
    } else {
        return KERN_NOT_SUPPORTED;
    }

    if (rc == KERN_SUCCESS) {
        if (regs->cpsr & PSR_TF)
            regs->pc += 2;
        else
            regs->pc += 4;

        if (write_back) {
            paddr = waddr;
            regs->r[base_index] = paddr;
        }
    }
    return (rc);
}
/* XXX quell warnings */
void syscall_trace(struct arm_saved_state * regs);
void syscall_trace_exit(unsigned int, unsigned int);
void mach_syscall_trace(struct arm_saved_state * regs, unsigned int call_number);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
void interrupt_trace(struct arm_saved_state * regs);
void interrupt_trace_exit(void);
/* called from the fleh_swi handler, if TRACE_SYSCALL is enabled */
void
syscall_trace(
              struct arm_saved_state * regs)
{
    kprintf("syscall: %d\n", regs->r[12]);
}

void
syscall_trace_exit(
                   unsigned int r0,
                   unsigned int r1)
{
    kprintf("syscall exit: 0x%x 0x%x\n", r0, r1);
}
void
mach_syscall_trace(
                   struct arm_saved_state * regs,
                   unsigned int call_number)
{
    int i, argc;
    int kdarg[3] = {0, 0, 0};

    argc = mach_trap_table[call_number].mach_trap_arg_count;

    if (argc > 3)
        argc = 3;

    for (i = 0; i < argc; i++)
        kdarg[i] = (int) regs->r[i];

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        kdarg[0], kdarg[1], kdarg[2], 0, 0);
}
void
mach_syscall_trace_exit(
                        unsigned int retval,
                        unsigned int call_number)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);
}
void
interrupt_trace(
                struct arm_saved_state * regs)
{
#define UMODE(rp)   (((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)
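    /*
     * UMODE() tests the saved CPSR to see whether the interrupt arrived
     * from user mode; kernel pc values are unslid with VM_KERNEL_UNSLIDE
     * before being emitted into the trace stream.
     */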
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
        0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc),
        UMODE(regs), 0, 0);
}

void
interrupt_trace_exit(
                     void)
{
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
        0, 0, 0, 0, 0);
}
/* XXX quell warnings */
void interrupt_stats(void);

/* This is called from locore.s directly. We only update per-processor interrupt counters in this function */
void
interrupt_stats(void)
{
    SCHED_STATS_INTERRUPT(current_processor());
}

static void
panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs)
{
    panic_context(0, (void *)regs, "%s (saved state:%p)\n"
                  "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
                  "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
                  "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                  "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
                  "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                  msg, regs,
                  regs->r[0], regs->r[1], regs->r[2], regs->r[3],
                  regs->r[4], regs->r[5], regs->r[6], regs->r[7],
                  regs->r[8], regs->r[9], regs->r[10], regs->r[11],
                  regs->r[12], regs->sp, regs->lr, regs->pc,
                  regs->cpsr, regs->fsr, regs->far);
}