/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/debug.h>

#include <machine/endian.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_types.h>
#include <mach/mach_traps.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/message.h>
#include <mach/machine/thread_status.h>

#include <vm/vm_page.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/setjmp.h>
#include <arm/proc_reg.h>

/*
 * External function prototypes.
 */
#include <kern/syscall_sw.h>
#include <kern/host.h>
#include <kern/processor.h>
extern kern_return_t dtrace_user_probe(arm_saved_state_t * regs, unsigned int instr);
extern boolean_t dtrace_tally_fault(user_addr_t);

/* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions
   over from that file. Need to keep these in sync! */
#define FASTTRAP_ARM_INSTR	0xe7ffdefc
#define FASTTRAP_THUMB_INSTR	0xdefc

#define FASTTRAP_ARM_RET_INSTR	0xe7ffdefb
#define FASTTRAP_THUMB_RET_INSTR 0xdefb

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL;	/* Pointer to DTrace fbt trap hook routine */
#define COPYIN(dst, src, size)					\
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?	\
		copyin_kern(dst, src, size)			\
	:							\
		copyin(dst, src, size)

#define COPYOUT(src, dst, size)					\
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?	\
		copyout_kern(src, dst, size)			\
	:							\
		copyout(src, dst, size)
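
/*
 * Note: COPYIN/COPYOUT consult the saved CPSR rather than the current mode.
 * If the aborted context was running in a privileged mode, the addresses
 * being accessed (e.g. the faulting pc) are kernel-space addresses, so the
 * kernel-space copy routines are used; otherwise the normal user-memory
 * copyin/copyout paths apply.
 */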
/* Second-level exception handlers forward declarations */
void sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *);
void sleh_abort(struct arm_saved_state *, int);
static kern_return_t sleh_alignment(struct arm_saved_state *);
static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs);

int sleh_alignment_count = 0;
int trap_on_alignment_fault = 0;
/*
 * Routine:        sleh_undef
 * Function:       Second level exception handler for undefined exception
 */
void
sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __unused)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t code[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t codeCnt = 2;
	thread_t thread = current_thread();
	vm_offset_t recover;

	recover = thread->recover;
	thread->recover = 0;

	getCpuDatap()->cpu_stat.undef_ex_cnt++;
	/* Inherit the interrupt masks from previous */
	if (!(regs->cpsr & PSR_INTMASK))
		ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (tempDTraceTrapHook) {
		if (tempDTraceTrapHook(exception, regs, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			goto exit;
		}
	}

	/* Check to see if we've hit a userland probe */
	if ((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) {
		if (regs->cpsr & PSR_TF) {
			uint16_t instr = 0;

			if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS)
				goto exit;

			if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
					/* If it succeeds, we are done... */
					goto exit;
			}
		} else {
			uint32_t instr = 0;

			if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
				goto exit;

			if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS)
					/* If it succeeds, we are done... */
					goto exit;
			}
		}
	}
#endif /* CONFIG_DTRACE */
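
	/*
	 * Decode the faulting instruction so that code[1] carries the
	 * instruction itself in the Mach exception, and so that GDB-style
	 * breakpoint traps can be reported as EXC_BREAKPOINT rather than
	 * EXC_BAD_INSTRUCTION.
	 */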
	if (regs->cpsr & PSR_TF) {
		unsigned short instr = 0;

		if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
			goto exit;

		if (IS_THUMB32(instr)) {
			unsigned int instr32;

			instr32 = (instr<<16);

			if(COPYIN((user_addr_t)(((unsigned short *) (regs->pc))+1), (char *)&instr,(vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS)
				goto exit;

			instr32 |= instr;
			code[1] = instr32;

			if (IS_THUMB_VFP(instr32)) {
				/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
				if (!get_vfp_enabled())
					panic("VFP was disabled (thumb); VFP should always be enabled");
			}
		} else {
			/* I don't believe we have any 16 bit VFP instructions, so just set code[1]. */
			code[1] = instr;

			if (IS_THUMB_GDB_TRAP(instr)) {
				exception = EXC_BREAKPOINT;
				code[0] = EXC_ARM_BREAKPOINT;
			}
		}
	} else {
		uint32_t instr = 0;

		if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS)
			goto exit;

		code[1] = instr;

		if (IS_ARM_VFP(instr)) {
			/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
			if (!get_vfp_enabled())
				panic("VFP was disabled (arm); VFP should always be enabled");
		}

		if (IS_ARM_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			code[0] = EXC_ARM_BREAKPOINT;
		}
	}
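
	/*
	 * At this point 'exception' and 'code' describe the undefined
	 * instruction. A fault taken in kernel mode either enters the
	 * debugger (for breakpoints) or panics; a user-mode fault is
	 * delivered to the task as a Mach exception.
	 */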
	if (!((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)) {
		boolean_t intr;

		intr = ml_set_interrupts_enabled(FALSE);

		if (exception == EXC_BREAKPOINT) {
			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			vm_offset_t kstackptr = current_thread()->machine.kstackptr;
			*((arm_saved_state_t *) kstackptr) = *regs;

			DebuggerCall(exception, regs);
			(void) ml_set_interrupts_enabled(intr);
			goto exit;
		}
		panic_with_thread_kernel_state("undefined kernel instruction", regs);

		(void) ml_set_interrupts_enabled(intr);
	} else {
		exception_triage(exception, code, codeCnt);
	}

exit:
	if (recover)
		thread->recover = recover;
}
/*
 * Routine:        sleh_abort
 * Function:       Second level exception handler for abort(Pref/Data)
 */
void
sleh_abort(struct arm_saved_state * regs, int type)
{
	int             status;
	int             debug_status = 0;
	int             spsr;
	int             exc;
	mach_exception_data_type_t codes[2];
	vm_map_t        map;
	vm_map_address_t vaddr;
	vm_map_address_t fault_addr;
	vm_prot_t       fault_type;
	kern_return_t   result;
	vm_offset_t     recover;
	thread_t        thread = current_thread();
	boolean_t       intr;

	recover = thread->recover;
	thread->recover = 0;

	status = regs->fsr & FSR_MASK;
	spsr = regs->cpsr;
	/* The DFSR/IFSR.ExT bit indicates "IMPLEMENTATION DEFINED" classification.
	 * Allow a platform-level error handler to decode it.
	 */
	if ((regs->fsr) & FSR_EXT) {
		cpu_data_t *cdp = getCpuDatap();

		if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
			(*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, 0);
			/* If a platform error handler is registered, expect it to panic, not fall through */
			panic("Unexpected return from platform_error_handler");
		}
	}

	/* Done with asynchronous handling; re-enable here so that subsequent aborts are taken as early as possible. */
	reenable_async_aborts();
	if (ml_at_interrupt_context()) {
#if CONFIG_DTRACE
		if (!(thread->options & TH_OPT_DTRACE))
#endif /* CONFIG_DTRACE */
		{
			panic_with_thread_kernel_state("sleh_abort at interrupt context", regs);
		}
	}

	fault_addr = vaddr = regs->far;

	if (type == T_DATA_ABT) {
		getCpuDatap()->cpu_stat.data_ex_cnt++;
	} else { /* T_PREFETCH_ABT */
		getCpuDatap()->cpu_stat.instr_ex_cnt++;
		fault_type = VM_PROT_READ | VM_PROT_EXECUTE;
	}

	if (status == FSR_DEBUG)
		debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK;

	/* Inherit the interrupt masks from previous */
	if (!(spsr & PSR_INTMASK))
		ml_set_interrupts_enabled(TRUE);
	if (type == T_DATA_ABT) {
		/*
		 * Now that interrupts are reenabled, we can perform any needed
		 * copyin operations.
		 *
		 * Because we have reenabled interrupts, any instruction copy
		 * must be a copyin, even on UP systems.
		 */

		if (regs->fsr & DFSR_WRITE) {
			fault_type = (VM_PROT_READ | VM_PROT_WRITE);
			/* Cache operations report faults as write access, change these to read access */
			/* Cache operations are invoked from arm mode for now */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins = 0;

				if(COPYIN((user_addr_t)(regs->pc), (char *)&ins,(vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
					goto exit;

				if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins))
					fault_type = VM_PROT_READ;
			}
		} else {
			fault_type = VM_PROT_READ;
			/*
			 * DFSR is not getting the "write" bit set
			 * when a swp instruction is encountered (even when it is
			 * a write fault).
			 */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins = 0;

				if(COPYIN((user_addr_t)(regs->pc), (char *)&ins,(vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS)
					goto exit;

				if ((ins & ARM_SWP_MASK) == ARM_SWP)
					fault_type = VM_PROT_WRITE;
			}
		}
	}
	if ((spsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		/* Fault in kernel mode */

		if ((status == FSR_DEBUG)
		    && ((debug_status == ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT) || (debug_status == ARM_DBGDSCR_MOE_SYNC_WATCHPOINT))
		    && (recover != 0) && (getCpuDatap()->cpu_user_debug != 0)) {
			/* If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
			 * abort. Turn off watchpoints and keep going; we'll turn them back on in load_and_go_user.
			 */
			arm_debug_set(NULL);
			goto exit;
		}
		if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) {

			intr = ml_set_interrupts_enabled(FALSE);
			if (status == FSR_DEBUG) {
				DebuggerCall(EXC_BREAKPOINT, regs);
				(void) ml_set_interrupts_enabled(intr);
				goto exit;
			}
			panic_with_thread_kernel_state("prefetch abort in kernel mode", regs);

			(void) ml_set_interrupts_enabled(intr);
		} else if (TEST_FSR_VMFAULT(status)) {

			if (thread->options & TH_OPT_DTRACE) {	/* Executing under dtrace_probe? */
				if (dtrace_tally_fault(fault_addr)) {	/* Should a fault under dtrace be ignored? */
					/* Point to next instruction */
					regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t*) (regs->pc)))) ? 2 : 4;
					goto exit;
				} else {
					intr = ml_set_interrupts_enabled(FALSE);

					panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", regs);

					(void) ml_set_interrupts_enabled(intr);

					goto exit;
				}
			}

			if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL)
				map = kernel_map;
			else
				map = thread->map;

			if (!TEST_FSR_TRANSLATION_FAULT(status)) {
				/* check to see if it is just a pmap ref/modify fault */
				result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE);
				if (result == KERN_SUCCESS)
					goto exit;
			}

			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			                  FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
			                  (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);

			if (result == KERN_SUCCESS) {
				goto exit;
			} else {
				/*
				 * If we have a recover handler, invoke it now.
				 */
				if (recover != 0) {
					regs->pc = (register_t) (recover & ~0x1);
					regs->cpsr = (regs->cpsr & ~PSR_TF) | ((recover & 0x1) << PSR_TFb);
					goto exit;
				}
			}
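			/*
			 * Note: a recover address records the Thumb state of the
			 * interrupted code in bit 0, which is why it is split back
			 * into a pc value and a CPSR.T bit above.
			 */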
		} else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
			result = sleh_alignment(regs);
			if (result == KERN_SUCCESS) {
				goto exit;
			} else {
				intr = ml_set_interrupts_enabled(FALSE);

				panic_with_thread_kernel_state("unaligned kernel data access", regs);

				(void) ml_set_interrupts_enabled(intr);
			}
		}
		intr = ml_set_interrupts_enabled(FALSE);

		panic_plain("kernel abort type %d: fault_type=0x%x, fault_addr=0x%x\n"
		            "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
		            "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
		            "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
		            "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
		            "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
		            type, fault_type, fault_addr,
		            regs->r[0], regs->r[1], regs->r[2], regs->r[3],
		            regs->r[4], regs->r[5], regs->r[6], regs->r[7],
		            regs->r[8], regs->r[9], regs->r[10], regs->r[11],
		            regs->r[12], regs->sp, regs->lr, regs->pc,
		            regs->cpsr, regs->fsr, regs->far);

		(void) ml_set_interrupts_enabled(intr);

	} else {
		/* Fault in user mode */

		if (TEST_FSR_VMFAULT(status)) {

			if (thread->options & TH_OPT_DTRACE) {	/* Executing under dtrace_probe? */
				if (dtrace_tally_fault(fault_addr)) {	/* Should a user mode fault under dtrace be ignored? */
					if (recover) {
						regs->pc = recover;
					} else {
						intr = ml_set_interrupts_enabled(FALSE);

						panic_with_thread_kernel_state("copyin/out has no recovery point", regs);

						(void) ml_set_interrupts_enabled(intr);
					}
					goto exception_return;
				} else {
					intr = ml_set_interrupts_enabled(FALSE);

					panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", regs);

					(void) ml_set_interrupts_enabled(intr);

					goto exit;
				}
			}

			map = thread->map;
			if (!TEST_FSR_TRANSLATION_FAULT(status)) {
				/* check to see if it is just a pmap ref/modify fault */
				result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, TRUE);
				if (result == KERN_SUCCESS)
					goto exception_return;
			}

			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			                  FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
			                  THREAD_ABORTSAFE, NULL, 0);
			if (result == KERN_SUCCESS || result == KERN_ABORTED) {
				goto exception_return;
			}
			exc = EXC_BAD_ACCESS;
			codes[0] = result;
		} else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
			if (sleh_alignment(regs) == KERN_SUCCESS) {
				goto exception_return;
			}
			exc = EXC_BAD_ACCESS;
			codes[0] = EXC_ARM_DA_ALIGN;
		} else if (status == FSR_DEBUG) {
			exc = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_DA_DEBUG;
		} else if ((status == FSR_SDOM) || (status == FSR_PDOM)) {
			exc = EXC_BAD_ACCESS;
			codes[0] = KERN_INVALID_ADDRESS;
		} else {
			exc = EXC_BAD_ACCESS;
			codes[0] = KERN_FAILURE;
		}

		codes[1] = vaddr;
		exception_triage(exc, codes, 2);
	}
exception_return:
	if (recover)
		thread->recover = recover;
	thread_exception_return();
	/* NOTREACHED */

exit:
	if (recover)
		thread->recover = recover;
	return;
}
/*
 * Routine:        sleh_alignment
 * Function:       Second level exception handler for alignment data fault
 */
static kern_return_t
sleh_alignment(struct arm_saved_state * regs)
{
	unsigned int    ins = 0;
	unsigned int    rd_index;
	unsigned int    base_index;
	unsigned int    paddr;
	unsigned int    waddr;
	void            *src;
	unsigned int    reg_list;
	unsigned int    reg_count;
	unsigned int    pre;
	unsigned int    up;
	unsigned int    write_back;
	kern_return_t   rc = KERN_SUCCESS;

	getCpuDatap()->cpu_stat.unaligned_cnt++;
	/* Do not try to emulate in modified execution states */
	if (regs->cpsr & (PSR_EF | PSR_JF))
		return KERN_NOT_SUPPORTED;

	/* Disallow emulation of kernel instructions */
	if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE)
		return KERN_NOT_SUPPORTED;

#define ALIGN_THRESHOLD	1024
	if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) ==
	    (ALIGN_THRESHOLD - 1))
		kprintf("sleh_alignment: %d more alignment faults: %d total\n",
			ALIGN_THRESHOLD, sleh_alignment_count);

	if ((trap_on_alignment_fault != 0)
	    && (sleh_alignment_count % trap_on_alignment_fault == 0))
		return KERN_NOT_SUPPORTED;
	if (regs->cpsr & PSR_TF) {
		unsigned short ins16 = 0;

		/* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
		if(COPYIN((user_addr_t)(regs->pc), (char *)&ins16,(vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
			/* Failed to fetch instruction, return success to re-drive the exception */
			return KERN_SUCCESS;
		}
#else
		ins16 = *(unsigned short *) (regs->pc);
#endif

		/*
		 * Map multi-word Thumb loads and stores to their ARM
		 * equivalents.
		 * Don't worry about single-word instructions, since those are
		 * handled in hardware.
		 */
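
		/*
		 * For reference, the synthesized ARM encodings used below are:
		 *   0xE8800000  STMIA Rn, {reglist}  (cond=AL, P=0, U=1, W=0, L=0)
		 *     bit 20 (L) turns it into LDMIA, bit 21 (W) requests base writeback
		 *   0xE8BD0000  LDMIA SP!, {reglist}  i.e. POP
		 *   0xE92D0000  STMDB SP!, {reglist}  i.e. PUSH
		 * The Thumb POP/PUSH "R" bit maps to PC (bit 15) or LR (bit 14)
		 * in the ARM register list.
		 */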
		reg_list = ins16 & 0xff;
		if (reg_list == 0)
			return KERN_NOT_SUPPORTED;

		if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) ||
		    ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) {
			base_index = (ins16 >> 8) & 0x7;
			ins = 0xE8800000 | (base_index << 16) | reg_list;
			if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA)
				ins |= (1 << 20);
			if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) ||
			    !(reg_list & (1 << base_index)))
				ins |= (1 << 21);
		} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) {
			unsigned int r = (ins16 >> 8) & 1;
			ins = 0xE8BD0000 | (r << 15) | reg_list;
		} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_PUSH) {
			unsigned int r = (ins16 >> 8) & 1;
			ins = 0xE92D0000 | (r << 14) | reg_list;
		} else {
			return KERN_NOT_SUPPORTED;
		}
	} else {
		/* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
		if(COPYIN((user_addr_t)(regs->pc), (char *)&ins,(vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
			/* Failed to fetch instruction, return success to re-drive the exception */
			return KERN_SUCCESS;
		}
#else
		ins = *(unsigned int *) (regs->pc);
#endif
	}

	/* Don't try to emulate unconditional instructions */
	if ((ins & 0xF0000000) == 0xF0000000)
		return KERN_NOT_SUPPORTED;

	pre = (ins >> 24) & 1;
	up = (ins >> 23) & 1;
	reg_list = ins & 0xffff;
	write_back = (ins >> 21) & 1;
	base_index = (ins >> 16) & 0xf;
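
	/*
	 * The fields above are the standard ARM block-transfer bits:
	 * P (bit 24, pre-index), U (bit 23, increment), W (bit 21, base
	 * writeback) and the base register in bits 19:16; bit 20 (L)
	 * distinguishes LDM from STM.
	 */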
	if ((ins & ARM_BLK_MASK) == ARM_STM) {	/* STM or LDM */
		reg_count = 0;

		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index))
				reg_count++;
		}

		paddr = regs->r[base_index];

		switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) {
			/* Increment after */
		case ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			break;

			/* Increment before */
		case ARM_POST_INDEXING | ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			paddr += 4;
			break;

			/* Decrement after */
		case 0:
			waddr = paddr - reg_count * 4;
			paddr = waddr + 4;
			break;

			/* Decrement before */
		case ARM_POST_INDEXING:
			waddr = paddr - reg_count * 4;
			paddr = waddr;
			break;
		}
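
		/*
		 * The emulation loop below always walks upward from the lowest
		 * transfer address (lowest-numbered register at the lowest
		 * address), so paddr was adjusted above to the start of the
		 * block and waddr holds the value the base register takes if
		 * writeback is requested.
		 */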
		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index)) {
				src = &regs->r[rd_index];

				if ((ins & (1 << 20)) == 0)	/* STM */
					rc = COPYOUT(src, paddr, 4);
				else	/* LDM */
					rc = COPYIN(paddr, src, 4);

				if (rc != KERN_SUCCESS)
					break;

				paddr += 4;
			}
		}
	} else {
		return KERN_NOT_SUPPORTED;
	}

	if (rc == KERN_SUCCESS) {
		if (regs->cpsr & PSR_TF)
			regs->pc += 2;
		else
			regs->pc += 4;

		/* Write the updated base register back (waddr reflects the addressing mode) */
		if (write_back)
			regs->r[base_index] = waddr;
	}

	return rc;
}
/* XXX quell warnings */
void syscall_trace(struct arm_saved_state * regs);
void syscall_trace_exit(unsigned int, unsigned int);
void mach_syscall_trace(struct arm_saved_state * regs, unsigned int call_number);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
void interrupt_trace(struct arm_saved_state * regs);
void interrupt_trace_exit(void);
/* called from the fleh_swi handler, if TRACE_SYSCALL is enabled */
void
syscall_trace(
	      struct arm_saved_state * regs)
{
	kprintf("syscall: %d\n", regs->r[12]);
}

void
syscall_trace_exit(
		   unsigned int r0,
		   unsigned int r1)
{
	kprintf("syscall exit: 0x%x 0x%x\n", r0, r1);
}
void
mach_syscall_trace(
		   struct arm_saved_state * regs,
		   unsigned int call_number)
{
	int i;
	int argc;
	int kdarg[3] = {0, 0, 0};

	argc = mach_trap_table[call_number].mach_trap_arg_count;

	if (argc > 3)
		argc = 3;

	for (i = 0; i < argc; i++)
		kdarg[i] = (int) regs->r[i];

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
		kdarg[0], kdarg[1], kdarg[2], 0, 0);
}
void
mach_syscall_trace_exit(
			unsigned int retval,
			unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
		retval, 0, 0, 0, 0);
}
void
interrupt_trace(
		struct arm_saved_state * regs)
{
#define	UMODE(rp)	(((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
		0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc),
		UMODE(regs), 0, 0);
}
void
interrupt_trace_exit(
		     void)
{
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}
/* XXX quell warnings */
void interrupt_stats(void);

/* This is called from locore.s directly. We only update per-processor interrupt counters in this function */
void
interrupt_stats(void)
{
	SCHED_STATS_INTERRUPT(current_processor());
}
static void
panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs)
{
	panic_plain("%s (saved state:%p)\n"
	            "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
	            "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
	            "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
	            "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
	            "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
	            msg, regs,
	            regs->r[0], regs->r[1], regs->r[2], regs->r[3],
	            regs->r[4], regs->r[5], regs->r[6], regs->r[7],
	            regs->r[8], regs->r[9], regs->r[10], regs->r[11],
	            regs->r[12], regs->sp, regs->lr, regs->pc,
	            regs->cpsr, regs->fsr, regs->far);
}