/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/debug.h>

#include <machine/endian.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_types.h>
#include <mach/mach_traps.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/message.h>
#include <mach/machine/thread_status.h>

#include <vm/vm_page.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>

#include <sys/kdebug.h>
#include <kperf/kperf.h>

#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/setjmp.h>
#include <arm/proc_reg.h>

/*
 * External function prototypes.
 */
#include <kern/syscall_sw.h>
#include <kern/host.h>
#include <kern/processor.h>

extern kern_return_t dtrace_user_probe(arm_saved_state_t *regs, unsigned int instr);
extern boolean_t dtrace_tally_fault(user_addr_t);

/* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions
 * over from that file. Need to keep these in sync! */
#define FASTTRAP_ARM_INSTR	0xe7ffdefc
#define FASTTRAP_THUMB_INSTR	0xdefc

#define FASTTRAP_ARM_RET_INSTR	0xe7ffdefb
#define FASTTRAP_THUMB_RET_INSTR	0xdefb
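
/*
 * Illustrative note (not part of the original source): these opcodes are the
 * trap words the DTrace fasttrap/pid provider patches into user text. Both
 * encodings sit in undefined-instruction space, so executing a patched probe
 * site raises the undefined-instruction exception and lands in sleh_undef(),
 * which compares the faulting instruction word against these values before
 * calling dtrace_user_probe().
 */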

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */

#define COPYIN(dst, src, size)					\
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?	\
	copyin_kern(dst, src, size)				\
	:							\
	copyin(dst, src, size)

#define COPYOUT(src, dst, size)					\
	((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ?	\
	copyout_kern(src, dst, size)				\
	:							\
	copyout(src, dst, size)
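
/*
 * Sketch of how the macro is used (illustrative, not from the original
 * source): the macros capture the enclosing handler's `regs`, so a fetch of
 * the faulting instruction expands roughly to
 *
 *	uint32_t instr;
 *	kern_return_t kr = ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE)
 *	    ? copyin_kern((user_addr_t)regs->pc, (char *)&instr, sizeof(instr))
 *	    : copyin((user_addr_t)regs->pc, (char *)&instr, sizeof(instr));
 *
 * i.e. the kernel-text copy routine is chosen when the saved CPSR shows the
 * exception did not come from user mode.
 */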

/* Second-level exception handlers forward declarations */
void sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *);
void sleh_abort(struct arm_saved_state *, int);
static kern_return_t sleh_alignment(struct arm_saved_state *);
static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs);

int sleh_alignment_count = 0;
int trap_on_alignment_fault = 0;

/*
 * Routine: sleh_undef
 * Function: Second level exception handler for undefined exception
 */
void
sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __unused)
{
	exception_type_t exception = EXC_BAD_INSTRUCTION;
	mach_exception_data_type_t code[2] = {EXC_ARM_UNDEFINED};
	mach_msg_type_number_t codeCnt = 2;
	thread_t thread = current_thread();
	vm_offset_t recover;

	recover = thread->recover;
	thread->recover = 0;

	getCpuDatap()->cpu_stat.undef_ex_cnt++;

	/* Inherit the interrupt masks from previous */
	if (!(regs->cpsr & PSR_INTMASK)) {
		ml_set_interrupts_enabled(TRUE);
	}

#if CONFIG_DTRACE
	if (tempDTraceTrapHook) {
		if (tempDTraceTrapHook(exception, regs, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			goto exit;
		}
	}

	/* Check to see if we've hit a userland probe */
	if ((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) {
		if (regs->cpsr & PSR_TF) {
			uint16_t instr = 0;

			if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
				goto exit;
			}

			if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) {
					/* If it succeeds, we are done... */
					goto exit;
				}
			}
		} else {
			uint32_t instr = 0;

			if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) {
				goto exit;
			}

			if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) {
				if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) {
					/* If it succeeds, we are done... */
					goto exit;
				}
			}
		}
	}
#endif /* CONFIG_DTRACE */

	if (regs->cpsr & PSR_TF) {
		unsigned short instr = 0;

		if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) {
			goto exit;
		}

		if (IS_THUMB32(instr)) {
			unsigned int instr32;

			instr32 = (instr << 16);

			if (COPYIN((user_addr_t)(((unsigned short *) (regs->pc)) + 1), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) {
				goto exit;
			}

			instr32 |= instr;
			code[1] = instr32;

			if (IS_THUMB_VFP(instr32)) {
				/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
				if (!get_vfp_enabled()) {
					panic("VFP was disabled (thumb); VFP should always be enabled");
				}
			}
		} else {
			/* I don't believe we have any 16 bit VFP instructions, so just set code[1]. */
			code[1] = instr;

			if (IS_THUMB_GDB_TRAP(instr)) {
				exception = EXC_BREAKPOINT;
				code[0] = EXC_ARM_BREAKPOINT;
			}
		}
	} else {
		uint32_t instr = 0;

		if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) {
			goto exit;
		}

		code[1] = instr;

		if (IS_ARM_VFP(instr)) {
			/* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */
			if (!get_vfp_enabled()) {
				panic("VFP was disabled (arm); VFP should always be enabled");
			}
		}

		if (IS_ARM_GDB_TRAP(instr)) {
			exception = EXC_BREAKPOINT;
			code[0] = EXC_ARM_BREAKPOINT;
		}
	}

	if (!((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)) {
		boolean_t intr;

		intr = ml_set_interrupts_enabled(FALSE);

		if (exception == EXC_BREAKPOINT) {
			/* Save off the context here (so that the debug logic
			 * can see the original state of this thread).
			 */
			vm_offset_t kstackptr = current_thread()->machine.kstackptr;
			*((arm_saved_state_t *) kstackptr) = *regs;

			DebuggerCall(exception, regs);
			(void) ml_set_interrupts_enabled(intr);
			goto exit;
		}
		panic_with_thread_kernel_state("undefined kernel instruction", regs);

		(void) ml_set_interrupts_enabled(intr);
	} else {
		exception_triage(exception, code, codeCnt);
	}

exit:
	if (recover) {
		thread->recover = recover;
	}
}

/*
 * Routine: sleh_abort
 * Function: Second level exception handler for abort(Pref/Data)
 */
void
sleh_abort(struct arm_saved_state * regs, int type)
{
	int status;
	int debug_status = 0;
	int spsr;
	int exc;
	mach_exception_data_type_t codes[2];
	vm_map_t map;
	vm_map_address_t vaddr;
	vm_map_address_t fault_addr;
	vm_prot_t fault_type;
	kern_return_t result;
	vm_offset_t recover;
	thread_t thread = current_thread();
	boolean_t intr;

	recover = thread->recover;
	thread->recover = 0;

	status = regs->fsr & FSR_MASK;
	spsr = regs->cpsr;

	/* The DFSR/IFSR.ExT bit indicates "IMPLEMENTATION DEFINED" classification.
	 * Allow a platform-level error handler to decode it.
	 */
	if ((regs->fsr) & FSR_EXT) {
		cpu_data_t *cdp = getCpuDatap();

		if (cdp->platform_error_handler != (platform_error_handler_t) NULL) {
			(*(platform_error_handler_t)cdp->platform_error_handler)(cdp->cpu_id, 0);
			/* If a platform error handler is registered, expect it to panic, not fall through */
			panic("Unexpected return from platform_error_handler");
		}
	}

	/* Done with asynchronous handling; re-enable here so that subsequent aborts are taken as early as possible. */
	reenable_async_aborts();

	if (ml_at_interrupt_context()) {
#if CONFIG_DTRACE
		if (!(thread->options & TH_OPT_DTRACE))
#endif /* CONFIG_DTRACE */
		{
			panic_with_thread_kernel_state("sleh_abort at interrupt context", regs);
		}
	}

	fault_addr = vaddr = regs->far;

	if (type == T_DATA_ABT) {
		getCpuDatap()->cpu_stat.data_ex_cnt++;
	} else { /* T_PREFETCH_ABT */
		getCpuDatap()->cpu_stat.instr_ex_cnt++;
		fault_type = VM_PROT_READ | VM_PROT_EXECUTE;
	}

	if (status == FSR_DEBUG) {
		debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK;
	}

	/* Inherit the interrupt masks from previous */
	if (!(spsr & PSR_INTMASK)) {
		ml_set_interrupts_enabled(TRUE);
	}

	if (type == T_DATA_ABT) {
		/*
		 * Now that interrupts are reenabled, we can perform any needed
		 * copyin operations.
		 *
		 * Because we have reenabled interrupts, any instruction copy
		 * must be a copyin, even on UP systems.
		 */

		if (regs->fsr & DFSR_WRITE) {
			fault_type = (VM_PROT_READ | VM_PROT_WRITE);
			/* Cache operations report faults as write access, change these to read access */
			/* Cache operations are invoked from arm mode for now */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins = 0;

				if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
					goto exit;
				}

				if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins)) {
					fault_type = VM_PROT_READ;
				}
			}
		} else {
			fault_type = VM_PROT_READ;
			/*
			 * DFSR is not getting the "write" bit set
			 * when a swp instruction is encountered (even when it is
			 * a write fault).
			 */
			if (!(regs->cpsr & PSR_TF)) {
				unsigned int ins = 0;

				if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
					goto exit;
				}

				if ((ins & ARM_SWP_MASK) == ARM_SWP) {
					fault_type = VM_PROT_WRITE;
				}
			}
		}
	}

	if ((spsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		/* Fault in kernel mode */

		if ((status == FSR_DEBUG)
		    && ((debug_status == ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT) || (debug_status == ARM_DBGDSCR_MOE_SYNC_WATCHPOINT))
		    && (recover != 0) && (getCpuDatap()->cpu_user_debug != 0)) {
			/* If we hit a watchpoint in kernel mode, probably in a copyin/copyout which we don't want to
			 * abort. Turn off watchpoints and keep going; we'll turn them back on in load_and_go_user.
			 */
			arm_debug_set(NULL);
			goto exit;
		}

		if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) {
			intr = ml_set_interrupts_enabled(FALSE);
			if (status == FSR_DEBUG) {
				DebuggerCall(EXC_BREAKPOINT, regs);
				(void) ml_set_interrupts_enabled(intr);
				goto exit;
			}
			panic_with_thread_kernel_state("prefetch abort in kernel mode", regs);

			(void) ml_set_interrupts_enabled(intr);
		} else if (TEST_FSR_VMFAULT(status)) {
#if CONFIG_DTRACE
			if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
				if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */
					/* Point to next instruction */
					regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t*) (regs->pc)))) ? 2 : 4;
					goto exit;
				} else {
					intr = ml_set_interrupts_enabled(FALSE);
					panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", regs);

					(void) ml_set_interrupts_enabled(intr);

					goto exit;
				}
			}
#endif

			if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL) {
				map = kernel_map;
			} else {
				map = thread->map;
			}

			if (!TEST_FSR_TRANSLATION_FAULT(status)) {
				/* check to see if it is just a pmap ref/modify fault */
				result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE);
				if (result == KERN_SUCCESS) {
					goto exit;
				}
			}

			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
			    (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);

			if (result == KERN_SUCCESS) {
				goto exit;
			} else {
				/*
				 * If we have a recover handler, invoke it now.
				 */
				if (recover != 0) {
					regs->pc = (register_t) (recover & ~0x1);
					regs->cpsr = (regs->cpsr & ~PSR_TF) | ((recover & 0x1) << PSR_TFb);
					goto exit;
				}
			}
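			/*
			 * Illustrative note (not from the original source): the
			 * thread recovery address stores the handler's Thumb state
			 * in its low bit, much like a BX target. For a hypothetical
			 * Thumb-mode recovery handler at 0x80001235:
			 *
			 *	regs->pc   = recover & ~0x1;	// 0x80001234
			 *	CPSR.T bit = recover & 0x1;	// set, resume in Thumb
			 */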
		} else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
			result = sleh_alignment(regs);
			if (result == KERN_SUCCESS) {
				goto exit;
			} else {
				intr = ml_set_interrupts_enabled(FALSE);

				panic_with_thread_kernel_state("unaligned kernel data access", regs);

				(void) ml_set_interrupts_enabled(intr);

				goto exit;
			}
		}

		intr = ml_set_interrupts_enabled(FALSE);

		panic_plain("kernel abort type %d: fault_type=0x%x, fault_addr=0x%x\n"
		    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
		    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
		    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
		    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
		    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
		    type, fault_type, fault_addr,
		    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
		    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
		    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
		    regs->r[12], regs->sp, regs->lr, regs->pc,
		    regs->cpsr, regs->fsr, regs->far);

		(void) ml_set_interrupts_enabled(intr);
	} else {
		/* Fault in user mode */

		if (TEST_FSR_VMFAULT(status)) {
			map = thread->map;

#if CONFIG_DTRACE
			if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
				if (dtrace_tally_fault(fault_addr)) { /* Should a user mode fault under dtrace be ignored? */
					if (recover) {
						regs->pc = recover;
					} else {
						intr = ml_set_interrupts_enabled(FALSE);

						panic_with_thread_kernel_state("copyin/out has no recovery point", regs);

						(void) ml_set_interrupts_enabled(intr);
					}
					goto exit;
				} else {
					intr = ml_set_interrupts_enabled(FALSE);

					panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", regs);

					(void) ml_set_interrupts_enabled(intr);

					goto exit;
				}
			}
#endif

			if (!TEST_FSR_TRANSLATION_FAULT(status)) {
				/* check to see if it is just a pmap ref/modify fault */
				result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, TRUE);
				if (result == KERN_SUCCESS) {
					goto exception_return;
				}
			}

			/*
			 * We have to "fault" the page in.
			 */
			result = vm_fault(map, fault_addr, fault_type,
			    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
			    THREAD_ABORTSAFE, NULL, 0);
			if (result == KERN_SUCCESS || result == KERN_ABORTED) {
				goto exception_return;
			}
			exc = EXC_BAD_ACCESS;
			codes[0] = result;
		} else if ((status & FSR_ALIGN_MASK) == FSR_ALIGN) {
			if (sleh_alignment(regs) == KERN_SUCCESS) {
				goto exception_return;
			}
			exc = EXC_BAD_ACCESS;
			codes[0] = EXC_ARM_DA_ALIGN;
		} else if (status == FSR_DEBUG) {
			exc = EXC_BREAKPOINT;
			codes[0] = EXC_ARM_DA_DEBUG;
		} else if ((status == FSR_SDOM) || (status == FSR_PDOM)) {
			exc = EXC_BAD_ACCESS;
			codes[0] = KERN_INVALID_ADDRESS;
		} else {
			exc = EXC_BAD_ACCESS;
			codes[0] = KERN_FAILURE;
		}

		codes[1] = vaddr;
		exception_triage(exc, codes, 2);
	}

exception_return:
	if (recover) {
		thread->recover = recover;
	}
	thread_exception_return();

exit:
	if (recover) {
		thread->recover = recover;
	}
}

/*
 * Routine: sleh_alignment
 * Function: Second level exception handler for alignment data fault
 */
static kern_return_t
sleh_alignment(struct arm_saved_state * regs)
{
	unsigned int ins = 0;
	unsigned int rd_index;
	unsigned int base_index;
	unsigned int paddr;
	void *src;
	unsigned int reg_list;
	unsigned int pre;
	unsigned int up;
	unsigned int write_back;
	kern_return_t rc = KERN_SUCCESS;

	getCpuDatap()->cpu_stat.unaligned_cnt++;

	/* Do not try to emulate in modified execution states */
	if (regs->cpsr & (PSR_EF | PSR_JF)) {
		return KERN_NOT_SUPPORTED;
	}

	/* Disallow emulation of kernel instructions */
	if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) {
		return KERN_NOT_SUPPORTED;
	}

#define ALIGN_THRESHOLD 1024
	if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) ==
	    (ALIGN_THRESHOLD - 1)) {
		kprintf("sleh_alignment: %d more alignment faults: %d total\n",
		    ALIGN_THRESHOLD, sleh_alignment_count);
	}

	if ((trap_on_alignment_fault != 0)
	    && (sleh_alignment_count % trap_on_alignment_fault == 0)) {
		return KERN_NOT_SUPPORTED;
	}
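
	/*
	 * Usage note (illustrative, not from the original source): with a
	 * hypothetical setting of trap_on_alignment_fault = 100, every 100th
	 * alignment fault is not emulated; sleh_abort() then reports it to the
	 * faulting process as EXC_BAD_ACCESS with code EXC_ARM_DA_ALIGN.
	 */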

	if (regs->cpsr & PSR_TF) {
		unsigned short ins16 = 0;

		/* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
		if (COPYIN((user_addr_t)(regs->pc), (char *)&ins16, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) {
			/* Failed to fetch instruction, return success to re-drive the exception */
			return KERN_SUCCESS;
		}
#else
		ins16 = *(unsigned short *) (regs->pc);
#endif

		/*
		 * Map multi-word Thumb loads and stores to their ARM
		 * equivalents.
		 * Don't worry about single-word instructions, since those are
		 * handled in hardware.
		 */

		reg_list = ins16 & 0xff;
		if (reg_list == 0) {
			return KERN_NOT_SUPPORTED;
		}

		if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) ||
		    ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) {
			base_index = (ins16 >> 8) & 0x7;
			ins = 0xE8800000 | (base_index << 16) | reg_list;
			if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) {
				ins |= (1 << 20);
			}
			if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) ||
			    !(reg_list & (1 << base_index))) {
				ins |= (1 << 21);
			}
		} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) {
			unsigned int r = (ins16 >> 8) & 1;
			ins = 0xE8BD0000 | (r << 15) | reg_list;
		} else if ((ins16 & THUMB_PUSH_MASK) == THUMB_PUSH) {
			unsigned int r = (ins16 >> 8) & 1;
			ins = 0xE92D0000 | (r << 14) | reg_list;
		} else {
			return KERN_NOT_SUPPORTED;
		}
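
		/*
		 * Worked example (illustrative, not from the original source):
		 * the Thumb instruction "POP {r0-r3, pc}" encodes as
		 * ins16 = 0xBD0F, so reg_list = 0x0F and bit 8 (the PC bit) is
		 * set. The mapping above yields
		 * ins = 0xE8BD0000 | (1 << 15) | 0x0F = 0xE8BD800F, the ARM
		 * encoding of "LDMIA sp!, {r0-r3, pc}".
		 */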
	} else {
		/* Get aborted instruction */
#if __ARM_SMP__ || __ARM_USER_PROTECT__
		if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) {
			/* Failed to fetch instruction, return success to re-drive the exception */
			return KERN_SUCCESS;
		}
#else
		ins = *(unsigned int *) (regs->pc);
#endif
	}

	/* Don't try to emulate unconditional instructions */
	if ((ins & 0xF0000000) == 0xF0000000) {
		return KERN_NOT_SUPPORTED;
	}

	pre = (ins >> 24) & 1;
	up = (ins >> 23) & 1;
	reg_list = ins & 0xffff;
	write_back = (ins >> 21) & 1;
	base_index = (ins >> 16) & 0xf;
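
	/*
	 * Illustrative note (not from the original source): these fields follow
	 * the ARM block data transfer (LDM/STM) encoding: bit 24 is P
	 * (pre-index), bit 23 is U (add/subtract), bit 21 is W (base
	 * write-back), bit 20 is L (load vs. store), bits 19:16 select the base
	 * register, and bits 15:0 form the transfer register list.
	 */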

	if ((ins & ARM_BLK_MASK) == ARM_STM) {	/* STM or LDM */
		unsigned int reg_count = 0;
		unsigned int waddr;

		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index)) {
				reg_count++;
			}
		}

		paddr = regs->r[base_index];

		switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) {
			/* Increment after */
		case ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			break;

			/* Increment before */
		case ARM_POST_INDEXING | ARM_INCREMENT:
			waddr = paddr + reg_count * 4;
			paddr += 4;
			break;

			/* Decrement after */
		case 0:
			waddr = paddr - reg_count * 4;
			paddr = waddr + 4;
			break;

			/* Decrement before */
		case ARM_POST_INDEXING:
			waddr = paddr - reg_count * 4;
			paddr = waddr;
			break;
		}

		for (rd_index = 0; rd_index < 16; rd_index++) {
			if (reg_list & (1 << rd_index)) {
				src = &regs->r[rd_index];

				if ((ins & (1 << 20)) == 0) {	/* STM */
					rc = COPYOUT(src, paddr, 4);
				} else {	/* LDM */
					rc = COPYIN(paddr, src, 4);
				}

				if (rc != KERN_SUCCESS) {
					break;
				}

				paddr += 4;
			}
		}

		paddr = waddr;
	}

	if (rc == KERN_SUCCESS) {
		if (regs->cpsr & PSR_TF) {
			regs->pc += 2;
		} else {
			regs->pc += 4;
		}

		if (write_back) {
			regs->r[base_index] = paddr;
		}
	}
	return rc;
}

/* XXX quell warnings */
void syscall_trace(struct arm_saved_state * regs);
void syscall_trace_exit(unsigned int, unsigned int);
void mach_syscall_trace(struct arm_saved_state * regs, unsigned int call_number);
void mach_syscall_trace_exit(unsigned int retval, unsigned int call_number);
void interrupt_trace(struct arm_saved_state * regs);
void interrupt_trace_exit(void);

/* called from the fleh_swi handler, if TRACE_SYSCALL is enabled */
void
syscall_trace(
	struct arm_saved_state * regs)
{
	kprintf("syscall: %d\n", regs->r[12]);
}

void
syscall_trace_exit(
	unsigned int r0,
	unsigned int r1)
{
	kprintf("syscall exit: 0x%x 0x%x\n", r0, r1);
}

void
mach_syscall_trace(
	struct arm_saved_state * regs,
	unsigned int call_number)
{
	int i, argc;
	int kdarg[3] = {0, 0, 0};

	argc = mach_trap_table[call_number].mach_trap_arg_count;

	if (argc > 3) {
		argc = 3;
	}

	for (i = 0; i < argc; i++) {
		kdarg[i] = (int) regs->r[i];
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    kdarg[0], kdarg[1], kdarg[2], 0, 0);
}

void
mach_syscall_trace_exit(
	unsigned int retval,
	unsigned int call_number)
{
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);
}

void
interrupt_trace(
	struct arm_saved_state * regs)
{
#define UMODE(rp)	(((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
	    0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc),
	    UMODE(regs), 0, 0);
}

void
interrupt_trace_exit(
	void)
{
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END);
}

/* XXX quell warnings */
void interrupt_stats(void);

/* This is called from locore.s directly. We only update per-processor interrupt counters in this function. */
void
interrupt_stats(void)
{
	SCHED_STATS_INTERRUPT(current_processor());
}

static void
panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs)
{
	panic_plain("%s (saved state:%p)\n"
	    "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n"
	    "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n"
	    "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
	    "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n"
	    "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
	    msg, regs,
	    regs->r[0], regs->r[1], regs->r[2], regs->r[3],
	    regs->r[4], regs->r[5], regs->r[6], regs->r[7],
	    regs->r[8], regs->r[9], regs->r[10], regs->r[11],
	    regs->r[12], regs->sp, regs->lr, regs->pc,
	    regs->cpsr, regs->fsr, regs->far);
}