/*
 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
36 #include <mach/mach_types.h>
37 #include <mach/mach_traps.h>
38 #include <mach/thread_status.h>
40 #include <kern/processor.h>
41 #include <kern/thread.h>
42 #include <kern/exception.h>
43 #include <kern/syscall_sw.h>
44 #include <kern/cpu_data.h>
45 #include <kern/debug.h>
47 #include <vm/vm_fault.h>
48 #include <vm/vm_kern.h> /* For kernel_map */
50 #include <ppc/misc_protos.h>
52 #include <ppc/exception.h>
53 #include <ppc/proc_reg.h> /* for SR_xxx definitions */
56 #include <ppc/mappings.h>
57 #include <ppc/Firmware.h>
58 #include <ppc/low_trace.h>
59 #include <ppc/Diagnostics.h>
60 #include <ppc/hw_perfmon.h>
61 #include <ppc/fpu_protos.h>
63 #include <sys/kdebug.h>
/* Performance-tool (CHUD) hook pointers; perfASTHook is consulted on every
 * pass through trap() below.  volatile: presumably updated from outside this
 * file at runtime -- NOTE(review): confirm the writer. */
volatile perfCallback perfTrapHook; /* Pointer to CHUD trap hook routine */
volatile perfCallback perfASTHook; /* Pointer to CHUD AST hook routine */
/* DTrace entry points; definitions live in the DTrace subsystem. */
extern kern_return_t dtrace_user_probe(ppc_saved_state_t *sv);

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */

/* Returns TRUE when a fault taken under dtrace_probe should be ignored
 * (checked in the kernel-mode fault paths of trap()). */
extern boolean_t dtrace_tally_fault(user_addr_t);
78 #include <ddb/db_watch.h>
79 #include <ddb/db_run.h>
80 #include <ddb/db_break.h>
81 #include <ddb/db_trap.h>
/* MACH_KDB (ddb kernel debugger) state. */
boolean_t let_ddb_vm_fault = FALSE; /* When TRUE, access faults are not forced into kdb (see the access-fault path in trap()) */
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;
/* BSD init-task failure reporting: trap() formats exception state into
 * init_task_failure_data when the faulting task is bsd_init_task. */
extern task_t bsd_init_task;
extern char init_task_failure_data[];
extern int not_in_kdp;
/* Fault-protection shorthands passed to vm_fault() in the handlers below. */
#define PROT_EXEC (VM_PROT_EXECUTE)
#define PROT_RO (VM_PROT_READ)
#define PROT_RW (VM_PROT_READ|VM_PROT_WRITE)
102 /* A useful macro to update the ppc_exception_state in the PCB
103 * before calling doexception
105 #define UPDATE_PPC_EXCEPTION_STATE { \
106 thread_t _thread = current_thread(); \
107 _thread->machine.pcb->save_dar = (uint64_t)dar; \
108 _thread->machine.pcb->save_dsisr = dsisr; \
109 _thread->machine.pcb->save_exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \
112 void unresolved_kernel_trap(int trapno
,
113 struct savearea
*ssp
,
116 const char *message
);
static void handleMck(struct savearea *ssp); /* Common machine check handler */

/* BSD profiling helpers -- the matching conditional for this #endif is
 * outside this view. */
extern void get_procrustime(time_value_t *);
extern void bsd_uprofil(time_value_t *, user_addr_t);
#endif /* MACH_BSD */
126 struct savearea
*trap(int trapno
,
127 struct savearea
*ssp
,
132 mach_exception_code_t code
= 0;
133 mach_exception_subcode_t subcode
= 0;
135 vm_map_offset_t offset
;
136 thread_t thread
= current_thread();
143 #endif /* MACH_BSD */
145 myast
= ast_pending();
146 perfCallback fn
= perfASTHook
;
148 if(*myast
& AST_CHUD_ALL
) {
149 fn(trapno
, ssp
, dsisr
, (unsigned int)dar
);
152 *myast
&= ~AST_CHUD_ALL
;
156 if(fn
) { /* Is there a hook? */
157 if(fn(trapno
, ssp
, dsisr
, (unsigned int)dar
) == KERN_SUCCESS
) return ssp
; /* If it succeeds, we are done... */
161 if(tempDTraceTrapHook
) { /* Is there a hook? */
162 if(tempDTraceTrapHook(trapno
, ssp
, dsisr
, (unsigned int)dar
) == KERN_SUCCESS
) return ssp
; /* If it succeeds, we are done... */
168 extern void fctx_text(void);
173 exception
= 0; /* Clear exception for now */
176 * Remember that we are disabled for interruptions when we come in here. Because
177 * of latency concerns, we need to enable interruptions in the interrupted process
178 * was enabled itself as soon as we can.
181 intr
= (ssp
->save_srr1
& MASK(MSR_EE
)) != 0; /* Remember if we were enabled */
183 /* Handle kernel traps first */
185 if (!USER_MODE(ssp
->save_srr1
)) {
187 * Trap came from kernel
191 case T_PREEMPT
: /* Handle a preempt trap */
192 ast_taken(AST_PREEMPTION
, FALSE
);
196 perfmon_handle_pmi(ssp
);
199 case T_RESET
: /* Reset interruption */
200 if (!Call_Debugger(trapno
, ssp
))
201 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
202 break; /* We just ignore these */
205 * These trap types should never be seen by trap()
206 * in kernel mode, anyway.
207 * Some are interrupts that should be seen by
208 * interrupt() others just don't happen because they
209 * are handled elsewhere. Some could happen but are
210 * considered to be fatal in kernel mode.
213 case T_IN_VAIN
: /* Shouldn't ever see this, lowmem_vectors eats it */
214 case T_SYSTEM_MANAGEMENT
:
215 case T_ALTIVEC_ASSIST
:
217 case T_FP_UNAVAILABLE
:
221 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
226 * Here we handle a machine check in the kernel
229 case T_MACHINE_CHECK
:
230 handleMck(ssp
); /* Common to both user and kernel */
236 * If enaNotifyEMb is set, we get here, and
237 * we have actually already emulated the unaligned access.
238 * All that we want to do here is to ignore the interrupt. This is to allow logging or
239 * tracing of unaligned accesses.
242 if(ssp
->save_hdr
.save_misc3
) { /* Was it a handled exception? */
243 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
); /* Go panic */
246 KERNEL_DEBUG_CONSTANT(
247 MACHDBG_CODE(DBG_MACH_EXCP_ALNG
, 0) | DBG_FUNC_NONE
,
248 (int)ssp
->save_srr0
- 4, (int)dar
, (int)dsisr
, (int)ssp
->save_lr
, 0);
253 * If enaNotifyEMb is set we get here, and
254 * we have actually already emulated the instruction.
255 * All that we want to do here is to ignore the interrupt. This is to allow logging or
256 * tracing of emulated instructions.
259 KERNEL_DEBUG_CONSTANT(
260 MACHDBG_CODE(DBG_MACH_EXCP_EMUL
, 0) | DBG_FUNC_NONE
,
261 (int)ssp
->save_srr0
- 4, (int)((savearea_comm
*)ssp
)->save_misc2
, (int)dsisr
, (int)ssp
->save_lr
, 0);
269 case T_RUNMODE_TRACE
:
270 case T_INSTRUCTION_BKPT
:
271 if (!Call_Debugger(trapno
, ssp
))
272 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
276 if (ssp
->save_srr1
& MASK(SRR1_PRG_TRAP
)) {
277 if (!Call_Debugger(trapno
, ssp
))
278 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
280 unresolved_kernel_trap(trapno
, ssp
,
287 mp_disable_preemption();
289 && getPerProc()->debugger_active
290 && !let_ddb_vm_fault
) {
292 * Force kdb to handle this one.
294 kdb_trap(trapno
, ssp
);
296 mp_enable_preemption();
297 #endif /* MACH_KDB */
298 /* can we take this during normal panic dump operation? */
300 && getPerProc()->debugger_active
303 * Access fault while in kernel core dump.
305 kdp_dump_trap(trapno
, ssp
);
309 if(ssp
->save_dsisr
& dsiInvMode
) { /* Did someone try to reserve cache inhibited? */
310 panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar
);
313 if(intr
) ml_set_interrupts_enabled(TRUE
); /* Enable if we were */
315 if(((dar
>> 28) < 0xE) | ((dar
>> 28) > 0xF)) { /* User memory window access? */
317 offset
= (vm_map_offset_t
)dar
; /* Set the failing address */
318 map
= kernel_map
; /* No, this is a normal kernel access */
321 * Note: Some ROM device drivers will access page 0 when they start. The IOKit will
322 * set a flag to tell us to ignore any access fault on page 0. After the driver is
323 * opened, it will clear the flag.
325 if((0 == (offset
& -PAGE_SIZE
)) && /* Check for access of page 0 and */
326 ((thread
->machine
.specFlags
) & ignoreZeroFault
)) { /* special case of ignoring page zero faults */
327 ssp
->save_srr0
+= 4; /* Point to next instruction */
332 if (thread
->options
& TH_OPT_DTRACE
) { /* Executing under dtrace_probe? */
333 if (dtrace_tally_fault(dar
)) { /* Should a fault under dtrace be ignored? */
334 ssp
->save_srr0
+= 4; /* Point to next instruction */
337 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, "Unexpected page fault under dtrace_probe");
342 code
= vm_fault(map
, vm_map_trunc_page(offset
),
343 dsisr
& MASK(DSISR_WRITE
) ? PROT_RW
: PROT_RO
,
344 FALSE
, THREAD_UNINT
, NULL
, vm_map_trunc_page(0));
346 if (code
!= KERN_SUCCESS
) {
347 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
349 ssp
->save_hdr
.save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
350 ssp
->save_dsisr
= (ssp
->save_dsisr
&
351 ~((MASK(DSISR_NOEX
) | MASK(DSISR_PROT
)))) | MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
356 /* If we get here, the fault was due to a user memory window access */
359 if (thread
->options
& TH_OPT_DTRACE
) { /* Executing under dtrace_probe? */
360 if (dtrace_tally_fault(dar
)) { /* Should a user memory window access fault under dtrace be ignored? */
361 if (thread
->recover
) {
362 ssp
->save_srr0
= thread
->recover
;
363 thread
->recover
= (vm_offset_t
)NULL
;
365 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, "copyin/out has no recovery point");
369 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, "Unexpected UMW page fault under dtrace_probe");
376 offset
= (vm_map_offset_t
)(thread
->machine
.umwRelo
+ dar
); /* Compute the user space address */
378 code
= vm_fault(map
, vm_map_trunc_page(offset
),
379 dsisr
& MASK(DSISR_WRITE
) ? PROT_RW
: PROT_RO
,
380 FALSE
, THREAD_UNINT
, NULL
, vm_map_trunc_page(0));
382 /* If we failed, there should be a recovery
385 if (code
!= KERN_SUCCESS
) {
386 if (thread
->recover
) {
387 ssp
->save_srr0
= thread
->recover
;
388 thread
->recover
= (vm_offset_t
)NULL
;
390 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, "copyin/out has no recovery point");
394 ssp
->save_hdr
.save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
395 ssp
->save_dsisr
= (ssp
->save_dsisr
&
396 ~((MASK(DSISR_NOEX
) | MASK(DSISR_PROT
)))) | MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
401 case T_INSTRUCTION_ACCESS
:
405 && getPerProc()->debugger_active
406 && !let_ddb_vm_fault
) {
408 * Force kdb to handle this one.
410 kdb_trap(trapno
, ssp
);
412 #endif /* MACH_KDB */
414 /* Same as for data access, except fault type
415 * is PROT_EXEC and addr comes from srr0
418 if(intr
) ml_set_interrupts_enabled(TRUE
); /* Enable if we were */
422 code
= vm_fault(map
, vm_map_trunc_page(ssp
->save_srr0
),
423 (PROT_EXEC
| PROT_RO
), FALSE
, THREAD_UNINT
, NULL
, vm_map_trunc_page(0));
425 if (code
!= KERN_SUCCESS
) {
426 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
428 ssp
->save_hdr
.save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
429 ssp
->save_srr1
= (ssp
->save_srr1
&
430 ~((unsigned long long)(MASK(DSISR_NOEX
) | MASK(DSISR_PROT
)))) | MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
434 /* Usually shandler handles all the system calls, but the
435 * atomic thread switcher may throwup (via thandler) and
436 * have to pass it up to the exception handler.
440 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
444 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
450 * Processing for user state traps with interrupt enabled
451 * For T_AST, interrupts are enabled in the AST delivery
454 ml_set_interrupts_enabled(TRUE
);
458 get_procrustime(&tv
);
460 #endif /* MACH_BSD */
464 * Trap came from user task
470 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
474 perfmon_handle_pmi(ssp
);
478 * These trap types should never be seen by trap()
479 * Some are interrupts that should be seen by
480 * interrupt() others just don't happen because they
481 * are handled elsewhere.
484 case T_IN_VAIN
: /* Shouldn't ever see this, lowmem_vectors eats it */
486 case T_FP_UNAVAILABLE
:
487 case T_SYSTEM_MANAGEMENT
:
493 ml_set_interrupts_enabled(FALSE
); /* Turn off interruptions */
495 panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
496 cpu_number(), trapno
, dsisr
, dar
, ssp
->save_srr0
, ssp
->save_srr1
);
501 * Here we handle a machine check in user state
504 case T_MACHINE_CHECK
:
505 handleMck(ssp
); /* Common to both user and kernel */
509 ml_set_interrupts_enabled(FALSE
); /* Turn off interruptions */
510 if (!Call_Debugger(trapno
, ssp
))
511 panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
512 ssp
->save_srr0
, ssp
->save_srr1
);
513 break; /* We just ignore these */
517 * If enaNotifyEMb is set, we get here, and
518 * we have actually already emulated the unaligned access.
519 * All that we want to do here is to ignore the interrupt. This is to allow logging or
520 * tracing of unaligned accesses.
523 KERNEL_DEBUG_CONSTANT(
524 MACHDBG_CODE(DBG_MACH_EXCP_ALNG
, 0) | DBG_FUNC_NONE
,
525 (int)ssp
->save_srr0
- 4, (int)dar
, (int)dsisr
, (int)ssp
->save_lr
, 0);
527 if(ssp
->save_hdr
.save_misc3
) { /* Was it a handled exception? */
528 exception
= EXC_BAD_ACCESS
; /* Yes, throw exception */
529 code
= EXC_PPC_UNALIGNED
;
536 * If enaNotifyEMb is set we get here, and
537 * we have actually already emulated the instruction.
538 * All that we want to do here is to ignore the interrupt. This is to allow logging or
539 * tracing of emulated instructions.
542 KERNEL_DEBUG_CONSTANT(
543 MACHDBG_CODE(DBG_MACH_EXCP_EMUL
, 0) | DBG_FUNC_NONE
,
544 (int)ssp
->save_srr0
- 4, (int)((savearea_comm
*)ssp
)->save_misc2
, (int)dsisr
, (int)ssp
->save_lr
, 0);
547 case T_TRACE
: /* Real PPC chips */
548 case T_INSTRUCTION_BKPT
:
549 exception
= EXC_BREAKPOINT
;
550 code
= EXC_PPC_TRACE
;
551 subcode
= ssp
->save_srr0
;
555 if (ssp
->save_srr1
& MASK(SRR1_PRG_FE
)) {
556 fpu_save(thread
->machine
.curctx
);
557 UPDATE_PPC_EXCEPTION_STATE
;
558 exception
= EXC_ARITHMETIC
;
559 code
= EXC_ARITHMETIC
;
561 mp_disable_preemption();
562 subcode
= ssp
->save_fpscr
;
563 mp_enable_preemption();
565 else if (ssp
->save_srr1
& MASK(SRR1_PRG_ILL_INS
)) {
567 UPDATE_PPC_EXCEPTION_STATE
568 exception
= EXC_BAD_INSTRUCTION
;
569 code
= EXC_PPC_UNIPL_INST
;
570 subcode
= ssp
->save_srr0
;
571 } else if ((unsigned int)ssp
->save_srr1
& MASK(SRR1_PRG_PRV_INS
)) {
573 UPDATE_PPC_EXCEPTION_STATE
;
574 exception
= EXC_BAD_INSTRUCTION
;
575 code
= EXC_PPC_PRIVINST
;
576 subcode
= ssp
->save_srr0
;
577 } else if (ssp
->save_srr1
& MASK(SRR1_PRG_TRAP
)) {
580 if (copyin(ssp
->save_srr0
, (char *) &inst
, 4 )) panic("copyin failed\n");
582 if(dgWork
.dgFlags
& enaDiagTrap
) { /* Is the diagnostic trap enabled? */
583 if((inst
& 0xFFFFFFF0) == 0x0FFFFFF0) { /* Is this a TWI 31,R31,0xFFFx? */
584 if(diagTrap(ssp
, inst
& 0xF)) { /* Call the trap code */
585 ssp
->save_srr0
+= 4ULL; /* If we eat the trap, bump pc */
586 exception
= 0; /* Clear exception */
587 break; /* All done here */
593 if(inst
== 0x0FFFDDDD) { /* Is this the dtrace trap? */
594 ret
= dtrace_user_probe((ppc_saved_state_t
*)ssp
); /* Go check if it is for real and process if so... */
595 if(ret
== KERN_SUCCESS
) { /* Was it really? */
596 exception
= 0; /* Clear the exception */
597 break; /* Go flow through and out... */
602 UPDATE_PPC_EXCEPTION_STATE
;
604 if (inst
== 0x7FE00008) {
605 exception
= EXC_BREAKPOINT
;
606 code
= EXC_PPC_BREAKPOINT
;
608 exception
= EXC_SOFTWARE
;
611 subcode
= ssp
->save_srr0
;
616 case T_DTRACE_RET
: /* Are we returning from a dtrace injection? */
617 ret
= dtrace_user_probe((ppc_saved_state_t
*)ssp
); /* Call the probe function if so... */
618 if(ret
== KERN_SUCCESS
) { /* Did this actually work? */
619 exception
= 0; /* Clear the exception */
620 break; /* Go flow through and out... */
625 case T_ALTIVEC_ASSIST
:
626 UPDATE_PPC_EXCEPTION_STATE
;
627 exception
= EXC_ARITHMETIC
;
628 code
= EXC_PPC_ALTIVECASSIST
;
629 subcode
= ssp
->save_srr0
;
635 if(ssp
->save_dsisr
& dsiInvMode
) { /* Did someone try to reserve cache inhibited? */
636 UPDATE_PPC_EXCEPTION_STATE
; /* Don't even bother VM with this one */
637 exception
= EXC_BAD_ACCESS
;
642 code
= vm_fault(map
, vm_map_trunc_page(dar
),
643 dsisr
& MASK(DSISR_WRITE
) ? PROT_RW
: PROT_RO
,
644 FALSE
, THREAD_ABORTSAFE
, NULL
, vm_map_trunc_page(0));
646 if ((code
!= KERN_SUCCESS
) && (code
!= KERN_ABORTED
)) {
647 UPDATE_PPC_EXCEPTION_STATE
;
648 exception
= EXC_BAD_ACCESS
;
651 ssp
->save_hdr
.save_flags
|= SAVredrive
; /* Tell low-level to retry fault */
652 ssp
->save_dsisr
= (ssp
->save_dsisr
&
653 ~((MASK(DSISR_NOEX
) | MASK(DSISR_PROT
)))) | MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
657 case T_INSTRUCTION_ACCESS
:
658 /* Same as for data access, except fault type
659 * is PROT_EXEC and addr comes from srr0
663 code
= vm_fault(map
, vm_map_trunc_page(ssp
->save_srr0
),
664 (PROT_EXEC
| PROT_RO
), FALSE
, THREAD_ABORTSAFE
, NULL
, vm_map_trunc_page(0));
666 if ((code
!= KERN_SUCCESS
) && (code
!= KERN_ABORTED
)) {
667 UPDATE_PPC_EXCEPTION_STATE
;
668 exception
= EXC_BAD_ACCESS
;
669 subcode
= ssp
->save_srr0
;
671 ssp
->save_hdr
.save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
672 ssp
->save_srr1
= (ssp
->save_srr1
&
673 ~((unsigned long long)(MASK(DSISR_NOEX
) | MASK(DSISR_PROT
)))) | MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
678 /* AST delivery is done below */
685 bsd_uprofil(&tv
, ssp
->save_srr0
);
687 #endif /* MACH_BSD */
691 /* if this is the init task, save the exception information */
692 /* this probably is a fatal exception */
694 if(bsd_init_task
== current_task()) {
698 buf
= init_task_failure_data
;
701 buf
+= sprintf(buf
, "Exception Code = 0x%x, Subcode = 0x%x\n", code
, subcode
);
702 buf
+= sprintf(buf
, "DSISR = 0x%08x, DAR = 0x%016llx\n"
705 for (i
=0; i
<32; i
++) {
707 buf
+= sprintf(buf
, "\n%4d :",i
);
709 buf
+= sprintf(buf
, " %08x",*(&ssp
->save_r0
+i
));
712 buf
+= sprintf(buf
, "\n\n");
713 buf
+= sprintf(buf
, "cr = 0x%08X\t\t",ssp
->save_cr
);
714 buf
+= sprintf(buf
, "xer = 0x%08X\n",ssp
->save_xer
);
715 buf
+= sprintf(buf
, "lr = 0x%016llX\t\t",ssp
->save_lr
);
716 buf
+= sprintf(buf
, "ctr = 0x%016llX\n",ssp
->save_ctr
);
717 buf
+= sprintf(buf
, "srr0(iar) = 0x%016llX\t\t",ssp
->save_srr0
);
718 buf
+= sprintf(buf
, "srr1(msr) = 0x%016llX\n",ssp
->save_srr1
,
719 "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
720 "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
721 buf
+= sprintf(buf
, "\n\n");
723 /* generate some stack trace */
724 buf
+= sprintf(buf
, "Application level back trace:\n");
725 if (ssp
->save_srr1
& MASK(MSR_PR
)) {
726 char *addr
= (char*)ssp
->save_r1
;
727 unsigned int stack_buf
[3];
728 for (i
= 0; i
< 8; i
++) {
729 if (addr
== (char*)NULL
)
731 if (!copyin(ssp
->save_r1
,(char*)stack_buf
,
733 buf
+= sprintf(buf
, "0x%08X : 0x%08X\n"
735 addr
= (char*)stack_buf
[0];
744 doexception(exception
, code
, subcode
);
747 * Check to see if we need an AST, if so take care of it here
749 ml_set_interrupts_enabled(FALSE
);
751 if (USER_MODE(ssp
->save_srr1
)) {
752 myast
= ast_pending();
753 while (*myast
& AST_ALL
) {
754 ast_taken(AST_ALL
, intr
);
755 ml_set_interrupts_enabled(FALSE
);
756 myast
= ast_pending();
763 /* This routine is called from assembly before each and every system call.
764 * It must preserve r3.
767 extern int syscall_trace(int, struct savearea
*);
772 int syscall_trace(int retval
, struct savearea
*ssp
)
776 /* Always prepare to trace mach system calls */
782 argc
= mach_trap_table
[-((unsigned int)ssp
->save_r0
)].mach_trap_arg_count
;
787 for (i
=0; i
< argc
; i
++)
788 kdarg
[i
] = (int)*(&ssp
->save_r3
+ i
);
790 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC
, (-(ssp
->save_r0
))) | DBG_FUNC_START
,
791 kdarg
[0], kdarg
[1], kdarg
[2], 0, 0);
796 /* This routine is called from assembly after each mach system call
797 * It must preserve r3.
800 extern int syscall_trace_end(int, struct savearea
*);
802 int syscall_trace_end(int retval
, struct savearea
*ssp
)
804 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC
,(-((unsigned int)ssp
->save_r0
))) | DBG_FUNC_END
,
810 * called from syscall if there is an error
815 mach_exception_code_t code
,
816 mach_exception_subcode_t subcode
,
817 struct savearea
*ssp
)
819 register thread_t thread
;
821 thread
= current_thread();
824 panic("syscall error in boot phase");
826 if (!USER_MODE(ssp
->save_srr1
))
827 panic("system call called from kernel");
829 doexception(exception
, code
, subcode
);
834 /* Pass up a server syscall/exception */
838 mach_exception_code_t code
,
839 mach_exception_subcode_t sub
)
841 mach_exception_data_type_t codes
[EXCEPTION_CODE_MAX
];
845 exception_triage(exc
, codes
, 2);
848 const char *trap_type
[] = {
850 "0x100 - System reset",
851 "0x200 - Machine check",
852 "0x300 - Data access",
853 "0x400 - Inst access",
857 "0x800 - Floating point",
858 "0x900 - Decrementer",
861 "0xC00 - System call",
869 "0x1300 - Inst bkpnt",
871 "0x1600 - Altivec Assist",
882 "0x2000 - Run Mode/Trace",
/* Number of entries in trap_type[].  The sizeof-array idiom is valid here
 * because trap_type is a true array in this translation unit. */
int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
891 void unresolved_kernel_trap(int trapno
,
892 struct savearea
*ssp
,
893 __unused
unsigned int dsisr
,
897 const char *trap_name
;
899 ml_set_interrupts_enabled(FALSE
); /* Turn off interruptions */
900 lastTrace
= LLTraceSet(0); /* Disable low-level tracing */
904 struct per_proc_info
*pp
;
905 kprintf(" srr0: %016llX\n", ssp
->save_srr0
); /* (TEST/DEBUG) */
906 kprintf(" srr1: %016llX\n", ssp
->save_srr1
); /* (TEST/DEBUG) */
907 kprintf(" dar: %016llX\n", ssp
->save_dar
); /* (TEST/DEBUG) */
908 kprintf(" xcp: %08X\n", ssp
->save_exception
); /* (TEST/DEBUG) */
909 kprintf(" ins0: %08X\n", ssp
->save_instr
[0]); /* (TEST/DEBUG) */
910 kprintf(" ins1: %08X\n", ssp
->save_instr
[1]); /* (TEST/DEBUG) */
911 kprintf(" ins2: %08X\n", ssp
->save_instr
[2]); /* (TEST/DEBUG) */
912 kprintf(" ins3: %08X\n", ssp
->save_instr
[3]); /* (TEST/DEBUG) */
913 kprintf(" ins4: %08X\n", ssp
->save_instr
[4]); /* (TEST/DEBUG) */
914 kprintf(" ins5: %08X\n", ssp
->save_instr
[5]); /* (TEST/DEBUG) */
915 kprintf(" ins6: %08X\n", ssp
->save_instr
[6]); /* (TEST/DEBUG) */
916 kprintf(" ins7: %08X\n", ssp
->save_instr
[7]); /* (TEST/DEBUG) */
917 pp
= getPerProc(); /* (TEST/DEBUG) */
918 kprintf("ijsave: %016llX\n", pp
->ijsave
); /* (TEST/DEBUG) */
922 if( logPanicDataToScreen
)
923 disable_debug_output
= FALSE
;
926 if ((unsigned)trapno
<= T_MAX
)
927 trap_name
= trap_type
[trapno
/ T_VECTOR_SIZE
];
929 trap_name
= "???? unrecognized exception";
933 kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
934 cpu_number(), trap_name
, dar
, ssp
->save_srr0
);
936 print_backtrace(ssp
);
938 panic_caller
= (0xFFFF0000 | (trapno
/ T_VECTOR_SIZE
) );
939 /* Commit the panic log buffer to NVRAM, unless otherwise
940 * specified via a boot-arg.
946 /* XXX: This is yet another codepath into the debugger, which should
947 * be reworked to enter the primary panic codepath instead.
948 * The idea appears to be to enter the debugger (performing a
949 * stack switch) as soon as possible, but we do have a
950 * savearea encapsulating state (accessible by walking the savearea
951 * chain), so that's superfluous.
954 (void)Call_Debugger(trapno
, ssp
);
955 panic_plain("%s", message
);
/* Machine-check recovery labels, indexed by savearea save_misc3 in
 * handleMck() (0 = not recovered, 1 = recovered).  Note the trailing
 * space on "corrected " keeps the printf columns aligned. */
const char *corr[2] = {
	[0] = "uncorrected",
	[1] = "corrected ",
};
960 void handleMck(struct savearea
*ssp
) { /* Common machine check handler */
966 printf("Machine check (%d) - %s - pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n",
967 cpu
, corr
[ssp
->save_hdr
.save_misc3
], ssp
->save_srr0
, ssp
->save_srr1
, ssp
->save_dsisr
, ssp
->save_dar
); /* Tell us about it */
968 printf("Machine check (%d) - AsyncSrc = %016llX, CoreFIR = %016llx\n", cpu
, ssp
->save_xdat0
, ssp
->save_xdat1
);
969 printf("Machine check (%d) - L2FIR = %016llX, BusFir = %016llx\n", cpu
, ssp
->save_xdat2
, ssp
->save_xdat3
);
971 if(ssp
->save_hdr
.save_misc3
) return; /* Leave the the machine check was recovered */
973 panic("Uncorrectable machine check: pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n"
974 " AsyncSrc = %016llX, CoreFIR = %016llx\n"
975 " L2FIR = %016llX, BusFir = %016llx\n",
976 ssp
->save_srr0
, ssp
->save_srr1
, ssp
->save_dsisr
, ssp
->save_dar
,
977 ssp
->save_xdat0
, ssp
->save_xdat1
, ssp
->save_xdat2
, ssp
->save_xdat3
);
983 thread_syscall_return(
986 register thread_t thread
= current_thread();
987 register struct savearea
*regs
= USER_REGS(thread
);
989 if (kdebug_enable
&& ((unsigned int)regs
->save_r0
& 0x80000000)) {
991 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC
,(-(regs
->save_r0
))) | DBG_FUNC_END
,
996 thread_exception_return();
1003 thread_kdb_return(void)
1005 register thread_t thread
= current_thread();
1006 register struct savearea
*regs
= USER_REGS(thread
);
1008 Call_Debugger(thread
->machine
.pcb
->save_exception
, regs
);
1009 thread_exception_return();
1012 #endif /* MACH_KDB */