/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>

#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/exception.h>
#include <kern/syscall_sw.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>

#include <vm/vm_fault.h>
#include <vm/vm_kern.h>         /* For kernel_map */

#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>       /* for SR_xxx definitions */
#include <ppc/mappings.h>
#include <ppc/Firmware.h>
#include <ppc/low_trace.h>
#include <ppc/Diagnostics.h>
#include <ppc/hw_perfmon.h>

#include <sys/kdebug.h>
perfCallback perfTrapHook = 0;          /* Pointer to CHUD trap hook routine */
perfCallback perfASTHook  = 0;          /* Pointer to CHUD AST hook routine */
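/*
 * Both hooks start out NULL and are installed at run time by the performance
 * tool (CHUD).  trap() below checks them on every trap: the AST hook runs when
 * an AST_PPC_CHUD_* bit is pending, and the trap hook may claim the trap
 * outright by returning KERN_SUCCESS.
 */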
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>

boolean_t let_ddb_vm_fault = FALSE;
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;
extern task_t bsd_init_task;
extern char init_task_failure_data[];
extern int not_in_kdp;
#define PROT_EXEC       (VM_PROT_EXECUTE)
#define PROT_RO         (VM_PROT_READ)
#define PROT_RW         (VM_PROT_READ|VM_PROT_WRITE)
/* A useful macro to update the ppc_exception_state in the PCB
 * before calling doexception
 */
#define UPDATE_PPC_EXCEPTION_STATE {                                            \
    thread_t _thread = current_thread();                                        \
    _thread->machine.pcb->save_dar = (uint64_t)dar;                             \
    _thread->machine.pcb->save_dsisr = dsisr;                                   \
    _thread->machine.pcb->save_exception = trapno / T_VECTOR_SIZE;  /* back to powerpc */ \
}
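/*
 * Note that the macro relies on locals named trapno, dar and dsisr being in
 * scope, which trap() below provides.  trapno arrives as a vector offset;
 * dividing by T_VECTOR_SIZE turns it back into a PowerPC exception index for
 * the saved state.
 */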
void unresolved_kernel_trap(int trapno,
                            struct savearea *ssp,
                            unsigned int dsisr,
                            addr64_t dar,
                            const char *message);
static void handleMck(struct savearea *ssp);        /* Common machine check handler */
#ifdef MACH_BSD
extern void get_procrustime(time_value_t *);
extern void bsd_uprofil(time_value_t *, user_addr_t);
#endif /* MACH_BSD */
struct savearea *trap(int trapno,
                      struct savearea *ssp,
                      unsigned int dsisr,
                      addr64_t dar)
{
    int exception;
    int code;
    int subcode;
    vm_map_t map;
    unsigned int space, space2;
    vm_map_offset_t offset;
    thread_t thread = current_thread();
    boolean_t intr;
    ast_t *myast;
#ifdef MACH_BSD
    time_value_t tv;
#endif /* MACH_BSD */
    myast = ast_pending();
    if(perfASTHook) {
        if(*myast & AST_PPC_CHUD_ALL) {
            perfASTHook(trapno, ssp, dsisr, (unsigned int)dar);
        }
    } else {
        *myast &= ~AST_PPC_CHUD_ALL;
    }

    if(perfTrapHook) {                          /* Is there a hook? */
        if(perfTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp;  /* If it succeeds, we are done... */
    }
    extern void fctx_text(void);

    exception = 0;                              /* Clear exception for now */

    /*
     * Remember that we are disabled for interruptions when we come in here.  Because
     * of latency concerns, we need to re-enable interruptions as soon as we can if the
     * interrupted process had them enabled itself.
     */

    intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0;    /* Remember if we were enabled */
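    /*
     * MSR_EE is the external-interrupt-enable bit of the interrupted context's MSR.
     * The T_DATA_ACCESS and T_INSTRUCTION_ACCESS paths below use intr to re-enable
     * interruptions before calling into the VM, but only if they were on to begin with.
     */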
    /* Handle kernel traps first */

    if (!USER_MODE(ssp->save_srr1)) {
        /*
         * Trap came from kernel
         */
        switch (trapno) {

        case T_PREEMPT:                 /* Handle a preempt trap */
            ast_taken(AST_PREEMPTION, FALSE);
            break;

        case T_PERF_MON:
            perfmon_handle_pmi(ssp);
            break;

        case T_RESET:                   /* Reset interruption */
            if (!Call_Debugger(trapno, ssp))
                unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;                      /* We just ignore these */
        /*
         * These trap types should never be seen by trap()
         * in kernel mode, anyway.
         * Some are interrupts that should be seen by
         * interrupt(); others just don't happen because they
         * are handled elsewhere.  Some could happen but are
         * considered to be fatal in kernel mode.
         */
        case T_IN_VAIN:                 /* Shouldn't ever see this, lowmem_vectors eats it */
        case T_SYSTEM_MANAGEMENT:
        case T_ALTIVEC_ASSIST:
        case T_FP_UNAVAILABLE:
            unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;
        /*
         * Here we handle a machine check in the kernel
         */
        case T_MACHINE_CHECK:
            handleMck(ssp);             /* Common to both user and kernel */
            break;
        case T_ALIGNMENT:
        /*
         * If enaNotifyEMb is set, we get here, and
         * we have actually already emulated the unaligned access.
         * All that we want to do here is to ignore the interrupt.  This is to allow
         * logging or tracing of unaligned accesses.
         */
            if(ssp->save_hdr.save_misc3) {      /* Was it a handled exception? */
                unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);  /* Go panic */
                break;
            }
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
                (int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
            break;
        case T_EMULATE:
        /*
         * If enaNotifyEMb is set we get here, and
         * we have actually already emulated the instruction.
         * All that we want to do here is to ignore the interrupt.  This is to allow
         * logging or tracing of emulated instructions.
         */
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
                (int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
            break;
        case T_RUNMODE_TRACE:
        case T_INSTRUCTION_BKPT:
            if (!Call_Debugger(trapno, ssp))
                unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;

        case T_PROGRAM:
            if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
                if (!Call_Debugger(trapno, ssp))
                    unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            } else {
                unresolved_kernel_trap(trapno, ssp,
                                       dsisr, dar, NULL);
            }
            break;
        case T_DATA_ACCESS:
#if MACH_KDB
            mp_disable_preemption();
            if (debug_mode
                && getPerProc()->debugger_active
                && !let_ddb_vm_fault) {
                /*
                 * Force kdb to handle this one.
                 */
                kdb_trap(trapno, ssp);
            }
            mp_enable_preemption();
#endif /* MACH_KDB */

            /* can we take this during normal panic dump operation? */
            if (debug_mode
                && getPerProc()->debugger_active
                && !not_in_kdp) {
                /*
                 * Access fault while in kernel core dump.
                 */
                kdp_dump_trap(trapno, ssp);
            }
            if(ssp->save_dsisr & dsiInvMode) {      /* Did someone try to reserve cache inhibited? */
                panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar);
            }

            if(intr) ml_set_interrupts_enabled(TRUE);   /* Enable if we were */

            if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF)) {     /* User memory window access? */

                offset = (vm_map_offset_t)dar;      /* Set the failing address */
                map = kernel_map;                   /* No, this is a normal kernel access */
/*
 *  Note: Some ROM device drivers will access page 0 when they start.  The IOKit will
 *  set a flag to tell us to ignore any access fault on page 0.  After the driver is
 *  opened, it will clear the flag.
 */
                if((0 == (offset & -PAGE_SIZE)) &&                  /* Check for access of page 0 and */
                  ((thread->machine.specFlags) & ignoreZeroFault)) {    /* special case of ignoring page zero faults */
                    ssp->save_srr0 += 4;            /* Point to next instruction */
                    break;
                }

                code = vm_fault(map, vm_map_trunc_page(offset),
                                dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
                                FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

                if (code != KERN_SUCCESS) {
                    unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
                } else {
                    ssp->save_hdr.save_flags |= SAVredrive;     /* Tell low-level to re-try fault */
                    ssp->save_dsisr = (ssp->save_dsisr &
                        ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);  /* Make sure this is marked as a miss */
                }
                break;
            }
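            /*
             * The redrive path above does not resume the faulting instruction directly.
             * SAVredrive tells the low-level handler to run its hash-table fault path
             * again now that vm_fault() has entered the page, and the protection and
             * no-execute bits are cleared from DSISR so the retry is treated as a plain
             * translation miss.
             */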
            /* If we get here, the fault was due to a user memory window access */

            map = thread->map;

            offset = (vm_map_offset_t)(thread->machine.umwRelo + dar);  /* Compute the user space address */

            code = vm_fault(map, vm_map_trunc_page(offset),
                            dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
                            FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

            /* If we failed, there should be a recovery
             * point to return to.
             */
            if (code != KERN_SUCCESS) {
                if (thread->recover) {
                    ssp->save_srr0 = thread->recover;
                    thread->recover = (vm_offset_t)NULL;
                } else {
                    unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
                }
            } else {
                ssp->save_hdr.save_flags |= SAVredrive;     /* Tell low-level to re-try fault */
                ssp->save_dsisr = (ssp->save_dsisr &
                    ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);  /* Make sure this is marked as a miss */
            }
            break;
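            /*
             * copyin()/copyout() touch user memory through the user memory window
             * (kernel addresses whose top nibble is 0xE or 0xF, tested above).
             * umwRelo is the relocation that converts a window address back into the
             * corresponding user-space address, so the fault is resolved against the
             * thread's own map, and thread->recover supplies the error return point.
             */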
        case T_INSTRUCTION_ACCESS:

#if MACH_KDB
            if (debug_mode
                && getPerProc()->debugger_active
                && !let_ddb_vm_fault) {
                /*
                 * Force kdb to handle this one.
                 */
                kdb_trap(trapno, ssp);
            }
#endif /* MACH_KDB */

            /* Same as for data access, except fault type
             * is PROT_EXEC and addr comes from srr0
             */

            if(intr) ml_set_interrupts_enabled(TRUE);   /* Enable if we were */

            map = kernel_map;

            code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
                            PROT_EXEC, FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

            if (code != KERN_SUCCESS) {
                unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            } else {
                ssp->save_hdr.save_flags |= SAVredrive;     /* Tell low-level to re-try fault */
                ssp->save_srr1 = (ssp->save_srr1 &
                    ~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);  /* Make sure this is marked as a miss */
            }
            break;
        case T_SYSTEM_CALL:
            /* Usually shandler handles all the system calls, but the
             * atomic thread switcher may throw up (via thandler) and
             * have to pass it up to the exception handler.
             */
            unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;

        default:
            unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;
        }
    } else {

        /*
         * Processing for user state traps with interrupt enabled
         * For T_AST, interrupts are enabled in the AST delivery
         */
        if (trapno != T_AST)
            ml_set_interrupts_enabled(TRUE);

#ifdef MACH_BSD
        get_procrustime(&tv);
#endif /* MACH_BSD */
        /*
         * Trap came from user task
         */
        switch (trapno) {

        case T_PREEMPT:
            unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;

        case T_PERF_MON:
            perfmon_handle_pmi(ssp);
            break;

        /*
         * These trap types should never be seen by trap()
         * Some are interrupts that should be seen by
         * interrupt(); others just don't happen because they
         * are handled elsewhere.
         */
        case T_IN_VAIN:                 /* Shouldn't ever see this, lowmem_vectors eats it */
        case T_FP_UNAVAILABLE:
        case T_SYSTEM_MANAGEMENT:
            ml_set_interrupts_enabled(FALSE);       /* Turn off interruptions */

            panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
                  cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
            break;
        /*
         * Here we handle a machine check in user state
         */
        case T_MACHINE_CHECK:
            handleMck(ssp);             /* Common to both user and kernel */
            break;

        case T_RESET:
            ml_set_interrupts_enabled(FALSE);       /* Turn off interruptions */
            if (!Call_Debugger(trapno, ssp))
                panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
                      ssp->save_srr0, ssp->save_srr1);
            break;                      /* We just ignore these */
        case T_ALIGNMENT:
        /*
         * If enaNotifyEMb is set, we get here, and
         * we have actually already emulated the unaligned access.
         * All that we want to do here is to ignore the interrupt.  This is to allow
         * logging or tracing of unaligned accesses.
         */
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
                (int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);

            if(ssp->save_hdr.save_misc3) {          /* Was it a handled exception? */
                exception = EXC_BAD_ACCESS;         /* Yes, throw exception */
                code = EXC_PPC_UNALIGNED;
                subcode = (unsigned int)dar;
            }
            break;
        case T_EMULATE:
        /*
         * If enaNotifyEMb is set we get here, and
         * we have actually already emulated the instruction.
         * All that we want to do here is to ignore the interrupt.  This is to allow
         * logging or tracing of emulated instructions.
         */
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
                (int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
            break;
        case T_TRACE:                   /* Real PPC chips */
        case T_INSTRUCTION_BKPT:
            exception = EXC_BREAKPOINT;
            code = EXC_PPC_TRACE;
            subcode = (unsigned int)ssp->save_srr0;
            break;
        case T_PROGRAM:
            if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
                fpu_save(thread->machine.curctx);
                UPDATE_PPC_EXCEPTION_STATE;
                exception = EXC_ARITHMETIC;
                code = EXC_ARITHMETIC;

                mp_disable_preemption();
                subcode = ssp->save_fpscr;
                mp_enable_preemption();
            }
            else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {

                UPDATE_PPC_EXCEPTION_STATE;
                exception = EXC_BAD_INSTRUCTION;
                code = EXC_PPC_UNIPL_INST;
                subcode = (unsigned int)ssp->save_srr0;
            } else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {

                UPDATE_PPC_EXCEPTION_STATE;
                exception = EXC_BAD_INSTRUCTION;
                code = EXC_PPC_PRIVINST;
                subcode = (unsigned int)ssp->save_srr0;
            } else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
                unsigned int inst;

                //iaddr = CAST_DOWN(char *, ssp->save_srr0);   /* Trim from long long and make a char pointer */
                if (copyin(ssp->save_srr0, (char *) &inst, 4 )) panic("copyin failed\n");

                if(dgWork.dgFlags & enaDiagTrap) {              /* Is the diagnostic trap enabled? */
                    if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) {     /* Is this a TWI 31,R31,0xFFFx? */
                        if(diagTrap(ssp, inst & 0xF)) {         /* Call the trap code */
                            ssp->save_srr0 += 4ULL;             /* If we eat the trap, bump pc */
                            exception = 0;                      /* Clear exception */
                            break;                              /* All done here */
                        }
                    }
                }

                UPDATE_PPC_EXCEPTION_STATE;

                if (inst == 0x7FE00008) {
                    exception = EXC_BREAKPOINT;
                    code = EXC_PPC_BREAKPOINT;
                } else {
                    exception = EXC_SOFTWARE;
                    code = EXC_PPC_TRAP;
                }
                subcode = (unsigned int)ssp->save_srr0;
            }
            break;
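            /*
             * 0x0FFFFFF0 masks a "twi 31,r31,0xFFFx" instruction, reserved here as a
             * diagnostic trap; the low nibble selects the diagnostic function passed to
             * diagTrap().  0x7FE00008 is the unconditional trap instruction
             * ("tw 31,0,0", usually written just "trap"), which debuggers plant as a
             * breakpoint; any other trap instruction is reported as EXC_SOFTWARE.
             */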
        case T_ALTIVEC_ASSIST:
            UPDATE_PPC_EXCEPTION_STATE;
            exception = EXC_ARITHMETIC;
            code = EXC_PPC_ALTIVECASSIST;
            subcode = (unsigned int)ssp->save_srr0;
            break;
        case T_DATA_ACCESS:
            map = thread->map;

            if(ssp->save_dsisr & dsiInvMode) {      /* Did someone try to reserve cache inhibited? */
                UPDATE_PPC_EXCEPTION_STATE;         /* Don't even bother VM with this one */
                exception = EXC_BAD_ACCESS;
                subcode = (unsigned int)dar;
                break;
            }

            code = vm_fault(map, vm_map_trunc_page(dar),
                            dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
                            FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

            if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
                UPDATE_PPC_EXCEPTION_STATE;
                exception = EXC_BAD_ACCESS;
                subcode = (unsigned int)dar;
            } else {
                ssp->save_hdr.save_flags |= SAVredrive;     /* Tell low-level to re-try fault */
                ssp->save_dsisr = (ssp->save_dsisr &
                    ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);  /* Make sure this is marked as a miss */
            }
            break;
        case T_INSTRUCTION_ACCESS:
            /* Same as for data access, except fault type
             * is PROT_EXEC and addr comes from srr0
             */
            map = thread->map;

            code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
                            PROT_EXEC, FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

            if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
                UPDATE_PPC_EXCEPTION_STATE;
                exception = EXC_BAD_ACCESS;
                subcode = (unsigned int)ssp->save_srr0;
            } else {
                ssp->save_hdr.save_flags |= SAVredrive;     /* Tell low-level to re-try fault */
                ssp->save_srr1 = (ssp->save_srr1 &
                    ~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);  /* Make sure this is marked as a miss */
            }
            break;
        case T_AST:
            /* AST delivery is done below */
            break;
        }

#ifdef MACH_BSD
        bsd_uprofil(&tv, ssp->save_srr0);
#endif /* MACH_BSD */
    }
    if (exception) {
        /* if this is the init task, save the exception information */
        /* this probably is a fatal exception */
        if(bsd_init_task == current_task()) {
            char *buf;
            int i;

            buf = init_task_failure_data;

            buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
            buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n", dsisr, dar);

            for (i = 0; i < 32; i++) {
                if ((i % 8) == 0) {
                    buf += sprintf(buf, "\n%4d :", i);
                }
                buf += sprintf(buf, " %08x", *(&ssp->save_r0 + i));
            }

            buf += sprintf(buf, "\n\n");
            buf += sprintf(buf, "cr        = 0x%08X\t\t", ssp->save_cr);
            buf += sprintf(buf, "xer       = 0x%08X\n", ssp->save_xer);
            buf += sprintf(buf, "lr        = 0x%016llX\t\t", ssp->save_lr);
            buf += sprintf(buf, "ctr       = 0x%016llX\n", ssp->save_ctr);
            buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t", ssp->save_srr0);
            buf += sprintf(buf, "srr1(msr) = 0x%016llX\n", ssp->save_srr1,
                           "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
                           "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
            buf += sprintf(buf, "\n\n");

            /* generate some stack trace */
            buf += sprintf(buf, "Application level back trace:\n");
            if (ssp->save_srr1 & MASK(MSR_PR)) {
                char *addr = (char*)ssp->save_r1;
                unsigned int stack_buf[3];
                for (i = 0; i < 8; i++) {
                    if (addr == (char*)NULL)
                        break;
                    if (!copyin(ssp->save_r1, (char*)stack_buf,
                                3 * sizeof(unsigned int))) {
                        buf += sprintf(buf, "0x%08X : 0x%08X\n",
                                       addr, stack_buf[2]);
                        addr = (char*)stack_buf[0];
                    } else {
                        break;
                    }
                }
            }
        }

        doexception(exception, code, subcode);
    }
    /*
     * Check to see if we need an AST, if so take care of it here
     */
    ml_set_interrupts_enabled(FALSE);

    if (USER_MODE(ssp->save_srr1)) {
        myast = ast_pending();
        while (*myast & AST_ALL) {
            ast_taken(AST_ALL, intr);
            ml_set_interrupts_enabled(FALSE);
            myast = ast_pending();
        }
    }

    return ssp;
}
/* This routine is called from assembly before each and every system call.
 * It must preserve r3.
 */

extern int syscall_trace(int, struct savearea *);

int syscall_trace(int retval, struct savearea *ssp)
{
    int i, argc;
    int kdarg[3];

    /* Always prepare to trace mach system calls */

    kdarg[0] = 0;
    kdarg[1] = 0;
    kdarg[2] = 0;

    argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count;

    if (argc > 3)
        argc = 3;

    for (i = 0; i < argc; i++)
        kdarg[i] = (int)*(&ssp->save_r3 + i);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
        kdarg[0], kdarg[1], kdarg[2], 0, 0);

    return retval;
}
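/*
 * Mach traps are dispatched with a negative trap number in r0, so -save_r0 indexes
 * mach_trap_table; the first three arguments (r3-r5) are captured for the kdebug
 * DBG_FUNC_START record, and r3 (retval) is passed straight back to the caller.
 */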
/* This routine is called from assembly after each mach system call
 * It must preserve r3.
 */

extern int syscall_trace_end(int, struct savearea *);

int syscall_trace_end(int retval, struct savearea *ssp)
{
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-((unsigned int)ssp->save_r0))) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);
    return retval;
}
/*
 * called from syscall if there is an error
 */

int syscall_error(
    int exception,
    int code,
    int subcode,
    struct savearea *ssp)
{
    register thread_t thread;

    thread = current_thread();

    if (thread == 0)
        panic("syscall error in boot phase");

    if (!USER_MODE(ssp->save_srr1))
        panic("system call called from kernel");

    doexception(exception, code, subcode);

    return 0;
}
/* Pass up a server syscall/exception */
void
doexception(
    int exc,
    int code,
    int sub)
{
    exception_data_type_t codes[EXCEPTION_CODE_MAX];

    codes[0] = code;
    codes[1] = sub;
    exception_triage(exc, codes, 2);
}
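/*
 * exception_triage() raises a Mach exception on behalf of the current thread,
 * offering (exc, codes[0..1]) to the thread, task and host exception ports in
 * that order; if no handler claims it, the task is ultimately terminated.
 */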
char *trap_type[] = {
    "0x100 - System reset",
    "0x200 - Machine check",
    "0x300 - Data access",
    "0x400 - Inst access",
    "0x800 - Floating point",
    "0x900 - Decrementer",
    "0xC00 - System call",
    "0x1300 - Inst bkpnt",
    "0x1600 - Altivec Assist",
    "0x2000 - Run Mode/Trace",
};

int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
void unresolved_kernel_trap(int trapno,
                            struct savearea *ssp,
                            unsigned int dsisr,
                            addr64_t dar,
                            const char *message)
{
    char *trap_name;
    extern void print_backtrace(struct savearea *);
    extern unsigned int debug_mode, disableDebugOuput;
    extern unsigned long panic_caller;

    ml_set_interrupts_enabled(FALSE);       /* Turn off interruptions */
    lastTrace = LLTraceSet(0);              /* Disable low-level tracing */

    if( logPanicDataToScreen )
        disableDebugOuput = FALSE;

    if ((unsigned)trapno <= T_MAX)
        trap_name = trap_type[trapno / T_VECTOR_SIZE];
    else
        trap_name = "???? unrecognized exception";

    kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
               cpu_number(), trap_name, dar, ssp->save_srr0);

    print_backtrace(ssp);

    panic_caller = (0xFFFF0000 | (trapno / T_VECTOR_SIZE) );

    (void)Call_Debugger(trapno, ssp);
}
const char *corr[2] = {"uncorrected", "corrected "};
static void handleMck(struct savearea *ssp) {   /* Common machine check handler */

    int cpu;

    cpu = cpu_number();

    printf("Machine check (%d) - %s - pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n",
        cpu, corr[ssp->save_hdr.save_misc3], ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar);  /* Tell us about it */
    printf("Machine check (%d) - AsyncSrc = %016llX, CoreFIR = %016llx\n", cpu, ssp->save_xdat0, ssp->save_xdat1);
    printf("Machine check (%d) - L2FIR = %016llX, BusFir = %016llx\n", cpu, ssp->save_xdat2, ssp->save_xdat3);

    if(ssp->save_hdr.save_misc3) return;        /* Leave if the machine check was recovered */

    panic("Uncorrectable machine check: pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n"
          "    AsyncSrc = %016llX, CoreFIR = %016llx\n"
          "    L2FIR = %016llX, BusFir = %016llx\n",
          ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar,
          ssp->save_xdat0, ssp->save_xdat1, ssp->save_xdat2, ssp->save_xdat3);
}
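/*
 * By the time handleMck() runs, save_misc3 is nonzero if the machine check was
 * already recovered: it indexes corr[] for the "corrected"/"uncorrected" tag and
 * decides whether we can simply return or must panic with the FIR register dump.
 */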
void
thread_syscall_return(
    kern_return_t ret)
{
    register thread_t thread = current_thread();
    register struct savearea *regs = USER_REGS(thread);

    if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) {
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(regs->save_r0))) | DBG_FUNC_END,
            ret, 0, 0, 0, 0);
    }
    regs->save_r3 = ret;

    thread_exception_return();
}
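/*
 * The test on bit 0x80000000 of save_r0 picks out Mach traps (negative trap
 * numbers), so the kdebug end record is only emitted for them; r3 is the
 * PowerPC syscall return register, which is why ret is stored there before
 * returning to user space.
 */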
#if MACH_KDB
void
thread_kdb_return(void)
{
    register thread_t thread = current_thread();
    register struct savearea *regs = USER_REGS(thread);

    Call_Debugger(thread->machine.pcb->save_exception, regs);
    thread_exception_return();
}
#endif /* MACH_KDB */