2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
33 #include <kern/thread.h>
34 #include <kern/exception.h>
35 #include <kern/syscall_sw.h>
36 #include <kern/cpu_data.h>
37 #include <kern/debug.h>
38 #include <mach/thread_status.h>
39 #include <vm/vm_fault.h>
40 #include <vm/vm_kern.h> /* For kernel_map */
41 #include <ppc/misc_protos.h>
43 #include <ppc/exception.h>
44 #include <ppc/proc_reg.h> /* for SR_xxx definitions */
47 #include <ppc/mappings.h>
48 #include <ppc/Firmware.h>
49 #include <ppc/low_trace.h>
50 #include <ppc/Diagnostics.h>
51 #include <ppc/hw_perfmon.h>
53 #include <sys/kdebug.h>
55 perfTrap perfTrapHook
= 0; /* Pointer to performance trap hook routine */
58 #include <ddb/db_watch.h>
59 #include <ddb/db_run.h>
60 #include <ddb/db_break.h>
61 #include <ddb/db_trap.h>
63 boolean_t let_ddb_vm_fault
= FALSE
;
64 boolean_t debug_all_traps_with_kdb
= FALSE
;
65 extern struct db_watchpoint
*db_watchpoint_list
;
66 extern boolean_t db_watchpoints_inserted
;
67 extern boolean_t db_breakpoints_inserted
;
73 extern int debugger_active
[NCPUS
];
74 extern task_t bsd_init_task
;
75 extern char init_task_failure_data
[];
/* Shorthand protection codes handed to vm_fault() below. */
#define PROT_EXEC	(VM_PROT_EXECUTE)
#define PROT_RO		(VM_PROT_READ)
#define PROT_RW		(VM_PROT_READ | VM_PROT_WRITE)
/* A useful macro to update the ppc_exception_state in the PCB
 * before calling doexception.
 * NOTE: expands in-place and relies on `trapno', `dar' and `dsisr'
 * being in scope at the expansion site.
 * (The closing brace was lost in extraction and is restored here.)
 */
#define UPDATE_PPC_EXCEPTION_STATE { \
	thread_act_t thr_act = current_act(); \
	thr_act->mact.pcb->save_dar = (uint64_t)dar; \
	thr_act->mact.pcb->save_dsisr = dsisr; \
	thr_act->mact.pcb->save_exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \
}
92 static void unresolved_kernel_trap(int trapno
,
98 struct savearea
*trap(int trapno
,
108 unsigned int space
, space2
;
110 thread_act_t thr_act
;
115 #endif /* MACH_BSD */
117 if(perfTrapHook
) { /* Is there a hook? */
118 if(perfTrapHook(trapno
, ssp
, dsisr
, (unsigned int)dar
) == KERN_SUCCESS
) return ssp
; /* If it succeeds, we are done... */
123 extern void fctx_text(void);
128 thr_act
= current_act(); /* Get current activation */
129 exception
= 0; /* Clear exception for now */
132 * Remember that we are disabled for interruptions when we come in here. Because
133 * of latency concerns, we need to enable interruptions in the interrupted process
134 * was enabled itself as soon as we can.
137 intr
= (ssp
->save_srr1
& MASK(MSR_EE
)) != 0; /* Remember if we were enabled */
139 /* Handle kernel traps first */
141 if (!USER_MODE(ssp
->save_srr1
)) {
143 * Trap came from kernel
147 case T_PREEMPT
: /* Handle a preempt trap */
148 ast_taken(AST_PREEMPT
, FALSE
);
152 perfmon_handle_pmi(ssp
);
155 case T_RESET
: /* Reset interruption */
156 if (!Call_Debugger(trapno
, ssp
))
157 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
158 break; /* We just ignore these */
161 * These trap types should never be seen by trap()
162 * in kernel mode, anyway.
163 * Some are interrupts that should be seen by
164 * interrupt() others just don't happen because they
165 * are handled elsewhere. Some could happen but are
166 * considered to be fatal in kernel mode.
169 case T_IN_VAIN
: /* Shouldn't ever see this, lowmem_vectors eats it */
170 case T_MACHINE_CHECK
:
171 case T_SYSTEM_MANAGEMENT
:
172 case T_ALTIVEC_ASSIST
:
174 case T_FP_UNAVAILABLE
:
178 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
184 * If enaNotifyEMb is set, we get here, and
185 * we have actually already emulated the unaligned access.
186 * All that we want to do here is to ignore the interrupt. This is to allow logging or
187 * tracing of unaligned accesses.
190 KERNEL_DEBUG_CONSTANT(
191 MACHDBG_CODE(DBG_MACH_EXCP_ALNG
, 0) | DBG_FUNC_NONE
,
192 (int)ssp
->save_srr0
- 4, (int)dar
, (int)dsisr
, (int)ssp
->save_lr
, 0);
197 * If enaNotifyEMb is set we get here, and
198 * we have actually already emulated the instruction.
199 * All that we want to do here is to ignore the interrupt. This is to allow logging or
200 * tracing of emulated instructions.
203 KERNEL_DEBUG_CONSTANT(
204 MACHDBG_CODE(DBG_MACH_EXCP_EMUL
, 0) | DBG_FUNC_NONE
,
205 (int)ssp
->save_srr0
- 4, (int)((savearea_comm
*)ssp
)->save_misc2
, (int)dsisr
, (int)ssp
->save_lr
, 0);
213 case T_RUNMODE_TRACE
:
214 case T_INSTRUCTION_BKPT
:
215 if (!Call_Debugger(trapno
, ssp
))
216 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
220 if (ssp
->save_srr1
& MASK(SRR1_PRG_TRAP
)) {
221 if (!Call_Debugger(trapno
, ssp
))
222 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
224 unresolved_kernel_trap(trapno
, ssp
,
232 mp_disable_preemption();
234 && debugger_active
[cpu_number()]
235 && !let_ddb_vm_fault
) {
237 * Force kdb to handle this one.
239 kdb_trap(trapno
, ssp
);
241 mp_enable_preemption();
242 #endif /* MACH_KDB */
244 if(ssp
->save_dsisr
& dsiInvMode
) { /* Did someone try to reserve cache inhibited? */
245 panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar
);
248 if(intr
) ml_set_interrupts_enabled(TRUE
); /* Enable if we were */
250 if(((dar
>> 28) < 0xE) | ((dar
>> 28) > 0xF)) { /* Is this a copy in/out? */
252 offset
= (unsigned int)dar
; /* Set the failing address */
253 map
= kernel_map
; /* No, this is a normal kernel access */
256 * Note: Some ROM device drivers will access page 0 when they start. The IOKit will
257 * set a flag to tell us to ignore any access fault on page 0. After the driver is
258 * opened, it will clear the flag.
260 if((0 == (offset
& -PAGE_SIZE
)) && /* Check for access of page 0 and */
261 ((thr_act
->mact
.specFlags
) & ignoreZeroFault
)) { /* special case of ignoring page zero faults */
262 ssp
->save_srr0
+= 4; /* Point to next instruction */
266 code
= vm_fault(map
, trunc_page_32(offset
),
267 dsisr
& MASK(DSISR_WRITE
) ? PROT_RW
: PROT_RO
,
268 FALSE
, THREAD_UNINT
, NULL
, 0);
270 if (code
!= KERN_SUCCESS
) {
271 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
273 ssp
->save_hdr
.save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
274 ssp
->save_dsisr
= (ssp
->save_dsisr
&
275 ~((MASK(DSISR_NOEX
) | MASK(DSISR_PROT
)))) | MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
280 /* If we get here, the fault was due to a copyin/out */
284 offset
= (unsigned int)(thr_act
->mact
.cioRelo
+ dar
); /* Compute the user space address */
286 code
= vm_fault(map
, trunc_page_32(offset
),
287 dsisr
& MASK(DSISR_WRITE
) ? PROT_RW
: PROT_RO
,
288 FALSE
, THREAD_UNINT
, NULL
, 0);
290 /* If we failed, there should be a recovery
293 if (code
!= KERN_SUCCESS
) {
295 if (thr_act
->thread
->recover
) {
297 act_lock_thread(thr_act
);
298 ssp
->save_srr0
= thr_act
->thread
->recover
;
299 thr_act
->thread
->recover
=
301 act_unlock_thread(thr_act
);
303 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, "copyin/out has no recovery point");
307 ssp
->save_hdr
.save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
308 ssp
->save_dsisr
= (ssp
->save_dsisr
&
309 ~((MASK(DSISR_NOEX
) | MASK(DSISR_PROT
)))) | MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
314 case T_INSTRUCTION_ACCESS
:
318 && debugger_active
[cpu_number()]
319 && !let_ddb_vm_fault
) {
321 * Force kdb to handle this one.
323 kdb_trap(trapno
, ssp
);
325 #endif /* MACH_KDB */
327 /* Same as for data access, except fault type
328 * is PROT_EXEC and addr comes from srr0
331 if(intr
) ml_set_interrupts_enabled(TRUE
); /* Enable if we were */
335 code
= vm_fault(map
, trunc_page_64(ssp
->save_srr0
),
336 PROT_EXEC
, FALSE
, THREAD_UNINT
, NULL
, 0);
338 if (code
!= KERN_SUCCESS
) {
339 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
341 ssp
->save_hdr
.save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
342 ssp
->save_srr1
= (ssp
->save_srr1
&
343 ~((unsigned long long)(MASK(DSISR_NOEX
) | MASK(DSISR_PROT
)))) | MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
347 /* Usually shandler handles all the system calls, but the
348 * atomic thread switcher may throwup (via thandler) and
349 * have to pass it up to the exception handler.
353 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
357 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
362 ml_set_interrupts_enabled(TRUE
); /* Processing for user state traps is always enabled */
366 void get_procrustime(time_value_t
*);
368 get_procrustime(&tv
);
370 #endif /* MACH_BSD */
374 * Trap came from user task
380 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
384 perfmon_handle_pmi(ssp
);
388 * These trap types should never be seen by trap()
389 * Some are interrupts that should be seen by
390 * interrupt() others just don't happen because they
391 * are handled elsewhere.
394 case T_IN_VAIN
: /* Shouldn't ever see this, lowmem_vectors eats it */
395 case T_MACHINE_CHECK
:
397 case T_FP_UNAVAILABLE
:
398 case T_SYSTEM_MANAGEMENT
:
404 ml_set_interrupts_enabled(FALSE
); /* Turn off interruptions */
406 panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
407 cpu_number(), trapno
, dsisr
, dar
, ssp
->save_srr0
, ssp
->save_srr1
);
411 ml_set_interrupts_enabled(FALSE
); /* Turn off interruptions */
412 if (!Call_Debugger(trapno
, ssp
))
413 panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
414 ssp
->save_srr0
, ssp
->save_srr1
);
415 break; /* We just ignore these */
419 * If enaNotifyEMb is set, we get here, and
420 * we have actually already emulated the unaligned access.
421 * All that we want to do here is to ignore the interrupt. This is to allow logging or
422 * tracing of unaligned accesses.
425 KERNEL_DEBUG_CONSTANT(
426 MACHDBG_CODE(DBG_MACH_EXCP_ALNG
, 0) | DBG_FUNC_NONE
,
427 (int)ssp
->save_srr0
- 4, (int)dar
, (int)dsisr
, (int)ssp
->save_lr
, 0);
432 * If enaNotifyEMb is set we get here, and
433 * we have actually already emulated the instruction.
434 * All that we want to do here is to ignore the interrupt. This is to allow logging or
435 * tracing of emulated instructions.
438 KERNEL_DEBUG_CONSTANT(
439 MACHDBG_CODE(DBG_MACH_EXCP_EMUL
, 0) | DBG_FUNC_NONE
,
440 (int)ssp
->save_srr0
- 4, (int)((savearea_comm
*)ssp
)->save_misc2
, (int)dsisr
, (int)ssp
->save_lr
, 0);
443 case T_TRACE
: /* Real PPC chips */
450 case T_INSTRUCTION_BKPT
:
451 exception
= EXC_BREAKPOINT
;
452 code
= EXC_PPC_TRACE
;
453 subcode
= (unsigned int)ssp
->save_srr0
;
457 if (ssp
->save_srr1
& MASK(SRR1_PRG_FE
)) {
458 fpu_save(thr_act
->mact
.curctx
);
459 UPDATE_PPC_EXCEPTION_STATE
;
460 exception
= EXC_ARITHMETIC
;
461 code
= EXC_ARITHMETIC
;
463 mp_disable_preemption();
464 subcode
= ssp
->save_fpscr
;
465 mp_enable_preemption();
467 else if (ssp
->save_srr1
& MASK(SRR1_PRG_ILL_INS
)) {
469 UPDATE_PPC_EXCEPTION_STATE
470 exception
= EXC_BAD_INSTRUCTION
;
471 code
= EXC_PPC_UNIPL_INST
;
472 subcode
= (unsigned int)ssp
->save_srr0
;
473 } else if ((unsigned int)ssp
->save_srr1
& MASK(SRR1_PRG_PRV_INS
)) {
475 UPDATE_PPC_EXCEPTION_STATE
;
476 exception
= EXC_BAD_INSTRUCTION
;
477 code
= EXC_PPC_PRIVINST
;
478 subcode
= (unsigned int)ssp
->save_srr0
;
479 } else if (ssp
->save_srr1
& MASK(SRR1_PRG_TRAP
)) {
483 iaddr
= (char *)ssp
->save_srr0
; /* Trim from long long and make a char pointer */
484 if (copyin(iaddr
, (char *) &inst
, 4 )) panic("copyin failed\n");
486 if(dgWork
.dgFlags
& enaDiagTrap
) { /* Is the diagnostic trap enabled? */
487 if((inst
& 0xFFFFFFF0) == 0x0FFFFFF0) { /* Is this a TWI 31,R31,0xFFFx? */
488 if(diagTrap(ssp
, inst
& 0xF)) { /* Call the trap code */
489 ssp
->save_srr0
+= 4ULL; /* If we eat the trap, bump pc */
490 exception
= 0; /* Clear exception */
491 break; /* All done here */
496 UPDATE_PPC_EXCEPTION_STATE
;
498 if (inst
== 0x7FE00008) {
499 exception
= EXC_BREAKPOINT
;
500 code
= EXC_PPC_BREAKPOINT
;
502 exception
= EXC_SOFTWARE
;
505 subcode
= (unsigned int)ssp
->save_srr0
;
509 case T_ALTIVEC_ASSIST
:
510 UPDATE_PPC_EXCEPTION_STATE
;
511 exception
= EXC_ARITHMETIC
;
512 code
= EXC_PPC_ALTIVECASSIST
;
513 subcode
= (unsigned int)ssp
->save_srr0
;
519 if(ssp
->save_dsisr
& dsiInvMode
) { /* Did someone try to reserve cache inhibited? */
520 UPDATE_PPC_EXCEPTION_STATE
; /* Don't even bother VM with this one */
521 exception
= EXC_BAD_ACCESS
;
522 subcode
= (unsigned int)dar
;
526 code
= vm_fault(map
, trunc_page_64(dar
),
527 dsisr
& MASK(DSISR_WRITE
) ? PROT_RW
: PROT_RO
,
528 FALSE
, THREAD_ABORTSAFE
, NULL
, 0);
530 if ((code
!= KERN_SUCCESS
) && (code
!= KERN_ABORTED
)) {
531 UPDATE_PPC_EXCEPTION_STATE
;
532 exception
= EXC_BAD_ACCESS
;
533 subcode
= (unsigned int)dar
;
535 ssp
->save_hdr
.save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
536 ssp
->save_dsisr
= (ssp
->save_dsisr
&
537 ~((MASK(DSISR_NOEX
) | MASK(DSISR_PROT
)))) | MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
541 case T_INSTRUCTION_ACCESS
:
542 /* Same as for data access, except fault type
543 * is PROT_EXEC and addr comes from srr0
547 code
= vm_fault(map
, trunc_page_64(ssp
->save_srr0
),
548 PROT_EXEC
, FALSE
, THREAD_ABORTSAFE
, NULL
, 0);
550 if ((code
!= KERN_SUCCESS
) && (code
!= KERN_ABORTED
)) {
551 UPDATE_PPC_EXCEPTION_STATE
;
552 exception
= EXC_BAD_ACCESS
;
553 subcode
= (unsigned int)ssp
->save_srr0
;
555 ssp
->save_hdr
.save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
556 ssp
->save_srr1
= (ssp
->save_srr1
&
557 ~((unsigned long long)(MASK(DSISR_NOEX
) | MASK(DSISR_PROT
)))) | MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
562 ml_set_interrupts_enabled(FALSE
);
563 ast_taken(AST_ALL
, intr
);
569 void bsd_uprofil(time_value_t
*, unsigned int);
571 bsd_uprofil(&tv
, ssp
->save_srr0
);
573 #endif /* MACH_BSD */
577 /* if this is the init task, save the exception information */
578 /* this probably is a fatal exception */
580 if(bsd_init_task
== current_task()) {
584 buf
= init_task_failure_data
;
587 buf
+= sprintf(buf
, "Exception Code = 0x%x, Subcode = 0x%x\n", code
, subcode
);
588 buf
+= sprintf(buf
, "DSISR = 0x%08x, DAR = 0x%016llx\n"
591 for (i
=0; i
<32; i
++) {
593 buf
+= sprintf(buf
, "\n%4d :",i
);
595 buf
+= sprintf(buf
, " %08x",*(&ssp
->save_r0
+i
));
598 buf
+= sprintf(buf
, "\n\n");
599 buf
+= sprintf(buf
, "cr = 0x%08X\t\t",ssp
->save_cr
);
600 buf
+= sprintf(buf
, "xer = 0x%08X\n",ssp
->save_xer
);
601 buf
+= sprintf(buf
, "lr = 0x%016llX\t\t",ssp
->save_lr
);
602 buf
+= sprintf(buf
, "ctr = 0x%016llX\n",ssp
->save_ctr
);
603 buf
+= sprintf(buf
, "srr0(iar) = 0x%016llX\t\t",ssp
->save_srr0
);
604 buf
+= sprintf(buf
, "srr1(msr) = 0x%016llX\n",ssp
->save_srr1
,
605 "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
606 "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
607 buf
+= sprintf(buf
, "\n\n");
609 /* generate some stack trace */
610 buf
+= sprintf(buf
, "Application level back trace:\n");
611 if (ssp
->save_srr1
& MASK(MSR_PR
)) {
612 char *addr
= (char*)ssp
->save_r1
;
613 unsigned int stack_buf
[3];
614 for (i
= 0; i
< 8; i
++) {
615 if (addr
== (char*)NULL
)
617 if (!copyin(addr
,(char*)stack_buf
,
619 buf
+= sprintf(buf
, "0x%08X : 0x%08X\n"
621 addr
= (char*)stack_buf
[0];
630 doexception(exception
, code
, subcode
);
633 * Check to see if we need an AST, if so take care of it here
635 ml_set_interrupts_enabled(FALSE
);
636 if (USER_MODE(ssp
->save_srr1
))
637 while (ast_needed(cpu_number())) {
638 ast_taken(AST_ALL
, intr
);
639 ml_set_interrupts_enabled(FALSE
);
/*
 * Called from assembly before each and every system call.
 * It must preserve r3.
 */
extern int syscall_trace(int, struct savearea *);
654 int syscall_trace(int retval
, struct savearea
*ssp
)
658 /* Always prepare to trace mach system calls */
664 argc
= mach_trap_table
[-((unsigned int)ssp
->save_r0
)].mach_trap_arg_count
;
669 for (i
=0; i
< argc
; i
++)
670 kdarg
[i
] = (int)*(&ssp
->save_r3
+ i
);
672 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC
, (-(ssp
->save_r0
))) | DBG_FUNC_START
,
673 kdarg
[0], kdarg
[1], kdarg
[2], 0, 0);
/*
 * Called from assembly after each mach system call.
 * It must preserve r3.
 */
extern int syscall_trace_end(int, struct savearea *);
684 int syscall_trace_end(int retval
, struct savearea
*ssp
)
686 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC
,(-((unsigned int)ssp
->save_r0
))) | DBG_FUNC_END
,
692 * called from syscall if there is an error
699 struct savearea
*ssp
)
701 register thread_t thread
;
703 thread
= current_thread();
706 panic("syscall error in boot phase");
708 if (!USER_MODE(ssp
->save_srr1
))
709 panic("system call called from kernel");
711 doexception(exception
, code
, subcode
);
716 /* Pass up a server syscall/exception */
723 exception_data_type_t codes
[EXCEPTION_CODE_MAX
];
727 exception(exc
, codes
, 2);
730 char *trap_type
[] = {
732 "0x100 - System reset",
733 "0x200 - Machine check",
734 "0x300 - Data access",
735 "0x400 - Inst access",
739 "0x800 - Floating point",
740 "0x900 - Decrementer",
743 "0xC00 - System call",
751 "0x1300 - Inst bkpnt",
753 "0x1600 - Altivec Assist",
764 "0x2000 - Run Mode/Trace",
771 int TRAP_TYPES
= sizeof (trap_type
) / sizeof (trap_type
[0]);
773 void unresolved_kernel_trap(int trapno
,
774 struct savearea
*ssp
,
780 extern void print_backtrace(struct savearea
*);
781 extern unsigned int debug_mode
, disableDebugOuput
;
783 ml_set_interrupts_enabled(FALSE
); /* Turn off interruptions */
784 lastTrace
= LLTraceSet(0); /* Disable low-level tracing */
786 if( logPanicDataToScreen
)
787 disableDebugOuput
= FALSE
;
790 if ((unsigned)trapno
<= T_MAX
)
791 trap_name
= trap_type
[trapno
/ T_VECTOR_SIZE
];
793 trap_name
= "???? unrecognized exception";
797 kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
798 cpu_number(), trap_name
, dar
, ssp
->save_srr0
);
800 print_backtrace(ssp
);
805 (void *)Call_Debugger(trapno
, ssp
);
810 thread_syscall_return(
813 register thread_act_t thr_act
= current_act();
814 register struct savearea
*regs
= USER_REGS(thr_act
);
816 if (kdebug_enable
&& ((unsigned int)regs
->save_r0
& 0x80000000)) {
818 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC
,(-(regs
->save_r0
))) | DBG_FUNC_END
,
823 thread_exception_return();
830 thread_kdb_return(void)
832 register thread_act_t thr_act
= current_act();
833 register thread_t cur_thr
= current_thread();
834 register struct savearea
*regs
= USER_REGS(thr_act
);
836 Call_Debugger(thr_act
->mact
.pcb
->save_exception
, regs
);
838 assert(cur_thr
->mutex_count
== 0);
839 #endif /* MACH_LDEBUG */
840 check_simple_locks();
841 thread_exception_return();
844 #endif /* MACH_KDB */