/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/thread.h>
#include <kern/exception.h>
#include <kern/syscall_sw.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <mach/thread_status.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>		/* For kernel_map */
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>	/* for SR_xxx definitions */
#include <ppc/Firmware.h>
#include <ppc/low_trace.h>

#include <sys/kdebug.h>
perfTrap perfTrapHook = 0;	/* Pointer to performance trap hook routine */
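/*
 * Illustrative sketch (not part of the original source): a performance tool
 * would typically install its handler by storing a function pointer here from
 * its own initialization code, for example:
 *
 *	extern kern_return_t my_perf_handler(int trapno, struct savearea *ssp,
 *					     unsigned int dsisr, unsigned int dar);
 *	perfTrapHook = my_perf_handler;		(hypothetical handler name)
 *
 * trap() below calls the hook first; a hook that returns KERN_SUCCESS claims
 * the exception and no further trap processing is done.
 */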
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>

boolean_t let_ddb_vm_fault = FALSE;
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;
extern int debugger_active[NCPUS];
extern task_t bsd_init_task;
extern char init_task_failure_data[];
#define	PROT_EXEC	(VM_PROT_EXECUTE)
#define	PROT_RO		(VM_PROT_READ)
#define	PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)
/* A useful macro to update the ppc_exception_state in the PCB
 * before calling doexception
 */
#define UPDATE_PPC_EXCEPTION_STATE {						\
	thread_act_t thr_act = current_act();					\
	thr_act->mact.pcb->save_dar = dar;					\
	thr_act->mact.pcb->save_dsisr = dsisr;					\
	thr_act->mact.pcb->save_exception = trapno / T_VECTOR_SIZE;	/* back to powerpc */ \
}
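/*
 * Illustrative note (not in the original source): the trap numbers handed to
 * trap() appear to be the exception vector index scaled by T_VECTOR_SIZE, so
 * dividing by T_VECTOR_SIZE converts back to the raw PowerPC vector index
 * before it is stashed in the PCB.  For a hypothetical trapno equal to
 * 3 * T_VECTOR_SIZE, save_exception would hold 3.
 */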
static void unresolved_kernel_trap(int trapno,
				   struct savearea *ssp,
				   unsigned int dsisr,
				   unsigned int dar,
				   char *message);
struct savearea *trap(int trapno,
		      struct savearea *ssp,
		      unsigned int dsisr,
		      unsigned int dar)
{
	int exception;
	int code, subcode;
	vm_map_t map;
	unsigned int offset;
	unsigned int space, space2;
	boolean_t intr;
	thread_act_t thr_act;
#ifdef	MACH_BSD
	time_value_t tv;
#endif /* MACH_BSD */
	if(perfTrapHook) {						/* Is there a hook? */
		if(perfTrapHook(trapno, ssp, dsisr, dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}

	extern void fctx_text(void);
	thr_act = current_act();					/* Get current activation */
	exception = 0;							/* Clear exception for now */
	/*
	 * Remember that we are disabled for interruptions when we come in here.  Because
	 * of latency concerns, we need to enable interruptions, if the interrupted process
	 * was enabled itself, as soon as we can.
	 */

	intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0;	/* Remember if we were enabled */
	/* Handle kernel traps first */

	if (!USER_MODE(ssp->save_srr1)) {
		/*
		 * Trap came from kernel
		 */
		switch (trapno) {
		case T_PREEMPT:			/* Handle a preempt trap */
			ast_taken(AST_PREEMPT, FALSE);
			break;
		case T_RESET:			/* Reset interruption */
#if 0
			kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n",
				ssp->save_srr0, ssp->save_srr1);
#else
			panic("Unexpected Reset exception; srr0 = %08X, srr1 = %08X\n",
				ssp->save_srr0, ssp->save_srr1);
#endif
			break;			/* We just ignore these */
		/*
		 * These trap types should never be seen by trap()
		 * in kernel mode, anyway.
		 * Some are interrupts that should be seen by
		 * interrupt(); others just don't happen because they
		 * are handled elsewhere.  Some could happen but are
		 * considered to be fatal in kernel mode.
		 */
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_MACHINE_CHECK:
		case T_SYSTEM_MANAGEMENT:
		case T_ALTIVEC_ASSIST:
		case T_FP_UNAVAILABLE:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		case T_RUNMODE_TRACE:
		case T_INSTRUCTION_BKPT:
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				if (!Call_Debugger(trapno, ssp))
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				unresolved_kernel_trap(trapno, ssp,
					dsisr, dar, NULL);
			}
			break;
		case T_DATA_ACCESS:
#if	MACH_KDB
			mp_disable_preemption();
			if (debug_mode
			    && debugger_active[cpu_number()]
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
			mp_enable_preemption();
#endif /* MACH_KDB */
			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */
			/* simple case : not SR_COPYIN segment, from kernel */
			if ((dar >> 28) != SR_COPYIN_NUM) {

				map = kernel_map;
				offset = dar;

				/*
				 * Note: Some ROM device drivers will access page 0 when they start.  The IOKit will
				 * set a flag to tell us to ignore any access fault on page 0.  After the driver is
				 * opened, it will clear the flag.
				 */
				if((0 == (dar & -PAGE_SIZE)) &&		/* Check for access of page 0 and */
				   ((thr_act->mact.specFlags) & ignoreZeroFault)) {
									/* special case of ignoring page zero faults */
					ssp->save_srr0 += 4;		/* Point to next instruction */
					break;
				}
				code = vm_fault(map, trunc_page(offset),
						dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
						FALSE, THREAD_UNINT, NULL, 0);
				if (code != KERN_SUCCESS) {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
				} else {
					ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
					ssp->save_dsisr |= MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
				}
				break;
			}
			/* If we get here, the fault was due to a copyin/out */

			map = thr_act->map;

			/* Mask out SR_COPYIN and mask in original segment */

			offset = (dar & 0x0fffffff) |
				 ((mfsrin(dar)<<8) & 0xF0000000);
			code = vm_fault(map, trunc_page(offset),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_UNINT, NULL, 0);
			/* If we failed, there should be a recovery
			 * point to return to.
			 */
			if (code != KERN_SUCCESS) {
				if (thr_act->thread->recover) {
					act_lock_thread(thr_act);
					ssp->save_srr0 = thr_act->thread->recover;
					thr_act->thread->recover = (vm_offset_t)NULL;	/* Clear the recovery point */
					act_unlock_thread(thr_act);
				} else {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
				}
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr |= MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;
		case T_INSTRUCTION_ACCESS:
#if	MACH_KDB
			if (debug_mode
			    && debugger_active[cpu_number()]
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
#endif /* MACH_KDB */
			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			map = kernel_map;
			code = vm_fault(map, trunc_page(ssp->save_srr0),
					PROT_EXEC, FALSE, THREAD_UNINT, NULL, 0);
			if (code != KERN_SUCCESS) {
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 |= MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;
		case T_SYSTEM_CALL:
			/* Usually shandler handles all the system calls, but the
			 * atomic thread switcher may throw up (via thandler) and
			 * have to pass it up to the exception handler.
			 */
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		default:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		}

	} else {

		ml_set_interrupts_enabled(TRUE);	/* Processing for user state traps is always enabled */
#ifdef	MACH_BSD
		{
		void get_procrustime(time_value_t *);

		get_procrustime(&tv);
		}
#endif /* MACH_BSD */
		/*
		 * Trap came from user task
		 */

		switch (trapno) {

		case T_PREEMPT:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		/*
		 * These trap types should never be seen by trap().
		 * Some are interrupts that should be seen by
		 * interrupt(); others just don't happen because they
		 * are handled elsewhere.
		 */
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_MACHINE_CHECK:
		case T_FP_UNAVAILABLE:
		case T_SYSTEM_MANAGEMENT:
			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */

			panic("Unexpected user state trap(cpu %d): 0x%08x DSISR=0x%08x DAR=0x%08x PC=0x%08x, MSR=0x%08x\n",
			      cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
			break;
		case T_RESET:
#if 0
			kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n",
				ssp->save_srr0, ssp->save_srr1);
#else
			panic("Unexpected Reset exception: srr0 = %08X, srr1 = %08X\n",
				ssp->save_srr0, ssp->save_srr1);
#endif
			break;			/* We just ignore these */
		case T_ALIGNMENT:
			/*
			 * If notifyUnaligned is set, we have actually already emulated the unaligned access.
			 * All that we want to do here is to ignore the interrupt.  This is to allow logging or
			 * tracing of unaligned accesses.  Note that if trapUnaligned is also set, it takes
			 * precedence and we will take a bad access fault.
			 */

			if(thr_act->mact.specFlags & notifyUnalign) {

				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
					(int)ssp->save_srr0, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
			}

			if((!(thr_act->mact.specFlags & notifyUnalign)) || (thr_act->mact.specFlags & trapUnalign)) {
				code = EXC_PPC_UNALIGNED;
				exception = EXC_BAD_ACCESS;
				subcode = dar;
			}
			break;
		case T_TRACE:			/* Real PPC chips */
		case T_INSTRUCTION_BKPT:	/* 603 PPC chips */
		case T_RUNMODE_TRACE:		/* 601 PPC chips */
			exception = EXC_BREAKPOINT;
			code = EXC_PPC_TRACE;
			subcode = ssp->save_srr0;
			break;
		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {	/* Floating point exception */
				fpu_save(thr_act->mact.curctx);
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_ARITHMETIC;
				code = EXC_ARITHMETIC;

				mp_disable_preemption();
				subcode = ssp->save_fpscr;
				mp_enable_preemption();
			}
			else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {

				UPDATE_PPC_EXCEPTION_STATE
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_UNIPL_INST;
				subcode = ssp->save_srr0;
			} else if (ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {

				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_PRIVINST;
				subcode = ssp->save_srr0;
			} else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				unsigned int inst;

				if (copyin((char *) ssp->save_srr0, (char *) &inst, 4 ))
					panic("copyin failed\n");
				UPDATE_PPC_EXCEPTION_STATE;
				if (inst == 0x7FE00008) {	/* the unconditional trap (tw 31,0,0) used as a breakpoint */
					exception = EXC_BREAKPOINT;
					code = EXC_PPC_BREAKPOINT;
				} else {
					exception = EXC_SOFTWARE;
					code = EXC_PPC_TRAP;
				}
				subcode = ssp->save_srr0;
			}
			break;
		case T_ALTIVEC_ASSIST:
			UPDATE_PPC_EXCEPTION_STATE;
			exception = EXC_ARITHMETIC;
			code = EXC_PPC_ALTIVECASSIST;
			subcode = ssp->save_srr0;
			break;
		case T_DATA_ACCESS:
			map = thr_act->map;

			code = vm_fault(map, trunc_page(dar),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_ABORTSAFE, NULL, 0);

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = dar;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr |= MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;
		case T_INSTRUCTION_ACCESS:
			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */
			map = thr_act->map;

			code = vm_fault(map, trunc_page(ssp->save_srr0),
					PROT_EXEC, FALSE, THREAD_ABORTSAFE, NULL, 0);

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = ssp->save_srr0;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 |= MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;
		case T_AST:
			ml_set_interrupts_enabled(FALSE);
			ast_taken(AST_ALL, intr);
			break;
		}
#ifdef	MACH_BSD
		{
		void bsd_uprofil(time_value_t *, unsigned int);

		bsd_uprofil(&tv, ssp->save_srr0);
		}
#endif /* MACH_BSD */
	}
	if (exception) {
		/* if this is the init task, save the exception information */
		/* this probably is a fatal exception */
		if(bsd_init_task == current_task()) {
			char *buf;
			int i;

			buf = init_task_failure_data;

			buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
			buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%08x\n", dsisr, dar);
=0; i
<32; i
++) {
532 buf
+= sprintf(buf
, "\n%4d :",i
);
534 buf
+= sprintf(buf
, " %08x",*(&ssp
->save_r0
+i
));
			buf += sprintf(buf, "\n\n");
			buf += sprintf(buf, "cr = 0x%08x\t\t", ssp->save_cr);
			buf += sprintf(buf, "xer = 0x%08x\n", ssp->save_xer);
			buf += sprintf(buf, "lr = 0x%08x\t\t", ssp->save_lr);
			buf += sprintf(buf, "ctr = 0x%08x\n", ssp->save_ctr);
			buf += sprintf(buf, "srr0(iar) = 0x%08x\t\t", ssp->save_srr0);
			buf += sprintf(buf, "srr1(msr) = 0x%08B\n", ssp->save_srr1,
				       "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
				       "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
			buf += sprintf(buf, "\n\n");
			/* generate some stack trace */
			buf += sprintf(buf, "Application level back trace:\n");
			if (ssp->save_srr1 & MASK(MSR_PR)) {
				char *addr = (char*)ssp->save_r1;
				unsigned int stack_buf[3];
				for (i = 0; i < 8; i++) {
					if (addr == (char*)NULL)
						break;
					if (!copyin(addr, (char*)stack_buf,
						    3 * sizeof(int))) {
						buf += sprintf(buf, "0x%08x : 0x%08x\n",
							       addr, stack_buf[2]);
						addr = (char*)stack_buf[0];
					} else {
						break;
					}
				}
			}
		}
		doexception(exception, code, subcode);
	}

	/*
	 * Check to see if we need an AST, if so take care of it here
	 */
	ml_set_interrupts_enabled(FALSE);
	if (USER_MODE(ssp->save_srr1))
		while (ast_needed(cpu_number())) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
		}

	return ssp;
}
/* This routine is called from assembly before each and every system call.
 * It must preserve r3.
 */

extern int syscall_trace(int, struct savearea *);

int syscall_trace(int retval, struct savearea *ssp)
{
	int i, argc;
	int kdarg[3];
	/* Always prepare to trace mach system calls */
	if (kdebug_enable && (ssp->save_r0 & 0x80000000)) {
		/* Mach trap */
		kdarg[0] = 0;
		kdarg[1] = 0;
		kdarg[2] = 0;

		argc = mach_trap_table[-(ssp->save_r0)].mach_trap_arg_count;

		if (argc > 3)
			argc = 3;

		for (i = 0; i < argc; i++)
			kdarg[i] = (int)*(&ssp->save_r3 + i);

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
			kdarg[0], kdarg[1], kdarg[2], 0, 0);
	}

	return retval;
}
/* This routine is called from assembly after each mach system call.
 * It must preserve r3.
 */

extern int syscall_trace_end(int, struct savearea *);

int syscall_trace_end(int retval, struct savearea *ssp)
{
	if (kdebug_enable && (ssp->save_r0 & 0x80000000)) {
		/* Mach trap */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_END,
			retval, 0, 0, 0, 0);
	}

	return retval;
}
/*
 * called from syscall if there is an error
 */

int syscall_error(
	int exception,
	int code,
	int subcode,
	struct savearea *ssp)
{
	register thread_t thread;

	thread = current_thread();

	if (thread == 0)
		panic("syscall error in boot phase");

	if (!USER_MODE(ssp->save_srr1))
		panic("system call called from kernel");

	doexception(exception, code, subcode);

	return 0;
}
/* Pass up a server syscall/exception */
void
doexception(
	    int exc,
	    int code,
	    int sub)
{
	exception_data_type_t codes[EXCEPTION_CODE_MAX];

	codes[0] = code;
	codes[1] = sub;
	exception(exc, codes, 2);
}
char *trap_type[] = {
	"0x100 - System reset",
	"0x200 - Machine check",
	"0x300 - Data access",
	"0x400 - Inst access",
	"0x800 - Floating point",
	"0x900 - Decrementer",
	"0xC00 - System call",
	"0x1300 - Inst bkpnt",
	"0x1600 - Altivec Assist",
	"0x2000 - Run Mode/Trace",
};

int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
void unresolved_kernel_trap(int trapno,
			    struct savearea *ssp,
			    unsigned int dsisr,
			    unsigned int dar,
			    char *message)
{
	char *trap_name;
	extern void print_backtrace(struct savearea *);
	extern unsigned int debug_mode, disableDebugOuput;
	ml_set_interrupts_enabled(FALSE);		/* Turn off interruptions */
	lastTrace = LLTraceSet(0);			/* Disable low-level tracing */

	if( logPanicDataToScreen )
		disableDebugOuput = FALSE;
	if ((unsigned)trapno <= T_MAX)
		trap_name = trap_type[trapno / T_VECTOR_SIZE];
	else
		trap_name = "???? unrecognized exception";

	if (message == NULL)
		message = trap_name;
	kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%08x PC=0x%08x\n",
		   cpu_number(), trap_name, dar, ssp->save_srr0);

	print_backtrace(ssp);

	(void *)Call_Debugger(trapno, ssp);
	panic(message);
}
/* Set the user return value and return from a Mach system call */
void
thread_syscall_return(
	kern_return_t ret)
{
	register thread_act_t thr_act = current_act();
	register struct savearea *regs = USER_REGS(thr_act);

	if (kdebug_enable && (regs->save_r0 & 0x80000000)) {
		/* Mach trap */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(regs->save_r0))) | DBG_FUNC_END,
			ret, 0, 0, 0, 0);
	}
	regs->save_r3 = ret;

	thread_exception_return();
	/*NOTREACHED*/
}
#if	MACH_KDB
void
thread_kdb_return(void)
{
	register thread_act_t thr_act = current_act();
	register thread_t cur_thr = current_thread();
	register struct savearea *regs = USER_REGS(thr_act);

	Call_Debugger(thr_act->mact.pcb->save_exception, regs);
#if	MACH_LDEBUG
	assert(cur_thr->mutex_count == 0);
#endif	/* MACH_LDEBUG */
	check_simple_locks();
	thread_exception_return();
	/*NOTREACHED*/
}
#endif	/* MACH_KDB */