2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
30 #include <kern/thread.h>
31 #include <kern/exception.h>
32 #include <kern/syscall_sw.h>
33 #include <kern/cpu_data.h>
34 #include <kern/debug.h>
35 #include <mach/thread_status.h>
36 #include <vm/vm_fault.h>
37 #include <vm/vm_kern.h> /* For kernel_map */
38 #include <ppc/misc_protos.h>
40 #include <ppc/exception.h>
41 #include <ppc/proc_reg.h> /* for SR_xxx definitions */
44 #include <ppc/fpu_protos.h>
46 #include <sys/kdebug.h>
49 #include <ddb/db_watch.h>
50 #include <ddb/db_run.h>
51 #include <ddb/db_break.h>
52 #include <ddb/db_trap.h>
/*
 * Debugger (DDB/KDB) control state and externs used by the trap handlers.
 * NOTE(review): the leading decimal tokens on these lines are residue of a
 * line-numbered extraction, not valid C — verify against pristine source.
 */
/* When TRUE, a kernel-mode VM fault is allowed to be resolved normally
 * instead of being forced into kdb_trap() (see the debugger_active checks
 * in trap()). */
54 boolean_t let_ddb_vm_fault
= FALSE
;
/* Presumably forces every trap into KDB when TRUE — not referenced in
 * this chunk; confirm against the full file. */
55 boolean_t debug_all_traps_with_kdb
= FALSE
;
/* DDB watchpoint/breakpoint bookkeeping, defined in the ddb module. */
56 extern struct db_watchpoint
*db_watchpoint_list
;
57 extern boolean_t db_watchpoints_inserted
;
58 extern boolean_t db_breakpoints_inserted
;
/* Per-CPU flag, indexed by cpu_number(): nonzero while the debugger is
 * active on that CPU. */
64 extern int debugger_active
[NCPUS
];
/* Identity of the BSD init task, and a buffer into which trap() records
 * exception/register state when init takes a (probably fatal) exception. */
65 extern vm_address_t bsd_init_task
;
66 extern char init_task_failure_data
[];
69 * XXX don't pass VM_PROT_EXECUTE to vm_fault(), execute permission is implied
70 * in either R or RW (note: the pmap module knows this). This is done for the
71 * benefit of programs that execute out of their data space (ala lisp).
72 * If we didn't do this in that scenario, the ITLB miss code would call us
73 * and we would call vm_fault() with RX permission. However, the space was
74 * probably vm_allocate()ed with just RW and vm_fault would fail. The "right"
75 * solution to me is to have the un*x server always allocate data with RWX for
76 * compatibility with existing binaries.
/*
 * Fault protection codes handed to vm_fault().  Execute permission is
 * deliberately NOT requested — PROT_EXEC is just VM_PROT_READ — because
 * the pmap layer implies execute from R/RW, per the rationale in the
 * comment above.
 */
79 #define PROT_EXEC (VM_PROT_READ)
80 #define PROT_RO (VM_PROT_READ)
81 #define PROT_RW (VM_PROT_READ|VM_PROT_WRITE)
83 /* A useful macro to update the ppc_exception_state in the PCB
84 * before calling doexception
86 #define UPDATE_PPC_EXCEPTION_STATE { \
87 thread_act_t thr_act = current_act(); \
88 struct ppc_exception_state *es = &thr_act->mact.pcb->es; \
91 es->exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \
94 static void unresolved_kernel_trap(int trapno
,
95 struct ppc_saved_state
*ssp
,
100 struct ppc_saved_state
*trap(int trapno
,
101 struct ppc_saved_state
*ssp
,
110 unsigned int space
,space2
;
112 thread_act_t thr_act
= current_act();
116 #endif /* MACH_BSD */
119 * Remember that we are disabled for interruptions when we come in here. Because
120 * of latency concerns, we need to enable interruptions if the interrupted process
121 * was enabled itself as soon as we can.
124 intr
= (ssp
->srr1
& MASK(MSR_EE
)) != 0; /* Remember if we were enabled */
126 /* Handle kernel traps first */
128 if (!USER_MODE(ssp
->srr1
)) {
130 * Trap came from kernel
134 case T_PREEMPT
: /* Handle a preempt trap */
135 ast_taken(AST_PREEMPT
, FALSE
);
138 case T_RESET
: /* Reset interruption */
140 kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n",
141 ssp
->srr0
, ssp
->srr1
);
143 panic("Unexpected Reset exception; srr0 = %08X, srr1 = %08X\n",
144 ssp
->srr0
, ssp
->srr1
);
146 break; /* We just ignore these */
149 * These trap types should never be seen by trap()
150 * in kernel mode, anyway.
151 * Some are interrupts that should be seen by
152 * interrupt() others just don't happen because they
153 * are handled elsewhere. Some could happen but are
154 * considered to be fatal in kernel mode.
157 case T_IN_VAIN
: /* Shouldn't ever see this, lowmem_vectors eats it */
158 case T_MACHINE_CHECK
:
159 case T_SYSTEM_MANAGEMENT
:
160 case T_ALTIVEC_ASSIST
:
162 case T_FP_UNAVAILABLE
:
166 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
170 case T_RUNMODE_TRACE
:
171 case T_INSTRUCTION_BKPT
:
172 if (!Call_Debugger(trapno
, ssp
))
173 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
177 if (ssp
->srr1
& MASK(SRR1_PRG_TRAP
)) {
178 if (!Call_Debugger(trapno
, ssp
))
179 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
181 unresolved_kernel_trap(trapno
, ssp
,
187 if (alignment(dsisr
, dar
, ssp
)) {
188 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
195 mp_disable_preemption();
197 && debugger_active
[cpu_number()]
198 && !let_ddb_vm_fault
) {
200 * Force kdb to handle this one.
202 kdb_trap(trapno
, ssp
);
204 mp_enable_preemption();
205 #endif /* MACH_KDB */
207 if(intr
) ml_set_interrupts_enabled(TRUE
); /* Enable if we were */
209 /* simple case : not SR_COPYIN segment, from kernel */
210 if ((dar
>> 28) != SR_COPYIN_NUM
) {
217 * Note: Some ROM device drivers will access page 0 when they start. The IOKit will
218 * set a flag to tell us to ignore any access fault on page 0. After the driver is
219 * opened, it will clear the flag.
221 if((0 == (dar
& -PAGE_SIZE
)) && /* Check for access of page 0 and */
222 ((thr_act
->mact
.specFlags
) & ignoreZeroFault
)) {
223 /* special case of ignoring page zero faults */
224 ssp
->srr0
+= 4; /* Point to next instruction */
228 code
= vm_fault(map
, trunc_page(offset
),
229 dsisr
& MASK(DSISR_WRITE
) ? PROT_RW
: PROT_RO
,
230 FALSE
, THREAD_UNINT
);
232 if (code
!= KERN_SUCCESS
) {
233 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
235 ((savearea
*)ssp
)->save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
236 ((savearea
*)ssp
)->save_dsisr
|= MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
241 /* If we get here, the fault was due to a copyin/out */
245 /* Mask out SR_COPYIN and mask in original segment */
247 offset
= (dar
& 0x0fffffff) |
248 ((mfsrin(dar
)<<8) & 0xF0000000);
250 code
= vm_fault(map
, trunc_page(offset
),
251 dsisr
& MASK(DSISR_WRITE
) ? PROT_RW
: PROT_RO
,
252 FALSE
, THREAD_ABORTSAFE
);
254 /* If we failed, there should be a recovery
257 if (code
!= KERN_SUCCESS
) {
259 if (thr_act
->thread
->recover
) {
261 act_lock_thread(thr_act
);
262 ssp
->srr0
= thr_act
->thread
->recover
;
263 thr_act
->thread
->recover
=
265 act_unlock_thread(thr_act
);
267 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, "copyin/out has no recovery point");
271 ((savearea
*)ssp
)->save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
272 ((savearea
*)ssp
)->save_dsisr
|= MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
277 case T_INSTRUCTION_ACCESS
:
281 && debugger_active
[cpu_number()]
282 && !let_ddb_vm_fault
) {
284 * Force kdb to handle this one.
286 kdb_trap(trapno
, ssp
);
288 #endif /* MACH_KDB */
290 /* Same as for data access, except fault type
291 * is PROT_EXEC and addr comes from srr0
294 if(intr
) ml_set_interrupts_enabled(TRUE
); /* Enable if we were */
298 code
= vm_fault(map
, trunc_page(ssp
->srr0
),
299 PROT_EXEC
, FALSE
, THREAD_UNINT
);
301 if (code
!= KERN_SUCCESS
) {
302 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
304 ((savearea
*)ssp
)->save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
305 ssp
->srr1
|= MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
309 /* Usually shandler handles all the system calls, but the
310 * atomic thread switcher may throw up (via thandler) and
311 * have to pass it up to the exception handler.
315 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
319 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
324 ml_set_interrupts_enabled(TRUE
); /* Processing for user state traps is always enabled */
328 void get_procrustime(time_value_t
*);
330 get_procrustime(&tv
);
332 #endif /* MACH_BSD */
336 * Trap came from user task
342 unresolved_kernel_trap(trapno
, ssp
, dsisr
, dar
, NULL
);
346 * These trap types should never be seen by trap()
347 * Some are interrupts that should be seen by
348 * interrupt() others just don't happen because they
349 * are handled elsewhere.
352 case T_IN_VAIN
: /* Shouldn't ever see this, lowmem_vectors eats it */
353 case T_MACHINE_CHECK
:
355 case T_FP_UNAVAILABLE
:
356 case T_SYSTEM_MANAGEMENT
:
362 ml_set_interrupts_enabled(FALSE
); /* Turn off interruptions */
364 panic("Unexpected user state trap(cpu %d): 0x%08x DSISR=0x%08x DAR=0x%08x PC=0x%08x, MSR=0x%08x\n",
365 cpu_number(), trapno
, dsisr
, dar
, ssp
->srr0
, ssp
->srr1
);
370 kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n",
371 ssp
->srr0
, ssp
->srr1
);
373 panic("Unexpected Reset exception: srr0 = %0x08x, srr1 = %0x08x\n",
374 ssp
->srr0
, ssp
->srr1
);
376 break; /* We just ignore these */
379 if (alignment(dsisr
, dar
, ssp
)) {
380 code
= EXC_PPC_UNALIGNED
;
381 exception
= EXC_BAD_ACCESS
;
386 case T_TRACE
: /* Real PPC chips */
393 case T_INSTRUCTION_BKPT
: /* 603 PPC chips */
394 case T_RUNMODE_TRACE
: /* 601 PPC chips */
395 exception
= EXC_BREAKPOINT
;
396 code
= EXC_PPC_TRACE
;
401 if (ssp
->srr1
& MASK(SRR1_PRG_FE
)) {
403 UPDATE_PPC_EXCEPTION_STATE
;
404 exception
= EXC_ARITHMETIC
;
405 code
= EXC_ARITHMETIC
;
407 mp_disable_preemption();
408 subcode
= current_act()->mact
.FPU_pcb
->fs
.fpscr
;
409 mp_enable_preemption();
411 else if (ssp
->srr1
& MASK(SRR1_PRG_ILL_INS
)) {
413 UPDATE_PPC_EXCEPTION_STATE
414 exception
= EXC_BAD_INSTRUCTION
;
415 code
= EXC_PPC_UNIPL_INST
;
417 } else if (ssp
->srr1
& MASK(SRR1_PRG_PRV_INS
)) {
419 UPDATE_PPC_EXCEPTION_STATE
;
420 exception
= EXC_BAD_INSTRUCTION
;
421 code
= EXC_PPC_PRIVINST
;
423 } else if (ssp
->srr1
& MASK(SRR1_PRG_TRAP
)) {
426 if (copyin((char *) ssp
->srr0
, (char *) &inst
, 4 ))
427 panic("copyin failed\n");
428 UPDATE_PPC_EXCEPTION_STATE
;
429 if (inst
== 0x7FE00008) {
430 exception
= EXC_BREAKPOINT
;
431 code
= EXC_PPC_BREAKPOINT
;
433 exception
= EXC_SOFTWARE
;
440 case T_ALTIVEC_ASSIST
:
441 UPDATE_PPC_EXCEPTION_STATE
;
442 exception
= EXC_ARITHMETIC
;
443 code
= EXC_PPC_ALTIVECASSIST
;
450 code
= vm_fault(map
, trunc_page(dar
),
451 dsisr
& MASK(DSISR_WRITE
) ? PROT_RW
: PROT_RO
,
452 FALSE
, THREAD_ABORTSAFE
);
454 if ((code
!= KERN_SUCCESS
) && (code
!= KERN_ABORTED
)) {
455 UPDATE_PPC_EXCEPTION_STATE
;
456 exception
= EXC_BAD_ACCESS
;
459 ((savearea
*)ssp
)->save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
460 ((savearea
*)ssp
)->save_dsisr
|= MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
464 case T_INSTRUCTION_ACCESS
:
465 /* Same as for data access, except fault type
466 * is PROT_EXEC and addr comes from srr0
470 code
= vm_fault(map
, trunc_page(ssp
->srr0
),
471 PROT_EXEC
, FALSE
, THREAD_ABORTSAFE
);
473 if ((code
!= KERN_SUCCESS
) && (code
!= KERN_ABORTED
)) {
474 UPDATE_PPC_EXCEPTION_STATE
;
475 exception
= EXC_BAD_ACCESS
;
478 ((savearea
*)ssp
)->save_flags
|= SAVredrive
; /* Tell low-level to re-try fault */
479 ssp
->srr1
|= MASK(DSISR_HASH
); /* Make sure this is marked as a miss */
484 ml_set_interrupts_enabled(FALSE
);
485 ast_taken(AST_ALL
, intr
);
491 void bsd_uprofil(time_value_t
*, unsigned int);
493 bsd_uprofil(&tv
, ssp
->srr0
);
495 #endif /* MACH_BSD */
499 /* if this is the init task, save the exception information */
500 /* this probably is a fatal exception */
501 if(bsd_init_task
== current_task()) {
505 buf
= init_task_failure_data
;
508 buf
+= sprintf(buf
, "Exception Code = 0x%x, Subcode = 0x%x\n", code
, subcode
);
509 buf
+= sprintf(buf
, "DSISR = 0x%08x, DAR = 0x%08x\n"
512 for (i
=0; i
<32; i
++) {
514 buf
+= sprintf(buf
, "\n%4d :",i
);
516 buf
+= sprintf(buf
, " %08x",*(&ssp
->r0
+i
));
519 buf
+= sprintf(buf
, "\n\n");
520 buf
+= sprintf(buf
, "cr = 0x%08x\t\t",ssp
->cr
);
521 buf
+= sprintf(buf
, "xer = 0x%08x\n",ssp
->xer
);
522 buf
+= sprintf(buf
, "lr = 0x%08x\t\t",ssp
->lr
);
523 buf
+= sprintf(buf
, "ctr = 0x%08x\n",ssp
->ctr
);
524 buf
+= sprintf(buf
, "srr0(iar) = 0x%08x\t\t",ssp
->srr0
);
525 buf
+= sprintf(buf
, "srr1(msr) = 0x%08B\n",ssp
->srr1
,
526 "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
527 "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
528 buf
+= sprintf(buf
, "\n\n");
530 /* generate some stack trace */
531 buf
+= sprintf(buf
, "Application level back trace:\n");
532 if (ssp
->srr1
& MASK(MSR_PR
)) {
533 char *addr
= (char*)ssp
->r1
;
534 unsigned int stack_buf
[3];
535 for (i
= 0; i
< 8; i
++) {
536 if (addr
== (char*)NULL
)
538 if (!copyin(addr
,(char*)stack_buf
,
540 buf
+= sprintf(buf
, "0x%08x : 0x%08x\n"
542 addr
= (char*)stack_buf
[0];
550 doexception(exception
, code
, subcode
);
553 * Check to see if we need an AST, if so take care of it here
555 ml_set_interrupts_enabled(FALSE
);
556 if (USER_MODE(ssp
->srr1
))
557 while (ast_needed(cpu_number())) {
558 ast_taken(AST_ALL
, intr
);
559 ml_set_interrupts_enabled(FALSE
);
565 /* This routine is called from assembly before each and every system call.
566 * It must preserve r3.
569 extern int syscall_trace(int, struct ppc_saved_state
*);
574 int syscall_trace(int retval
, struct ppc_saved_state
*ssp
)
579 /* Always prepare to trace mach system calls */
580 if (kdebug_enable
&& (ssp
->r0
& 0x80000000)) {
585 argc
= mach_trap_table
[-(ssp
->r0
)].mach_trap_arg_count
;
588 for (i
=0; i
< argc
; i
++)
589 kdarg
[i
] = (int)*(&ssp
->r3
+ i
);
590 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC
, (-(ssp
->r0
))) | DBG_FUNC_START
,
591 kdarg
[0], kdarg
[1], kdarg
[2], 0, 0);
597 /* This routine is called from assembly after each mach system call
598 * It must preserve r3.
601 extern int syscall_trace_end(int, struct ppc_saved_state
*);
603 int syscall_trace_end(int retval
, struct ppc_saved_state
*ssp
)
605 if (kdebug_enable
&& (ssp
->r0
& 0x80000000)) {
607 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC
,(-(ssp
->r0
))) | DBG_FUNC_END
,
614 * called from syscall if there is an error
621 struct ppc_saved_state
*ssp
)
623 register thread_t thread
;
625 thread
= current_thread();
628 panic("syscall error in boot phase");
630 if (!USER_MODE(ssp
->srr1
))
631 panic("system call called from kernel");
633 doexception(exception
, code
, subcode
);
638 /* Pass up a server syscall/exception */
645 exception_data_type_t codes
[EXCEPTION_CODE_MAX
];
649 exception(exc
, codes
, 2);
/*
 * Human-readable names for the PowerPC exception vectors, indexed by
 * (trapno / T_VECTOR_SIZE) — see the lookup in unresolved_kernel_trap().
 * NOTE(review): this extraction appears to have dropped several table
 * entries and the closing "};" — verify against the pristine source.
 */
652 char *trap_type
[] = {
654 "0x100 - System reset",
655 "0x200 - Machine check",
656 "0x300 - Data access",
657 "0x400 - Inst access",
661 "0x800 - Floating point",
662 "0x900 - Decrementer",
665 "0xC00 - System call",
673 "0x1300 - Inst bkpnt",
675 "0x1600 - Altivec Assist",
686 "0x2000 - Run Mode/Trace",
/* Element count of trap_type[] (table is indexed, not scanned, in the
 * code visible here — bounds come from T_MAX). */
693 int TRAP_TYPES
= sizeof (trap_type
) / sizeof (trap_type
[0]);
695 void unresolved_kernel_trap(int trapno
,
696 struct ppc_saved_state
*ssp
,
702 extern void print_backtrace(struct ppc_saved_state
*);
703 extern unsigned int debug_mode
, disableDebugOuput
;
705 ml_set_interrupts_enabled(FALSE
); /* Turn off interruptions */
707 disableDebugOuput
= FALSE
;
709 if ((unsigned)trapno
<= T_MAX
)
710 trap_name
= trap_type
[trapno
/ T_VECTOR_SIZE
];
712 trap_name
= "???? unrecognized exception";
716 printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%08x PC=0x%08x\n",
717 cpu_number(), trap_name
, dar
, ssp
->srr0
);
719 print_backtrace(ssp
);
721 (void *)Call_Debugger(trapno
, ssp
);
726 thread_syscall_return(
729 register thread_act_t thr_act
= current_act();
730 register struct ppc_saved_state
*regs
= USER_REGS(thr_act
);
732 if (kdebug_enable
&& (regs
->r0
& 0x80000000)) {
734 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC
,(-(regs
->r0
))) | DBG_FUNC_END
,
739 thread_exception_return();
746 thread_kdb_return(void)
748 register thread_act_t thr_act
= current_act();
749 register thread_t cur_thr
= current_thread();
750 register struct ppc_saved_state
*regs
= USER_REGS(thr_act
);
752 Call_Debugger(thr_act
->mact
.pcb
->es
.exception
, regs
);
754 assert(cur_thr
->mutex_count
== 0);
755 #endif /* MACH_LDEBUG */
756 check_simple_locks();
757 thread_exception_return();
760 #endif /* MACH_KDB */