/* osfmk/ppc/trap.c — Apple XNU (xnu-344.49), PowerPC trap handling */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28
29 #include <mach_kdb.h>
30 #include <mach_kdp.h>
31 #include <debug.h>
32 #include <cpus.h>
33 #include <kern/thread.h>
34 #include <kern/exception.h>
35 #include <kern/syscall_sw.h>
36 #include <kern/cpu_data.h>
37 #include <kern/debug.h>
38 #include <mach/thread_status.h>
39 #include <vm/vm_fault.h>
40 #include <vm/vm_kern.h> /* For kernel_map */
41 #include <ppc/misc_protos.h>
42 #include <ppc/trap.h>
43 #include <ppc/exception.h>
44 #include <ppc/proc_reg.h> /* for SR_xxx definitions */
45 #include <ppc/pmap.h>
46 #include <ppc/mem.h>
47 #include <ppc/Firmware.h>
48 #include <ppc/low_trace.h>
49
50 #include <sys/kdebug.h>
51
52 perfTrap perfTrapHook = 0; /* Pointer to performance trap hook routine */
53
54 #if MACH_KDB
55 #include <ddb/db_watch.h>
56 #include <ddb/db_run.h>
57 #include <ddb/db_break.h>
58 #include <ddb/db_trap.h>
59
60 boolean_t let_ddb_vm_fault = FALSE;
61 boolean_t debug_all_traps_with_kdb = FALSE;
62 extern struct db_watchpoint *db_watchpoint_list;
63 extern boolean_t db_watchpoints_inserted;
64 extern boolean_t db_breakpoints_inserted;
65
66
67
68 #endif /* MACH_KDB */
69
70 extern int debugger_active[NCPUS];
71 extern task_t bsd_init_task;
72 extern char init_task_failure_data[];
73
74
75 #define PROT_EXEC (VM_PROT_EXECUTE)
76 #define PROT_RO (VM_PROT_READ)
77 #define PROT_RW (VM_PROT_READ|VM_PROT_WRITE)
78
79 /* A useful macro to update the ppc_exception_state in the PCB
80 * before calling doexception
81 */
82 #define UPDATE_PPC_EXCEPTION_STATE { \
83 thread_act_t thr_act = current_act(); \
84 thr_act->mact.pcb->save_dar = dar; \
85 thr_act->mact.pcb->save_dsisr = dsisr; \
86 thr_act->mact.pcb->save_exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \
87 }
88
89 static void unresolved_kernel_trap(int trapno,
90 struct savearea *ssp,
91 unsigned int dsisr,
92 unsigned int dar,
93 char *message);
94
95 struct savearea *trap(int trapno,
96 struct savearea *ssp,
97 unsigned int dsisr,
98 unsigned int dar)
99 {
100 int exception;
101 int code;
102 int subcode;
103 vm_map_t map;
104 unsigned int sp;
105 unsigned int space, space2;
106 unsigned int offset;
107 thread_act_t thr_act;
108 boolean_t intr;
109 #ifdef MACH_BSD
110 time_value_t tv;
111 #endif /* MACH_BSD */
112
113 if(perfTrapHook) { /* Is there a hook? */
114 if(perfTrapHook(trapno, ssp, dsisr, dar) == KERN_SUCCESS) return ssp; /* If it succeeds, we are done... */
115 }
116
117 #if 0
118 {
119 extern void fctx_text(void);
120 fctx_test();
121 }
122 #endif
123
124 thr_act = current_act(); /* Get current activation */
125 exception = 0; /* Clear exception for now */
126
127 /*
128 * Remember that we are disabled for interruptions when we come in here. Because
129 * of latency concerns, we need to enable interruptions in the interrupted process
130 * was enabled itself as soon as we can.
131 */
132
133 intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0; /* Remember if we were enabled */
134
135 /* Handle kernel traps first */
136
137 if (!USER_MODE(ssp->save_srr1)) {
138 /*
139 * Trap came from kernel
140 */
141 switch (trapno) {
142
143 case T_PREEMPT: /* Handle a preempt trap */
144 ast_taken(AST_PREEMPT, FALSE);
145 break;
146
147 case T_RESET: /* Reset interruption */
148 #if 0
149 kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n",
150 ssp->save_srr0, ssp->save_srr1);
151 #else
152 panic("Unexpected Reset exception; srr0 = %08X, srr1 = %08X\n",
153 ssp->save_srr0, ssp->save_srr1);
154 #endif
155 break; /* We just ignore these */
156
157 /*
158 * These trap types should never be seen by trap()
159 * in kernel mode, anyway.
160 * Some are interrupts that should be seen by
161 * interrupt() others just don't happen because they
162 * are handled elsewhere. Some could happen but are
163 * considered to be fatal in kernel mode.
164 */
165 case T_DECREMENTER:
166 case T_IN_VAIN: /* Shouldn't ever see this, lowmem_vectors eats it */
167 case T_MACHINE_CHECK:
168 case T_SYSTEM_MANAGEMENT:
169 case T_ALTIVEC_ASSIST:
170 case T_INTERRUPT:
171 case T_FP_UNAVAILABLE:
172 case T_IO_ERROR:
173 case T_RESERVED:
174 case T_ALIGNMENT:
175 default:
176 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
177 break;
178
179 case T_TRACE:
180 case T_RUNMODE_TRACE:
181 case T_INSTRUCTION_BKPT:
182 if (!Call_Debugger(trapno, ssp))
183 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
184 break;
185
186 case T_PROGRAM:
187 if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
188 if (!Call_Debugger(trapno, ssp))
189 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
190 } else {
191 unresolved_kernel_trap(trapno, ssp,
192 dsisr, dar, NULL);
193 }
194 break;
195
196 case T_DATA_ACCESS:
197
198 #if MACH_KDB
199 mp_disable_preemption();
200 if (debug_mode
201 && debugger_active[cpu_number()]
202 && !let_ddb_vm_fault) {
203 /*
204 * Force kdb to handle this one.
205 */
206 kdb_trap(trapno, ssp);
207 }
208 mp_enable_preemption();
209 #endif /* MACH_KDB */
210
211 if(intr) ml_set_interrupts_enabled(TRUE); /* Enable if we were */
212
213 /* simple case : not SR_COPYIN segment, from kernel */
214 if ((dar >> 28) != SR_COPYIN_NUM) {
215 map = kernel_map;
216
217 offset = dar;
218
219
220 /*
221 * Note: Some ROM device drivers will access page 0 when they start. The IOKit will
222 * set a flag to tell us to ignore any access fault on page 0. After the driver is
223 * opened, it will clear the flag.
224 */
225 if((0 == (dar & -PAGE_SIZE)) && /* Check for access of page 0 and */
226 ((thr_act->mact.specFlags) & ignoreZeroFault)) {
227 /* special case of ignoring page zero faults */
228 ssp->save_srr0 += 4; /* Point to next instruction */
229 break;
230 }
231
232 code = vm_fault(map, trunc_page(offset),
233 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
234 FALSE, THREAD_UNINT, NULL, 0);
235
236 if (code != KERN_SUCCESS) {
237 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
238 } else {
239 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
240 ssp->save_dsisr |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */
241 }
242 break;
243 }
244
245 /* If we get here, the fault was due to a copyin/out */
246
247 map = thr_act->map;
248
249 /* Mask out SR_COPYIN and mask in original segment */
250
251 offset = (dar & 0x0fffffff) |
252 ((mfsrin(dar)<<8) & 0xF0000000);
253
254 code = vm_fault(map, trunc_page(offset),
255 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
256 FALSE, THREAD_UNINT, NULL, 0);
257
258 /* If we failed, there should be a recovery
259 * spot to rfi to.
260 */
261 if (code != KERN_SUCCESS) {
262
263 if (thr_act->thread->recover) {
264
265 act_lock_thread(thr_act);
266 ssp->save_srr0 = thr_act->thread->recover;
267 thr_act->thread->recover =
268 (vm_offset_t)NULL;
269 act_unlock_thread(thr_act);
270 } else {
271 unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
272 }
273 }
274 else {
275 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
276 ssp->save_dsisr |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */
277 }
278
279 break;
280
281 case T_INSTRUCTION_ACCESS:
282
283 #if MACH_KDB
284 if (debug_mode
285 && debugger_active[cpu_number()]
286 && !let_ddb_vm_fault) {
287 /*
288 * Force kdb to handle this one.
289 */
290 kdb_trap(trapno, ssp);
291 }
292 #endif /* MACH_KDB */
293
294 /* Same as for data access, except fault type
295 * is PROT_EXEC and addr comes from srr0
296 */
297
298 if(intr) ml_set_interrupts_enabled(TRUE); /* Enable if we were */
299
300 map = kernel_map;
301
302 code = vm_fault(map, trunc_page(ssp->save_srr0),
303 PROT_EXEC, FALSE, THREAD_UNINT, NULL, 0);
304
305 if (code != KERN_SUCCESS) {
306 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
307 } else {
308 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
309 ssp->save_srr1 |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */
310 }
311 break;
312
313 /* Usually shandler handles all the system calls, but the
314 * atomic thread switcher may throwup (via thandler) and
315 * have to pass it up to the exception handler.
316 */
317
318 case T_SYSTEM_CALL:
319 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
320 break;
321
322 case T_AST:
323 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
324 break;
325 }
326 } else {
327
328 ml_set_interrupts_enabled(TRUE); /* Processing for user state traps is always enabled */
329
330 #ifdef MACH_BSD
331 {
332 void get_procrustime(time_value_t *);
333
334 get_procrustime(&tv);
335 }
336 #endif /* MACH_BSD */
337
338
339 /*
340 * Trap came from user task
341 */
342
343 switch (trapno) {
344
345 case T_PREEMPT:
346 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
347 break;
348
349 /*
350 * These trap types should never be seen by trap()
351 * Some are interrupts that should be seen by
352 * interrupt() others just don't happen because they
353 * are handled elsewhere.
354 */
355 case T_DECREMENTER:
356 case T_IN_VAIN: /* Shouldn't ever see this, lowmem_vectors eats it */
357 case T_MACHINE_CHECK:
358 case T_INTERRUPT:
359 case T_FP_UNAVAILABLE:
360 case T_SYSTEM_MANAGEMENT:
361 case T_RESERVED:
362 case T_IO_ERROR:
363
364 default:
365
366 ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */
367
368 panic("Unexpected user state trap(cpu %d): 0x%08x DSISR=0x%08x DAR=0x%08x PC=0x%08x, MSR=0x%08x\n",
369 cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
370 break;
371
372 case T_RESET:
373 #if 0
374 kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n",
375 ssp->save_srr0, ssp->save_srr1);
376 #else
377 panic("Unexpected Reset exception: srr0 = %0x08x, srr1 = %0x08x\n",
378 ssp->save_srr0, ssp->save_srr1);
379 #endif
380 break; /* We just ignore these */
381
382 case T_ALIGNMENT:
383 /*
384 * If notifyUnaligned is set, we have actually already emulated the unaligned access.
385 * All that we want to do here is to ignore the interrupt. This is to allow logging or
386 * tracing of unaligned accesses. Note that if trapUnaligned is also set, it takes
387 * precedence and we will take a bad access fault.
388 */
389
390 if(thr_act->mact.specFlags & notifyUnalign) {
391
392 KERNEL_DEBUG_CONSTANT(
393 MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
394 (int)ssp->save_srr0, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
395 }
396
397 if((!(thr_act->mact.specFlags & notifyUnalign)) || (thr_act->mact.specFlags & trapUnalign)) {
398 code = EXC_PPC_UNALIGNED;
399 exception = EXC_BAD_ACCESS;
400 subcode = dar;
401 }
402 break;
403
404 case T_TRACE: /* Real PPC chips */
405 if (be_tracing()) {
406 add_pcbuffer();
407 return ssp;
408 }
409 /* fall through */
410
411 case T_INSTRUCTION_BKPT: /* 603 PPC chips */
412 case T_RUNMODE_TRACE: /* 601 PPC chips */
413 exception = EXC_BREAKPOINT;
414 code = EXC_PPC_TRACE;
415 subcode = ssp->save_srr0;
416 break;
417
418 case T_PROGRAM:
419 if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
420 fpu_save(thr_act->mact.curctx);
421 UPDATE_PPC_EXCEPTION_STATE;
422 exception = EXC_ARITHMETIC;
423 code = EXC_ARITHMETIC;
424
425 mp_disable_preemption();
426 subcode = ssp->save_fpscr;
427 mp_enable_preemption();
428 }
429 else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {
430
431 UPDATE_PPC_EXCEPTION_STATE
432 exception = EXC_BAD_INSTRUCTION;
433 code = EXC_PPC_UNIPL_INST;
434 subcode = ssp->save_srr0;
435 } else if (ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {
436
437 UPDATE_PPC_EXCEPTION_STATE;
438 exception = EXC_BAD_INSTRUCTION;
439 code = EXC_PPC_PRIVINST;
440 subcode = ssp->save_srr0;
441 } else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
442 unsigned int inst;
443
444 if (copyin((char *) ssp->save_srr0, (char *) &inst, 4 ))
445 panic("copyin failed\n");
446 UPDATE_PPC_EXCEPTION_STATE;
447 if (inst == 0x7FE00008) {
448 exception = EXC_BREAKPOINT;
449 code = EXC_PPC_BREAKPOINT;
450 } else {
451 exception = EXC_SOFTWARE;
452 code = EXC_PPC_TRAP;
453 }
454 subcode = ssp->save_srr0;
455 }
456 break;
457
458 case T_ALTIVEC_ASSIST:
459 UPDATE_PPC_EXCEPTION_STATE;
460 exception = EXC_ARITHMETIC;
461 code = EXC_PPC_ALTIVECASSIST;
462 subcode = ssp->save_srr0;
463 break;
464
465 case T_DATA_ACCESS:
466 map = thr_act->map;
467
468 code = vm_fault(map, trunc_page(dar),
469 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
470 FALSE, THREAD_ABORTSAFE, NULL, 0);
471
472 if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
473 UPDATE_PPC_EXCEPTION_STATE;
474 exception = EXC_BAD_ACCESS;
475 subcode = dar;
476 } else {
477 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
478 ssp->save_dsisr |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */
479 }
480 break;
481
482 case T_INSTRUCTION_ACCESS:
483 /* Same as for data access, except fault type
484 * is PROT_EXEC and addr comes from srr0
485 */
486 map = thr_act->map;
487
488 code = vm_fault(map, trunc_page(ssp->save_srr0),
489 PROT_EXEC, FALSE, THREAD_ABORTSAFE, NULL, 0);
490
491 if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
492 UPDATE_PPC_EXCEPTION_STATE;
493 exception = EXC_BAD_ACCESS;
494 subcode = ssp->save_srr0;
495 } else {
496 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
497 ssp->save_srr1 |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */
498 }
499 break;
500
501 case T_AST:
502 ml_set_interrupts_enabled(FALSE);
503 ast_taken(AST_ALL, intr);
504 break;
505
506 }
507 #ifdef MACH_BSD
508 {
509 void bsd_uprofil(time_value_t *, unsigned int);
510
511 bsd_uprofil(&tv, ssp->save_srr0);
512 }
513 #endif /* MACH_BSD */
514 }
515
516 if (exception) {
517 /* if this is the init task, save the exception information */
518 /* this probably is a fatal exception */
519 if(bsd_init_task == current_task()) {
520 char *buf;
521 int i;
522
523 buf = init_task_failure_data;
524
525
526 buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
527 buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%08x\n"
528 , dsisr, dar);
529
530 for (i=0; i<32; i++) {
531 if ((i % 8) == 0) {
532 buf += sprintf(buf, "\n%4d :",i);
533 }
534 buf += sprintf(buf, " %08x",*(&ssp->save_r0+i));
535 }
536
537 buf += sprintf(buf, "\n\n");
538 buf += sprintf(buf, "cr = 0x%08x\t\t",ssp->save_cr);
539 buf += sprintf(buf, "xer = 0x%08x\n",ssp->save_xer);
540 buf += sprintf(buf, "lr = 0x%08x\t\t",ssp->save_lr);
541 buf += sprintf(buf, "ctr = 0x%08x\n",ssp->save_ctr);
542 buf += sprintf(buf, "srr0(iar) = 0x%08x\t\t",ssp->save_srr0);
543 buf += sprintf(buf, "srr1(msr) = 0x%08B\n",ssp->save_srr1,
544 "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
545 "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
546 buf += sprintf(buf, "\n\n");
547
548 /* generate some stack trace */
549 buf += sprintf(buf, "Application level back trace:\n");
550 if (ssp->save_srr1 & MASK(MSR_PR)) {
551 char *addr = (char*)ssp->save_r1;
552 unsigned int stack_buf[3];
553 for (i = 0; i < 8; i++) {
554 if (addr == (char*)NULL)
555 break;
556 if (!copyin(addr,(char*)stack_buf,
557 3 * sizeof(int))) {
558 buf += sprintf(buf, "0x%08x : 0x%08x\n"
559 ,addr,stack_buf[2]);
560 addr = (char*)stack_buf[0];
561 } else {
562 break;
563 }
564 }
565 }
566 buf[0] = '\0';
567 }
568 doexception(exception, code, subcode);
569 }
570 /* AST delivery
571 * Check to see if we need an AST, if so take care of it here
572 */
573 ml_set_interrupts_enabled(FALSE);
574 if (USER_MODE(ssp->save_srr1))
575 while (ast_needed(cpu_number())) {
576 ast_taken(AST_ALL, intr);
577 ml_set_interrupts_enabled(FALSE);
578 }
579
580 return ssp;
581 }
582
583 /* This routine is called from assembly before each and every system call.
584 * It must preserve r3.
585 */
586
587 extern int syscall_trace(int, struct savearea *);
588
589
590 extern int pmdebug;
591
592 int syscall_trace(int retval, struct savearea *ssp)
593 {
594 int i, argc;
595
596 int kdarg[3];
597 /* Always prepare to trace mach system calls */
598 if (kdebug_enable && (ssp->save_r0 & 0x80000000)) {
599 /* Mach trap */
600 kdarg[0]=0;
601 kdarg[1]=0;
602 kdarg[2]=0;
603 argc = mach_trap_table[-(ssp->save_r0)].mach_trap_arg_count;
604 if (argc > 3)
605 argc = 3;
606 for (i=0; i < argc; i++)
607 kdarg[i] = (int)*(&ssp->save_r3 + i);
608 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
609 kdarg[0], kdarg[1], kdarg[2], 0, 0);
610 }
611
612 return retval;
613 }
614
615 /* This routine is called from assembly after each mach system call
616 * It must preserve r3.
617 */
618
619 extern int syscall_trace_end(int, struct savearea *);
620
621 int syscall_trace_end(int retval, struct savearea *ssp)
622 {
623 if (kdebug_enable && (ssp->save_r0 & 0x80000000)) {
624 /* Mach trap */
625 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(ssp->save_r0))) | DBG_FUNC_END,
626 retval, 0, 0, 0, 0);
627 }
628 return retval;
629 }
630
631 /*
632 * called from syscall if there is an error
633 */
634
635 int syscall_error(
636 int exception,
637 int code,
638 int subcode,
639 struct savearea *ssp)
640 {
641 register thread_t thread;
642
643 thread = current_thread();
644
645 if (thread == 0)
646 panic("syscall error in boot phase");
647
648 if (!USER_MODE(ssp->save_srr1))
649 panic("system call called from kernel");
650
651 doexception(exception, code, subcode);
652
653 return 0;
654 }
655
656 /* Pass up a server syscall/exception */
657 void
658 doexception(
659 int exc,
660 int code,
661 int sub)
662 {
663 exception_data_type_t codes[EXCEPTION_CODE_MAX];
664
665 codes[0] = code;
666 codes[1] = sub;
667 exception(exc, codes, 2);
668 }
669
/*
 * Human-readable names for the PowerPC exception vectors.  Indexed by
 * trapno / T_VECTOR_SIZE (see unresolved_kernel_trap()); entries past
 * 0x2000 are software-defined pseudo-vectors.
 */
char *trap_type[] = {
	"Unknown",
	"0x100 - System reset",
	"0x200 - Machine check",
	"0x300 - Data access",
	"0x400 - Inst access",
	"0x500 - Ext int",
	"0x600 - Alignment",
	"0x700 - Program",
	"0x800 - Floating point",
	"0x900 - Decrementer",
	"0xA00 - n/a",
	"0xB00 - n/a",
	"0xC00 - System call",
	"0xD00 - Trace",
	"0xE00 - FP assist",
	"0xF00 - Perf mon",
	"0xF20 - VMX",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"0x1300 - Inst bkpnt",
	"0x1400 - Sys mgmt",
	"0x1600 - Altivec Assist",
	"0x1700 - Thermal",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"0x2000 - Run Mode/Trace",
	"Signal Processor",
	"Preemption",
	"Context Switch",
	"Shutdown",
	"System Failure"
};
/* Number of entries in trap_type[] */
int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
712
713 void unresolved_kernel_trap(int trapno,
714 struct savearea *ssp,
715 unsigned int dsisr,
716 unsigned int dar,
717 char *message)
718 {
719 char *trap_name;
720 extern void print_backtrace(struct savearea *);
721 extern unsigned int debug_mode, disableDebugOuput;
722
723 ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */
724 lastTrace = LLTraceSet(0); /* Disable low-level tracing */
725
726 if( logPanicDataToScreen )
727 disableDebugOuput = FALSE;
728
729 debug_mode++;
730 if ((unsigned)trapno <= T_MAX)
731 trap_name = trap_type[trapno / T_VECTOR_SIZE];
732 else
733 trap_name = "???? unrecognized exception";
734 if (message == NULL)
735 message = trap_name;
736
737 kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%08x PC=0x%08x\n",
738 cpu_number(), trap_name, dar, ssp->save_srr0);
739
740 print_backtrace(ssp);
741
742 draw_panic_dialog();
743
744 if( panicDebugging )
745 (void *)Call_Debugger(trapno, ssp);
746 panic(message);
747 }
748
749 void
750 thread_syscall_return(
751 kern_return_t ret)
752 {
753 register thread_act_t thr_act = current_act();
754 register struct savearea *regs = USER_REGS(thr_act);
755
756 if (kdebug_enable && (regs->save_r0 & 0x80000000)) {
757 /* Mach trap */
758 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END,
759 ret, 0, 0, 0, 0);
760 }
761 regs->save_r3 = ret;
762
763 thread_exception_return();
764 /*NOTREACHED*/
765 }
766
767
768 #if MACH_KDB
/*
 * thread_kdb_return -- re-enter the debugger for the current activation's
 * saved exception, run consistency checks, and return to user mode.
 * Does not return to the caller.
 */
void
thread_kdb_return(void)
{
	register thread_act_t thr_act = current_act();
	register thread_t cur_thr = current_thread();
	register struct savearea *regs = USER_REGS(thr_act);

	/* Hand the saved exception and user registers back to the debugger */
	Call_Debugger(thr_act->mact.pcb->save_exception, regs);
#if MACH_LDEBUG
	/* Lock-debugging build: no mutexes may be held on return to user mode */
	assert(cur_thr->mutex_count == 0);
#endif /* MACH_LDEBUG */
	check_simple_locks();	/* sanity check before leaving the kernel; semantics defined elsewhere */
	thread_exception_return();
	/*NOTREACHED*/
}
784 #endif /* MACH_KDB */