/* osfmk/ppc/trap.c -- from the xnu-201.42.3 source drop */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <mach_kdb.h>
#include <mach_kdp.h>
#include <debug.h>
#include <cpus.h>
#include <kern/thread.h>
#include <kern/exception.h>
#include <kern/syscall_sw.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <mach/thread_status.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>		/* For kernel_map */
#include <ppc/misc_protos.h>
#include <ppc/trap.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>	/* for SR_xxx definitions */
#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/fpu_protos.h>

#include <sys/kdebug.h>

#if	MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>

boolean_t let_ddb_vm_fault = FALSE;
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

#endif	/* MACH_KDB */

extern int debugger_active[NCPUS];
extern vm_address_t bsd_init_task;
extern char init_task_failure_data[];

/*
 * XXX don't pass VM_PROT_EXECUTE to vm_fault(), execute permission is implied
 * in either R or RW (note: the pmap module knows this). This is done for the
 * benefit of programs that execute out of their data space (a la Lisp).
 * If we didn't do this in that scenario, the ITLB miss code would call us
 * and we would call vm_fault() with RX permission. However, the space was
 * probably vm_allocate()ed with just RW and vm_fault would fail. The "right"
 * solution to me is to have the un*x server always allocate data with RWX for
 * compatibility with existing binaries.
 */

#define	PROT_EXEC	(VM_PROT_READ)
#define	PROT_RO		(VM_PROT_READ)
#define	PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)

/* A useful macro to update the ppc_exception_state in the PCB
 * before calling doexception
 */
#define UPDATE_PPC_EXCEPTION_STATE {					\
	thread_act_t thr_act = current_act();				\
	struct ppc_exception_state *es = &thr_act->mact.pcb->es;	\
	es->dar = dar;							\
	es->dsisr = dsisr;						\
	es->exception = trapno / T_VECTOR_SIZE;	/* back to powerpc */	\
}
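/*
 * Note (a sketch of the convention this file appears to rely on): the T_xxx
 * trap codes passed to trap() are the PowerPC exception vector numbers scaled
 * by T_VECTOR_SIZE (see ppc/exception.h), so dividing by T_VECTOR_SIZE above
 * recovers the raw vector number that user-level exception handlers expect to
 * see in the exception state.
 */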

static void unresolved_kernel_trap(int trapno,
				   struct ppc_saved_state *ssp,
				   unsigned int dsisr,
				   unsigned int dar,
				   char *message);

struct ppc_saved_state *trap(int trapno,
			     struct ppc_saved_state *ssp,
			     unsigned int dsisr,
			     unsigned int dar)
{
	int exception=0;
	int code;
	int subcode;
	vm_map_t map;
	unsigned int sp;
	unsigned int space,space2;
	unsigned int offset;
	thread_act_t thr_act = current_act();
	boolean_t intr;
#ifdef	MACH_BSD
	time_value_t tv;
#endif /* MACH_BSD */

/*
 * Remember that we come in here with interruptions disabled. Because of
 * latency concerns, we need to re-enable interruptions as soon as we can
 * if the interrupted context had them enabled itself.
 */

	intr = (ssp->srr1 & MASK(MSR_EE)) != 0;	/* Remember if we were enabled */

	/* Handle kernel traps first */

	if (!USER_MODE(ssp->srr1)) {
		/*
		 * Trap came from kernel
		 */
		switch (trapno) {

		case T_PREEMPT:			/* Handle a preempt trap */
			ast_taken(AST_PREEMPT, FALSE);
			break;

		case T_RESET:			/* Reset interruption */
#if 0
			kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n",
				ssp->srr0, ssp->srr1);
#else
			panic("Unexpected Reset exception; srr0 = %08X, srr1 = %08X\n",
				ssp->srr0, ssp->srr1);
#endif
			break;			/* We just ignore these */

		/*
		 * These trap types should never be seen by trap() in kernel
		 * mode anyway.  Some are interrupts that should be seen by
		 * interrupt(); others just don't happen because they are
		 * handled elsewhere.  Some could happen but are considered
		 * fatal in kernel mode.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_MACHINE_CHECK:
		case T_SYSTEM_MANAGEMENT:
		case T_ALTIVEC_ASSIST:
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_IO_ERROR:
		case T_RESERVED:
		default:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_TRACE:
		case T_RUNMODE_TRACE:
		case T_INSTRUCTION_BKPT:
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PROGRAM:
			if (ssp->srr1 & MASK(SRR1_PRG_TRAP)) {
				if (!Call_Debugger(trapno, ssp))
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				unresolved_kernel_trap(trapno, ssp,
						       dsisr, dar, NULL);
			}
			break;

		case T_ALIGNMENT:
			if (alignment(dsisr, dar, ssp)) {
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			}
			break;

		case T_DATA_ACCESS:

#if	MACH_KDB
			mp_disable_preemption();
			if (debug_mode
			    && debugger_active[cpu_number()]
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
			mp_enable_preemption();
#endif	/* MACH_KDB */

			if (intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			/* simple case : not SR_COPYIN segment, from kernel */
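			/*
			 * The top four bits of the effective address select one of the
			 * 16 segment registers.  SR_COPYIN_NUM names the segment
			 * reserved for the copyin()/copyout() window, so a fault in any
			 * other segment is an ordinary kernel-space fault and is
			 * resolved against kernel_map.
			 */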
			if ((dar >> 28) != SR_COPYIN_NUM) {
				map = kernel_map;

				offset = dar;

				/*
				 * Note: Some ROM device drivers will access page 0 when they start.  The IOKit will
				 * set a flag to tell us to ignore any access fault on page 0.  After the driver is
				 * opened, it will clear the flag.
				 */
				if ((0 == (dar & -PAGE_SIZE)) &&		/* Check for access of page 0 and */
				    ((thr_act->mact.specFlags) & ignoreZeroFault)) {
					/* special case of ignoring page zero faults */
					ssp->srr0 += 4;				/* Point to next instruction */
					break;
				}

				code = vm_fault(map, trunc_page(offset),
						dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
						FALSE, THREAD_UNINT);

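				/*
				 * A rough sketch of the redrive path: on a successful
				 * vm_fault() we do not emulate the access here.  Instead the
				 * savearea is flagged with SAVredrive so the low-level
				 * handler re-runs the original data access once we return,
				 * now that the page is resident and mapped.
				 */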
				if (code != KERN_SUCCESS) {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
				} else {
					((savearea *)ssp)->save_flags |= SAVredrive;		/* Tell low-level to re-try fault */
					((savearea *)ssp)->save_dsisr |= MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
				}
				break;
			}

			/* If we get here, the fault was due to a copyin/out */

			map = thr_act->map;

			/* Mask out SR_COPYIN and mask in original segment */

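			/*
			 * copyin()/copyout() map the user segment in question through the
			 * dedicated SR_COPYIN segment register, so the DAR seen here is a
			 * kernel-side alias.  Reading that segment register back (mfsrin)
			 * is presumably how the original user segment bits are recovered
			 * and glued onto the low 28 bits of the faulting address below.
			 */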
			offset = (dar & 0x0fffffff) |
				 ((mfsrin(dar)<<8) & 0xF0000000);

			code = vm_fault(map, trunc_page(offset),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_ABORTSAFE);

			/* If we failed, there should be a recovery
			 * spot to rfi to.
			 */
			if (code != KERN_SUCCESS) {

				if (thr_act->thread->recover) {

					act_lock_thread(thr_act);
					ssp->srr0 = thr_act->thread->recover;
					thr_act->thread->recover =
							(vm_offset_t)NULL;
					act_unlock_thread(thr_act);
				} else {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
				}
			}
			else {
				((savearea *)ssp)->save_flags |= SAVredrive;		/* Tell low-level to re-try fault */
				((savearea *)ssp)->save_dsisr |= MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}

			break;

		case T_INSTRUCTION_ACCESS:

#if	MACH_KDB
			if (debug_mode
			    && debugger_active[cpu_number()]
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
#endif	/* MACH_KDB */

			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */

			if (intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			map = kernel_map;

			code = vm_fault(map, trunc_page(ssp->srr0),
					PROT_EXEC, FALSE, THREAD_UNINT);

			if (code != KERN_SUCCESS) {
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				((savearea *)ssp)->save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->srr1 |= MASK(DSISR_HASH);			/* Make sure this is marked as a miss */
			}
			break;

		/* Usually shandler handles all the system calls, but the
		 * atomic thread switcher may give up (via thandler) and
		 * have to pass the call up to the exception handler.
		 */

		case T_SYSTEM_CALL:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_AST:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		}
	} else {

		ml_set_interrupts_enabled(TRUE);	/* Processing for user state traps is always enabled */

#ifdef	MACH_BSD
		{
			void get_procrustime(time_value_t *);

			get_procrustime(&tv);
		}
#endif /* MACH_BSD */

		/*
		 * Trap came from user task
		 */

		switch (trapno) {

		case T_PREEMPT:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		/*
		 * These trap types should never be seen by trap().
		 * Some are interrupts that should be seen by
		 * interrupt(); others just don't happen because they
		 * are handled elsewhere.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_MACHINE_CHECK:
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_SYSTEM_MANAGEMENT:
		case T_RESERVED:
		case T_IO_ERROR:

		default:

			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */

			panic("Unexpected user state trap(cpu %d): 0x%08x DSISR=0x%08x DAR=0x%08x PC=0x%08x, MSR=0x%08x\n",
			      cpu_number(), trapno, dsisr, dar, ssp->srr0, ssp->srr1);
			break;

		case T_RESET:
#if 0
			kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n",
				ssp->srr0, ssp->srr1);
#else
			panic("Unexpected Reset exception: srr0 = %08X, srr1 = %08X\n",
			      ssp->srr0, ssp->srr1);
#endif
			break;			/* We just ignore these */

		case T_ALIGNMENT:
			if (alignment(dsisr, dar, ssp)) {
				code = EXC_PPC_UNALIGNED;
				exception = EXC_BAD_ACCESS;
				subcode = dar;
			}
			break;

		case T_TRACE:			/* Real PPC chips */
			if (be_tracing()) {
				add_pcbuffer();
				return ssp;
			}
			/* fall through */

		case T_INSTRUCTION_BKPT:	/* 603 PPC chips */
		case T_RUNMODE_TRACE:		/* 601 PPC chips */
			exception = EXC_BREAKPOINT;
			code = EXC_PPC_TRACE;
			subcode = ssp->srr0;
			break;

		case T_PROGRAM:
			if (ssp->srr1 & MASK(SRR1_PRG_FE)) {
				fpu_save(thr_act);
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_ARITHMETIC;
				code = EXC_ARITHMETIC;

				mp_disable_preemption();
				subcode = current_act()->mact.FPU_pcb->fs.fpscr;
				mp_enable_preemption();
			}
			else if (ssp->srr1 & MASK(SRR1_PRG_ILL_INS)) {

				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_UNIPL_INST;
				subcode = ssp->srr0;
			} else if (ssp->srr1 & MASK(SRR1_PRG_PRV_INS)) {

				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_PRIVINST;
				subcode = ssp->srr0;
			} else if (ssp->srr1 & MASK(SRR1_PRG_TRAP)) {
				unsigned int inst;

				if (copyin((char *) ssp->srr0, (char *) &inst, 4))
					panic("copyin failed\n");
				UPDATE_PPC_EXCEPTION_STATE;
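				/*
				 * 0x7FE00008 is "tw 31,0,0" (trap unconditionally), the
				 * instruction debuggers conventionally plant as a
				 * breakpoint; any other trap instruction is reported as a
				 * generic software trap.
				 */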
				if (inst == 0x7FE00008) {
					exception = EXC_BREAKPOINT;
					code = EXC_PPC_BREAKPOINT;
				} else {
					exception = EXC_SOFTWARE;
					code = EXC_PPC_TRAP;
				}
				subcode = ssp->srr0;
			}
			break;

		case T_ALTIVEC_ASSIST:
			UPDATE_PPC_EXCEPTION_STATE;
			exception = EXC_ARITHMETIC;
			code = EXC_PPC_ALTIVECASSIST;
			subcode = ssp->srr0;
			break;

		case T_DATA_ACCESS:
			map = thr_act->map;

			code = vm_fault(map, trunc_page(dar),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_ABORTSAFE);

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = dar;
			} else {
				((savearea *)ssp)->save_flags |= SAVredrive;		/* Tell low-level to re-try fault */
				((savearea *)ssp)->save_dsisr |= MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_INSTRUCTION_ACCESS:
			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */
			map = thr_act->map;

			code = vm_fault(map, trunc_page(ssp->srr0),
					PROT_EXEC, FALSE, THREAD_ABORTSAFE);

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = ssp->srr0;
			} else {
				((savearea *)ssp)->save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->srr1 |= MASK(DSISR_HASH);			/* Make sure this is marked as a miss */
			}
			break;

		case T_AST:
			ml_set_interrupts_enabled(FALSE);
			ast_taken(AST_ALL, intr);
			break;

		}
#ifdef	MACH_BSD
		{
			void bsd_uprofil(time_value_t *, unsigned int);

			bsd_uprofil(&tv, ssp->srr0);
		}
#endif /* MACH_BSD */
	}

	if (exception) {
		/* if this is the init task, save the exception information */
		/* this probably is a fatal exception */
		if (bsd_init_task == current_task()) {
			char *buf;
			int i;

			buf = init_task_failure_data;

			buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
			buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%08x\n"
				       , dsisr, dar);

			for (i=0; i<32; i++) {
				if ((i % 8) == 0) {
					buf += sprintf(buf, "\n%4d :",i);
				}
				buf += sprintf(buf, " %08x",*(&ssp->r0+i));
			}

			buf += sprintf(buf, "\n\n");
			buf += sprintf(buf, "cr = 0x%08x\t\t",ssp->cr);
			buf += sprintf(buf, "xer = 0x%08x\n",ssp->xer);
			buf += sprintf(buf, "lr = 0x%08x\t\t",ssp->lr);
			buf += sprintf(buf, "ctr = 0x%08x\n",ssp->ctr);
			buf += sprintf(buf, "srr0(iar) = 0x%08x\t\t",ssp->srr0);
			buf += sprintf(buf, "srr1(msr) = 0x%08B\n",ssp->srr1,
				       "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
				       "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
			buf += sprintf(buf, "\n\n");

			/* generate some stack trace */
			buf += sprintf(buf, "Application level back trace:\n");
			if (ssp->srr1 & MASK(MSR_PR)) {
				char *addr = (char*)ssp->r1;
				unsigned int stack_buf[3];
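				/*
				 * Assumed PowerPC ABI frame layout: word 0 of each frame is
				 * the back chain to the caller's frame and word 2 holds the
				 * saved LR (return address), which is why stack_buf[0] and
				 * stack_buf[2] are used below.
				 */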
				for (i = 0; i < 8; i++) {
					if (addr == (char*)NULL)
						break;
					if (!copyin(addr,(char*)stack_buf,
						    3 * sizeof(int))) {
						buf += sprintf(buf, "0x%08x : 0x%08x\n"
							       ,addr,stack_buf[2]);
						addr = (char*)stack_buf[0];
					} else {
						break;
					}
				}
			}
			buf[0] = '\0';
		}
		doexception(exception, code, subcode);
	}
	/* AST delivery
	 * Check to see if we need an AST, if so take care of it here
	 */
	ml_set_interrupts_enabled(FALSE);
	if (USER_MODE(ssp->srr1))
		while (ast_needed(cpu_number())) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
		}

	return ssp;
}

/* This routine is called from assembly before each and every system call.
 * It must preserve r3.
 */

extern int syscall_trace(int, struct ppc_saved_state *);

extern int pmdebug;

int syscall_trace(int retval, struct ppc_saved_state *ssp)
{
	int i, argc;

	int kdarg[3];
	/* Always prepare to trace mach system calls */
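	/*
	 * Mach traps are dispatched with negative call numbers in r0 (BSD
	 * system calls are positive), so the sign bit identifies a Mach trap
	 * and -(r0) indexes mach_trap_table for the argument count used in
	 * the trace record.
	 */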
	if (kdebug_enable && (ssp->r0 & 0x80000000)) {
		/* Mach trap */
		kdarg[0]=0;
		kdarg[1]=0;
		kdarg[2]=0;
		argc = mach_trap_table[-(ssp->r0)].mach_trap_arg_count;
		if (argc > 3)
			argc = 3;
		for (i=0; i < argc; i++)
			kdarg[i] = (int)*(&ssp->r3 + i);
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->r0))) | DBG_FUNC_START,
			kdarg[0], kdarg[1], kdarg[2], 0, 0);
	}

	return retval;
}

/* This routine is called from assembly after each mach system call
 * It must preserve r3.
 */

extern int syscall_trace_end(int, struct ppc_saved_state *);

int syscall_trace_end(int retval, struct ppc_saved_state *ssp)
{
	if (kdebug_enable && (ssp->r0 & 0x80000000)) {
		/* Mach trap */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(ssp->r0))) | DBG_FUNC_END,
			retval, 0, 0, 0, 0);
	}
	return retval;
}

/*
 * called from syscall if there is an error
 */

int syscall_error(
	int exception,
	int code,
	int subcode,
	struct ppc_saved_state *ssp)
{
	register thread_t thread;

	thread = current_thread();

	if (thread == 0)
		panic("syscall error in boot phase");

	if (!USER_MODE(ssp->srr1))
		panic("system call called from kernel");

	doexception(exception, code, subcode);

	return 0;
}

/* Pass up a server syscall/exception */
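/*
 * Minimal note on the convention used here: codes[0] carries the exception
 * code and codes[1] the subcode, matching the (code, subcode) pairs built up
 * in trap() above; exception() then raises the Mach exception, presumably
 * delivering it to the current thread's or task's exception ports.
 */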
void
doexception(
	    int exc,
	    int code,
	    int sub)
{
	exception_data_type_t codes[EXCEPTION_CODE_MAX];

	codes[0] = code;
	codes[1] = sub;
	exception(exc, codes, 2);
}

char *trap_type[] = {
	"Unknown",
	"0x100 - System reset",
	"0x200 - Machine check",
	"0x300 - Data access",
	"0x400 - Inst access",
	"0x500 - Ext int",
	"0x600 - Alignment",
	"0x700 - Program",
	"0x800 - Floating point",
	"0x900 - Decrementer",
	"0xA00 - n/a",
	"0xB00 - n/a",
	"0xC00 - System call",
	"0xD00 - Trace",
	"0xE00 - FP assist",
	"0xF00 - Perf mon",
	"0xF20 - VMX",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"0x1300 - Inst bkpnt",
	"0x1400 - Sys mgmt",
	"0x1600 - Altivec Assist",
	"0x1700 - Thermal",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"0x2000 - Run Mode/Trace",
	"Signal Processor",
	"Preemption",
	"Context Switch",
	"Shutdown",
	"System Failure"
};
int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
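/*
 * Note: trap_type[] is indexed by vector number (trapno / T_VECTOR_SIZE) in
 * unresolved_kernel_trap() below, so its entries are presumably expected to
 * stay in step with the T_xxx definitions in ppc/exception.h.
 */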

void unresolved_kernel_trap(int trapno,
			    struct ppc_saved_state *ssp,
			    unsigned int dsisr,
			    unsigned int dar,
			    char *message)
{
	char *trap_name;
	extern void print_backtrace(struct ppc_saved_state *);
	extern unsigned int debug_mode, disableDebugOuput;

	ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */

	disableDebugOuput = FALSE;
	debug_mode++;
	if ((unsigned)trapno <= T_MAX)
		trap_name = trap_type[trapno / T_VECTOR_SIZE];
	else
		trap_name = "???? unrecognized exception";
	if (message == NULL)
		message = trap_name;

	printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%08x PC=0x%08x\n",
	       cpu_number(), trap_name, dar, ssp->srr0);

	print_backtrace(ssp);

	(void *)Call_Debugger(trapno, ssp);
	panic(message);
}

void
thread_syscall_return(
	kern_return_t ret)
{
	register thread_act_t thr_act = current_act();
	register struct ppc_saved_state *regs = USER_REGS(thr_act);

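	/*
	 * r3 is the PowerPC return-value register, so stuffing ret into the
	 * saved r3 is what hands the result back to user space.  The
	 * DBG_FUNC_END trace event is emitted here as well, since this path
	 * can presumably complete a Mach trap without passing back through
	 * syscall_trace_end().
	 */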
	if (kdebug_enable && (regs->r0 & 0x80000000)) {
		/* Mach trap */
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->r0))) | DBG_FUNC_END,
			ret, 0, 0, 0, 0);
	}
	regs->r3 = ret;

	thread_exception_return();
	/*NOTREACHED*/
}

#if	MACH_KDB
void
thread_kdb_return(void)
{
	register thread_act_t thr_act = current_act();
	register thread_t cur_thr = current_thread();
	register struct ppc_saved_state *regs = USER_REGS(thr_act);

	Call_Debugger(thr_act->mact.pcb->es.exception, regs);
#if	MACH_LDEBUG
	assert(cur_thr->mutex_count == 0);
#endif	/* MACH_LDEBUG */
	check_simple_locks();
	thread_exception_return();
	/*NOTREACHED*/
}
#endif	/* MACH_KDB */