]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/trap.c
xnu-792.12.6.tar.gz
[apple/xnu.git] / osfmk / ppc / trap.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33
34 #include <mach_kdb.h>
35 #include <mach_kdp.h>
36 #include <debug.h>
37
38 #include <mach/mach_types.h>
39 #include <mach/mach_traps.h>
40 #include <mach/thread_status.h>
41
42 #include <kern/processor.h>
43 #include <kern/thread.h>
44 #include <kern/exception.h>
45 #include <kern/syscall_sw.h>
46 #include <kern/cpu_data.h>
47 #include <kern/debug.h>
48
49 #include <vm/vm_fault.h>
50 #include <vm/vm_kern.h> /* For kernel_map */
51
52 #include <ppc/misc_protos.h>
53 #include <ppc/trap.h>
54 #include <ppc/exception.h>
55 #include <ppc/proc_reg.h> /* for SR_xxx definitions */
56 #include <ppc/pmap.h>
57 #include <ppc/mem.h>
58 #include <ppc/mappings.h>
59 #include <ppc/Firmware.h>
60 #include <ppc/low_trace.h>
61 #include <ppc/Diagnostics.h>
62 #include <ppc/hw_perfmon.h>
63
64 #include <sys/kdebug.h>
65
perfCallback perfTrapHook = 0;				/* Pointer to CHUD trap hook routine; 0 when no hook is installed */
perfCallback perfASTHook = 0;				/* Pointer to CHUD AST hook routine; 0 when no hook is installed */
68
69 #if MACH_KDB
70 #include <ddb/db_watch.h>
71 #include <ddb/db_run.h>
72 #include <ddb/db_break.h>
73 #include <ddb/db_trap.h>
74
75 boolean_t let_ddb_vm_fault = FALSE;
76 boolean_t debug_all_traps_with_kdb = FALSE;
77 extern struct db_watchpoint *db_watchpoint_list;
78 extern boolean_t db_watchpoints_inserted;
79 extern boolean_t db_breakpoints_inserted;
80
81
82
83 #endif /* MACH_KDB */
84
85 extern task_t bsd_init_task;
86 extern char init_task_failure_data[];
87 extern int not_in_kdp;
88
/* Protection arguments passed to vm_fault(), named for the fault type */
#define	PROT_EXEC	(VM_PROT_EXECUTE)
#define PROT_RO		(VM_PROT_READ)
#define PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)

/* A useful macro to update the ppc_exception_state in the PCB
 * before calling doexception.  Captures the faulting address (DAR),
 * the fault status (DSISR), and the exception vector number into the
 * current thread's PCB.  trapno is a byte offset into the vector
 * table; dividing by T_VECTOR_SIZE converts it back to the PowerPC
 * vector index.  NOTE: expands `dar', `dsisr' and `trapno' from the
 * enclosing scope -- only usable inside trap().
 */
#define UPDATE_PPC_EXCEPTION_STATE { \
	thread_t _thread = current_thread(); \
	_thread->machine.pcb->save_dar = (uint64_t)dar; \
	_thread->machine.pcb->save_dsisr = dsisr; \
	_thread->machine.pcb->save_exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \
}
102
103 void unresolved_kernel_trap(int trapno,
104 struct savearea *ssp,
105 unsigned int dsisr,
106 addr64_t dar,
107 const char *message);
108
109 static void handleMck(struct savearea *ssp); /* Common machine check handler */
110
111 #ifdef MACH_BSD
112 extern void get_procrustime(time_value_t *);
113 extern void bsd_uprofil(time_value_t *, user_addr_t);
114 #endif /* MACH_BSD */
115
116
/*
 *	trap() -- common trap dispatcher for both kernel-state and user-state
 *	exceptions, entered from the low-level handler with interruptions
 *	disabled (see the note below on re-enabling).
 *
 *	trapno	exception vector expressed as a byte offset
 *		(vector index * T_VECTOR_SIZE)
 *	ssp	savearea holding the interrupted context's registers
 *	dsisr	fault status captured at the vector
 *	dar	faulting data address captured at the vector
 *
 *	Returns the savearea to be restored on exit (normally ssp).
 *	Kernel-state faults are either resolved (VM fault redrive, debugger,
 *	recovery point) or end in unresolved_kernel_trap(); user-state faults
 *	are converted to Mach exceptions via doexception().
 */
struct savearea *trap(int trapno,
		      struct savearea *ssp,
		      unsigned int dsisr,
		      addr64_t dar)
{
	int exception;
	int code;
	int subcode;
	vm_map_t map;
	unsigned int sp;
	unsigned int space, space2;
	vm_map_offset_t offset;
	thread_t thread = current_thread();
	boolean_t intr;
	ast_t *myast;

#ifdef	MACH_BSD
	time_value_t tv;
#endif /* MACH_BSD */

	/* Give CHUD's AST hook a look at any pending CHUD ASTs; if no hook is
	 * installed, clear them so they do not linger.
	 */
	myast = ast_pending();
	if(perfASTHook) {
		if(*myast & AST_PPC_CHUD_ALL) {
			perfASTHook(trapno, ssp, dsisr, (unsigned int)dar);
		}
	} else {
		*myast &= ~AST_PPC_CHUD_ALL;
	}

	if(perfTrapHook) {						/* Is there a hook? */
		if(perfTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}

#if 0
	{
		extern void fctx_text(void);
		fctx_test();	/* NOTE(review): declares fctx_text but calls fctx_test -- dead (#if 0) code, never compiled */
	}
#endif

	exception = 0;						/* Clear exception for now */

/*
 *	Remember that we are disabled for interruptions when we come in here.  Because
 *	of latency concerns, we need to enable interruptions -- if the interrupted
 *	process had them enabled itself -- as soon as we can.
 */

	intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0;		/* Remember if we were enabled */

	/* Handle kernel traps first */

	if (!USER_MODE(ssp->save_srr1)) {
		/*
		 * Trap came from kernel
		 */
		switch (trapno) {

		case T_PREEMPT:			/* Handle a preempt trap */
			ast_taken(AST_PREEMPTION, FALSE);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		case T_RESET:					/* Reset interruption */
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;					/* We just ignore these */

		/*
		 * These trap types should never be seen by trap()
		 * in kernel mode, anyway.
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere. Some could happen but are
		 * considered to be fatal in kernel mode.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_SYSTEM_MANAGEMENT:
		case T_ALTIVEC_ASSIST:
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_IO_ERROR:
		case T_RESERVED:
		default:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;


		/*
		 *	Here we handle a machine check in the kernel
		 */

		case T_MACHINE_CHECK:
			handleMck(ssp);			/* Common to both user and kernel */
			break;


		case T_ALIGNMENT:
/*
*			If enaNotifyEMb is set, we get here, and
*			we have actually already emulated the unaligned access.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of unaligned accesses.
*/

			if(ssp->save_hdr.save_misc3) {	/* Was it a handled exception? */
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);	/* Go panic */
				break;
			}
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_EMULATE:
/*
*			If enaNotifyEMb is set we get here, and
*			we have actually already emulated the instruction.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of emulated instructions.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;


		case T_TRACE:
		case T_RUNMODE_TRACE:
		case T_INSTRUCTION_BKPT:
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				if (!Call_Debugger(trapno, ssp))
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				unresolved_kernel_trap(trapno, ssp,
					dsisr, dar, NULL);
			}
			break;

		case T_DATA_ACCESS:
#if	MACH_KDB
			mp_disable_preemption();
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
			mp_enable_preemption();
#endif	/* MACH_KDB */
			/* can we take this during normal panic dump operation? */
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !not_in_kdp) {
				/*
				 * Access fault while in kernel core dump.
				 */
				kdp_dump_trap(trapno, ssp);
			}


			if(ssp->save_dsisr & dsiInvMode) {	/* Did someone try to reserve cache inhibited? */
				panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar);
			}

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			/* Bitwise `|' of two boolean results -- equivalent to `||' here.
			 * True when the address is NOT in segments 0xE/0xF, i.e. a
			 * normal kernel access rather than a user memory window access.
			 */
			if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF)) {	/* User memory window access? */

				offset = (vm_map_offset_t)dar;	/* Set the failing address */
				map = kernel_map;		/* No, this is a normal kernel access */

/*
 *	Note: Some ROM device drivers will access page 0 when they start.  The IOKit will
 *	set a flag to tell us to ignore any access fault on page 0.  After the driver is
 *	opened, it will clear the flag.
 */
				if((0 == (offset & -PAGE_SIZE)) &&	/* Check for access of page 0 and */
				  ((thread->machine.specFlags) & ignoreZeroFault)) {	/* special case of ignoring page zero faults */
					ssp->save_srr0 += 4;	/* Point to next instruction */
					break;
				}

				code = vm_fault(map, vm_map_trunc_page(offset),
						dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
						FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

				if (code != KERN_SUCCESS) {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
				} else {
					ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
					ssp->save_dsisr = (ssp->save_dsisr &
						~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
				}
				break;
			}

			/* If we get here, the fault was due to a user memory window access */

			map = thread->map;

			offset = (vm_map_offset_t)(thread->machine.umwRelo + dar);	/* Compute the user space address */

			code = vm_fault(map, vm_map_trunc_page(offset),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

			/* If we failed, there should be a recovery
			 * spot to rfi to.
			 */
			if (code != KERN_SUCCESS) {
				if (thread->recover) {
					ssp->save_srr0 = thread->recover;
					thread->recover = (vm_offset_t)NULL;
				} else {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
				}
			}
			else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}

			break;

		case T_INSTRUCTION_ACCESS:

#if	MACH_KDB
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
#endif	/* MACH_KDB */

			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			map = kernel_map;

			code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
					PROT_EXEC, FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

			if (code != KERN_SUCCESS) {
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		/* Usually shandler handles all the system calls, but the
		 * atomic thread switcher may throwup (via thandler) and
		 * have to pass it up to the exception handler.
		 */

		case T_SYSTEM_CALL:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_AST:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		}
	} else {

		/*
		 * Processing for user state traps with interrupt enabled
		 * For T_AST, interrupts are enabled in the AST delivery
		 */
		if (trapno != T_AST)
			ml_set_interrupts_enabled(TRUE);

#ifdef	MACH_BSD
		{
			get_procrustime(&tv);	/* Snapshot user time for BSD profiling (paired with bsd_uprofil below) */
		}
#endif /* MACH_BSD */


		/*
		 * Trap came from user task
		 */

		switch (trapno) {

		case T_PREEMPT:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		/*
		 * These trap types should never be seen by trap()
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_SYSTEM_MANAGEMENT:
		case T_RESERVED:
		case T_IO_ERROR:

		default:

			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */

			panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
			       cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
			break;


		/*
		 *	Here we handle a machine check in user state
		 */

		case T_MACHINE_CHECK:
			handleMck(ssp);			/* Common to both user and kernel */
			break;

		case T_RESET:
			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */
			if (!Call_Debugger(trapno, ssp))
				panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
					ssp->save_srr0, ssp->save_srr1);
			break;					/* We just ignore these */

		case T_ALIGNMENT:
/*
*			If enaNotifyEMb is set, we get here, and
*			we have actually already emulated the unaligned access.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of unaligned accesses.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);

			if(ssp->save_hdr.save_misc3) {	/* Was it a handled exception? */
				exception = EXC_BAD_ACCESS;	/* Yes, throw exception */
				code = EXC_PPC_UNALIGNED;
				subcode = (unsigned int)dar;
			}
			break;

		case T_EMULATE:
/*
*			If enaNotifyEMb is set we get here, and
*			we have actually already emulated the instruction.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of emulated instructions.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_TRACE:			/* Real PPC chips */
			if (be_tracing()) {
				add_pcbuffer();
				return ssp;
			}
			/* fall through */

		case T_INSTRUCTION_BKPT:
			exception = EXC_BREAKPOINT;
			code = EXC_PPC_TRACE;
			subcode = (unsigned int)ssp->save_srr0;
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
				/* Floating-point enabled exception: save FP context so
				 * save_fpscr below reflects the faulting state. */
				fpu_save(thread->machine.curctx);
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_ARITHMETIC;
				code = EXC_ARITHMETIC;

				mp_disable_preemption();
				subcode = ssp->save_fpscr;
				mp_enable_preemption();
			}
			else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {

				UPDATE_PPC_EXCEPTION_STATE
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_UNIPL_INST;
				subcode = (unsigned int)ssp->save_srr0;
			} else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {

				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_PRIVINST;
				subcode = (unsigned int)ssp->save_srr0;
			} else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				unsigned int inst;
				//char *iaddr;

				//iaddr = CAST_DOWN(char *, ssp->save_srr0);		/* Trim from long long and make a char pointer */
				if (copyin(ssp->save_srr0, (char *) &inst, 4 )) panic("copyin failed\n");

				if(dgWork.dgFlags & enaDiagTrap) {	/* Is the diagnostic trap enabled? */
					if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) {	/* Is this a TWI 31,R31,0xFFFx? */
						if(diagTrap(ssp, inst & 0xF)) {	/* Call the trap code */
							ssp->save_srr0 += 4ULL;	/* If we eat the trap, bump pc */
							exception = 0;	/* Clear exception */
							break;		/* All done here */
						}
					}
				}

				UPDATE_PPC_EXCEPTION_STATE;

				if (inst == 0x7FE00008) {	/* The unconditional `trap' instruction (tw 31,0,0) is a breakpoint */
					exception = EXC_BREAKPOINT;
					code = EXC_PPC_BREAKPOINT;
				} else {
					exception = EXC_SOFTWARE;
					code = EXC_PPC_TRAP;
				}
				subcode = (unsigned int)ssp->save_srr0;
			}
			break;

		case T_ALTIVEC_ASSIST:
			UPDATE_PPC_EXCEPTION_STATE;
			exception = EXC_ARITHMETIC;
			code = EXC_PPC_ALTIVECASSIST;
			subcode = (unsigned int)ssp->save_srr0;
			break;

		case T_DATA_ACCESS:
			map = thread->map;

			if(ssp->save_dsisr & dsiInvMode) {	/* Did someone try to reserve cache inhibited? */
				UPDATE_PPC_EXCEPTION_STATE;	/* Don't even bother VM with this one */
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)dar;
				break;
			}

			code = vm_fault(map, vm_map_trunc_page(dar),
				 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
				 FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)dar;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_INSTRUCTION_ACCESS:
			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */
			map = thread->map;

			code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
					PROT_EXEC, FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)ssp->save_srr0;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_AST:
			/* AST delivery is done below */
			break;

		}
#ifdef	MACH_BSD
		{
			bsd_uprofil(&tv, ssp->save_srr0);
		}
#endif /* MACH_BSD */
	}

	if (exception) {
		/* if this is the init task, save the exception information */
		/* this probably is a fatal exception */
#if 0
		if(bsd_init_task == current_task()) {
			char *buf;
			int i;

			buf = init_task_failure_data;


			buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
			buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n"
				, dsisr, dar);

			for (i=0; i<32; i++) {
				if ((i % 8) == 0) {
					buf += sprintf(buf, "\n%4d :",i);
				}
				buf += sprintf(buf, " %08x",*(&ssp->save_r0+i));
			}

			buf += sprintf(buf, "\n\n");
			buf += sprintf(buf, "cr = 0x%08X\t\t",ssp->save_cr);
			buf += sprintf(buf, "xer = 0x%08X\n",ssp->save_xer);
			buf += sprintf(buf, "lr = 0x%016llX\t\t",ssp->save_lr);
			buf += sprintf(buf, "ctr = 0x%016llX\n",ssp->save_ctr);
			buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t",ssp->save_srr0);
			buf += sprintf(buf, "srr1(msr) = 0x%016llX\n",ssp->save_srr1,
				"\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
				"FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
			buf += sprintf(buf, "\n\n");

			/* generate some stack trace */
			buf += sprintf(buf, "Application level back trace:\n");
			if (ssp->save_srr1 & MASK(MSR_PR)) {
				char *addr = (char*)ssp->save_r1;
				unsigned int stack_buf[3];
				for (i = 0; i < 8; i++) {
					if (addr == (char*)NULL)
						break;
					if (!copyin(ssp->save_r1,(char*)stack_buf,
						3 * sizeof(int))) {
						buf += sprintf(buf, "0x%08X : 0x%08X\n"
							,addr,stack_buf[2]);
						addr = (char*)stack_buf[0];
					} else {
						break;
					}
				}
			}
			buf[0] = '\0';
		}
#endif
		doexception(exception, code, subcode);
	}
	/* AST delivery
	 * Check to see if we need an AST, if so take care of it here
	 */
	ml_set_interrupts_enabled(FALSE);

	if (USER_MODE(ssp->save_srr1)) {
		myast = ast_pending();
		while (*myast & AST_ALL) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
			myast = ast_pending();
		}
	}

	return ssp;
}
706
707 /* This routine is called from assembly before each and every system call.
708 * It must preserve r3.
709 */
710
711 extern int syscall_trace(int, struct savearea *);
712
713
714 extern int pmdebug;
715
716 int syscall_trace(int retval, struct savearea *ssp)
717 {
718 int i, argc;
719 int kdarg[3];
720 /* Always prepare to trace mach system calls */
721
722 kdarg[0]=0;
723 kdarg[1]=0;
724 kdarg[2]=0;
725
726 argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count;
727
728 if (argc > 3)
729 argc = 3;
730
731 for (i=0; i < argc; i++)
732 kdarg[i] = (int)*(&ssp->save_r3 + i);
733
734 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
735 kdarg[0], kdarg[1], kdarg[2], 0, 0);
736
737 return retval;
738 }
739
740 /* This routine is called from assembly after each mach system call
741 * It must preserve r3.
742 */
743
744 extern int syscall_trace_end(int, struct savearea *);
745
746 int syscall_trace_end(int retval, struct savearea *ssp)
747 {
748 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-((unsigned int)ssp->save_r0))) | DBG_FUNC_END,
749 retval, 0, 0, 0, 0);
750 return retval;
751 }
752
753 /*
754 * called from syscall if there is an error
755 */
756
757 int syscall_error(
758 int exception,
759 int code,
760 int subcode,
761 struct savearea *ssp)
762 {
763 register thread_t thread;
764
765 thread = current_thread();
766
767 if (thread == 0)
768 panic("syscall error in boot phase");
769
770 if (!USER_MODE(ssp->save_srr1))
771 panic("system call called from kernel");
772
773 doexception(exception, code, subcode);
774
775 return 0;
776 }
777
778 /* Pass up a server syscall/exception */
779 void
780 doexception(
781 int exc,
782 int code,
783 int sub)
784 {
785 exception_data_type_t codes[EXCEPTION_CODE_MAX];
786
787 codes[0] = code;
788 codes[1] = sub;
789 exception_triage(exc, codes, 2);
790 }
791
/*
 * Human-readable names for the exception vectors, indexed by
 * (trapno / T_VECTOR_SIZE) -- see unresolved_kernel_trap().  The order
 * must track the T_* vector numbering; "INVALID EXCEPTION" fills the
 * unassigned slots.
 */
char *trap_type[] = {
	"Unknown",
	"0x100 - System reset",
	"0x200 - Machine check",
	"0x300 - Data access",
	"0x400 - Inst access",
	"0x500 - Ext int",
	"0x600 - Alignment",
	"0x700 - Program",
	"0x800 - Floating point",
	"0x900 - Decrementer",
	"0xA00 - n/a",
	"0xB00 - n/a",
	"0xC00 - System call",
	"0xD00 - Trace",
	"0xE00 - FP assist",
	"0xF00 - Perf mon",
	"0xF20 - VMX",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"0x1300 - Inst bkpnt",
	"0x1400 - Sys mgmt",
	"0x1600 - Altivec Assist",
	"0x1700 - Thermal",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"Emulate",
	"0x2000 - Run Mode/Trace",
	"Signal Processor",
	"Preemption",
	"Context Switch",
	"Shutdown",
	"System Failure"
};
int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);	/* Number of entries in trap_type */
834
835 void unresolved_kernel_trap(int trapno,
836 struct savearea *ssp,
837 unsigned int dsisr,
838 addr64_t dar,
839 const char *message)
840 {
841 char *trap_name;
842 extern void print_backtrace(struct savearea *);
843 extern unsigned int debug_mode, disableDebugOuput;
844 extern unsigned long panic_caller;
845
846 ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */
847 lastTrace = LLTraceSet(0); /* Disable low-level tracing */
848
849 if( logPanicDataToScreen )
850 disableDebugOuput = FALSE;
851
852 debug_mode++;
853 if ((unsigned)trapno <= T_MAX)
854 trap_name = trap_type[trapno / T_VECTOR_SIZE];
855 else
856 trap_name = "???? unrecognized exception";
857 if (message == NULL)
858 message = trap_name;
859
860 kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
861 cpu_number(), trap_name, dar, ssp->save_srr0);
862
863 print_backtrace(ssp);
864
865 panic_caller = (0xFFFF0000 | (trapno / T_VECTOR_SIZE) );
866 draw_panic_dialog();
867
868 if( panicDebugging )
869 (void *)Call_Debugger(trapno, ssp);
870 panic(message);
871 }
872
/* Indexed by save_misc3: 0 = uncorrected, 1 = corrected (trailing space keeps the log columns aligned) */
const char *corr[2] = {"uncorrected", "corrected "};

/*
 *	handleMck: common machine check handler for both user and kernel
 *	state.  Logs the machine-check status registers; returns quietly if
 *	the low-level handler marked the check as recovered
 *	(save_misc3 != 0), otherwise panics with the full register dump.
 */
void handleMck(struct savearea *ssp) {	/* Common machine check handler */

	int cpu;

	cpu = cpu_number();

	printf("Machine check (%d) - %s - pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n",
		cpu, corr[ssp->save_hdr.save_misc3], ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar);	/* Tell us about it */
	printf("Machine check (%d) - AsyncSrc = %016llX, CoreFIR = %016llx\n", cpu, ssp->save_xdat0, ssp->save_xdat1);
	printf("Machine check (%d) - L2FIR = %016llX, BusFir = %016llx\n", cpu, ssp->save_xdat2, ssp->save_xdat3);

	if(ssp->save_hdr.save_misc3) return;	/* Leave if the machine check was recovered */

	panic("Uncorrectable machine check: pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n"
	      "  AsyncSrc = %016llX, CoreFIR = %016llx\n"
	      "  L2FIR = %016llX, BusFir = %016llx\n",
		ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar,
		ssp->save_xdat0, ssp->save_xdat1, ssp->save_xdat2, ssp->save_xdat3);

	return;
}
896
897 void
898 thread_syscall_return(
899 kern_return_t ret)
900 {
901 register thread_t thread = current_thread();
902 register struct savearea *regs = USER_REGS(thread);
903
904 if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) {
905 /* Mach trap */
906 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END,
907 ret, 0, 0, 0, 0);
908 }
909 regs->save_r3 = ret;
910
911 thread_exception_return();
912 /*NOTREACHED*/
913 }
914
915
916 #if MACH_KDB
917 void
918 thread_kdb_return(void)
919 {
920 register thread_t thread = current_thread();
921 register struct savearea *regs = USER_REGS(thread);
922
923 Call_Debugger(thread->machine.pcb->save_exception, regs);
924 thread_exception_return();
925 /*NOTREACHED*/
926 }
927 #endif /* MACH_KDB */