]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/trap.c
xnu-792.21.3.tar.gz
[apple/xnu.git] / osfmk / ppc / trap.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31
32 #include <mach_kdb.h>
33 #include <mach_kdp.h>
34 #include <debug.h>
35
36 #include <mach/mach_types.h>
37 #include <mach/mach_traps.h>
38 #include <mach/thread_status.h>
39
40 #include <kern/processor.h>
41 #include <kern/thread.h>
42 #include <kern/exception.h>
43 #include <kern/syscall_sw.h>
44 #include <kern/cpu_data.h>
45 #include <kern/debug.h>
46
47 #include <vm/vm_fault.h>
48 #include <vm/vm_kern.h> /* For kernel_map */
49
50 #include <ppc/misc_protos.h>
51 #include <ppc/trap.h>
52 #include <ppc/exception.h>
53 #include <ppc/proc_reg.h> /* for SR_xxx definitions */
54 #include <ppc/pmap.h>
55 #include <ppc/mem.h>
56 #include <ppc/mappings.h>
57 #include <ppc/Firmware.h>
58 #include <ppc/low_trace.h>
59 #include <ppc/Diagnostics.h>
60 #include <ppc/hw_perfmon.h>
61
62 #include <sys/kdebug.h>
63
64 perfCallback perfTrapHook = 0; /* Pointer to CHUD trap hook routine */
65 perfCallback perfASTHook = 0; /* Pointer to CHUD AST hook routine */
66
67 #if MACH_KDB
68 #include <ddb/db_watch.h>
69 #include <ddb/db_run.h>
70 #include <ddb/db_break.h>
71 #include <ddb/db_trap.h>
72
73 boolean_t let_ddb_vm_fault = FALSE;
74 boolean_t debug_all_traps_with_kdb = FALSE;
75 extern struct db_watchpoint *db_watchpoint_list;
76 extern boolean_t db_watchpoints_inserted;
77 extern boolean_t db_breakpoints_inserted;
78
79
80
81 #endif /* MACH_KDB */
82
83 extern task_t bsd_init_task;
84 extern char init_task_failure_data[];
85 extern int not_in_kdp;
86
87 #define PROT_EXEC (VM_PROT_EXECUTE)
88 #define PROT_RO (VM_PROT_READ)
89 #define PROT_RW (VM_PROT_READ|VM_PROT_WRITE)
90
91 /* A useful macro to update the ppc_exception_state in the PCB
92 * before calling doexception
93 */
94 #define UPDATE_PPC_EXCEPTION_STATE { \
95 thread_t _thread = current_thread(); \
96 _thread->machine.pcb->save_dar = (uint64_t)dar; \
97 _thread->machine.pcb->save_dsisr = dsisr; \
98 _thread->machine.pcb->save_exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \
99 }
100
101 void unresolved_kernel_trap(int trapno,
102 struct savearea *ssp,
103 unsigned int dsisr,
104 addr64_t dar,
105 const char *message);
106
107 static void handleMck(struct savearea *ssp); /* Common machine check handler */
108
109 #ifdef MACH_BSD
110 extern void get_procrustime(time_value_t *);
111 extern void bsd_uprofil(time_value_t *, user_addr_t);
112 #endif /* MACH_BSD */
113
114
/*
 *	trap: common dispatcher for synchronous exceptions on PowerPC.
 *
 *	trapno	- exception vector offset (T_xxx from ppc/trap.h)
 *	ssp	- savearea holding the interrupted register state
 *	dsisr	- data fault status register at the time of the trap
 *	dar	- data fault address at the time of the trap
 *
 *	Returns the savearea to resume with (normally ssp).  Kernel-mode
 *	traps are handled (or panic) in the first half; user-mode traps
 *	either retry the fault or raise a Mach exception via doexception().
 *	We enter with external interruptions disabled.
 */
struct savearea *trap(int trapno,
			struct savearea *ssp,
			unsigned int dsisr,
			addr64_t dar)
{
	int exception;
	int code;
	int subcode;
	vm_map_t map;
	unsigned int sp;
	unsigned int space, space2;
	vm_map_offset_t offset;
	thread_t thread = current_thread();
	boolean_t intr;
	ast_t *myast;

#ifdef	MACH_BSD
	time_value_t tv;
#endif /* MACH_BSD */

	/* Give the CHUD performance-tool AST hook first crack, or clear
	   any stale CHUD AST bits if no hook is installed. */
	myast = ast_pending();
	if(perfASTHook) {
		if(*myast & AST_PPC_CHUD_ALL) {
			perfASTHook(trapno, ssp, dsisr, (unsigned int)dar);
		}
	} else {
		*myast &= ~AST_PPC_CHUD_ALL;
	}

	if(perfTrapHook) {							/* Is there a hook? */
		if(perfTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}

#if 0
	{
		extern void fctx_text(void);
		fctx_test();
	}
#endif

	exception = 0;								/* Clear exception for now */

/*
 *	Remember that we are disabled for interruptions when we come in here.  Because
 *	of latency concerns, we need to re-enable interruptions if the interrupted
 *	process had them enabled itself, as soon as we can.
 */

	intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0;	/* Remember if we were enabled */

	/* Handle kernel traps first */

	if (!USER_MODE(ssp->save_srr1)) {
		/*
		 * Trap came from kernel
		 */
		switch (trapno) {

		case T_PREEMPT:			/* Handle a preempt trap */
			ast_taken(AST_PREEMPTION, FALSE);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		case T_RESET:					/* Reset interruption */
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;						/* We just ignore these */

		/*
		 * These trap types should never be seen by trap()
		 * in kernel mode, anyway.
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere. Some could happen but are
		 * considered to be fatal in kernel mode.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_SYSTEM_MANAGEMENT:
		case T_ALTIVEC_ASSIST:
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_IO_ERROR:
		case T_RESERVED:
		default:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;


		/*
		 *	Here we handle a machine check in the kernel
		 */

		case T_MACHINE_CHECK:
			handleMck(ssp);			/* Common to both user and kernel */
			break;


		case T_ALIGNMENT:
/*
*			If enaNotifyEMb is set, we get here, and
*			we have actually already emulated the unaligned access.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of unaligned accesses.
*/

			if(ssp->save_hdr.save_misc3) {	/* Was it a handled exception? */
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);	/* Go panic */
				break;
			}
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_EMULATE:
/*
*			If enaNotifyEMb is set we get here, and
*			we have actually already emulated the instruction.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of emulated instructions.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;





		case T_TRACE:
		case T_RUNMODE_TRACE:
		case T_INSTRUCTION_BKPT:
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				if (!Call_Debugger(trapno, ssp))
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				unresolved_kernel_trap(trapno, ssp,
							dsisr, dar, NULL);
			}
			break;

		case T_DATA_ACCESS:
#if	MACH_KDB
			mp_disable_preemption();
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
			mp_enable_preemption();
#endif	/* MACH_KDB */
			/* can we take this during normal panic dump operation? */
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !not_in_kdp) {
				/*
				 * Access fault while in kernel core dump.
				 */
				kdp_dump_trap(trapno, ssp);
			}


			if(ssp->save_dsisr & dsiInvMode) {	/* Did someone try to reserve cache inhibited? */
				panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar);
			}

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			/* NOTE(review): bitwise '|' of two comparison results —
			   equivalent to '||' here since both operands are 0/1. */
			if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF)) {	/* User memory window access? */

				offset = (vm_map_offset_t)dar;	/* Set the failing address */
				map = kernel_map;		/* No, this is a normal kernel access */

/*
 *	Note: Some ROM device drivers will access page 0 when they start.  The IOKit will
 *	set a flag to tell us to ignore any access fault on page 0.  After the driver is
 *	opened, it will clear the flag.
 */
				if((0 == (offset & -PAGE_SIZE)) &&	/* Check for access of page 0 and */
				  ((thread->machine.specFlags) & ignoreZeroFault)) {	/* special case of ignoring page zero faults */
					ssp->save_srr0 += 4;		/* Point to next instruction */
					break;
				}

				code = vm_fault(map, vm_map_trunc_page(offset),
						dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
						FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

				if (code != KERN_SUCCESS) {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
				} else {
					ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
					ssp->save_dsisr = (ssp->save_dsisr &
						~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
				}
				break;
			}

			/* If we get here, the fault was due to a user memory window access */

			map = thread->map;

			offset = (vm_map_offset_t)(thread->machine.umwRelo + dar);	/* Compute the user space address */

			code = vm_fault(map, vm_map_trunc_page(offset),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

			/* If we failed, there should be a recovery
			 * spot to rfi to.
			 */
			if (code != KERN_SUCCESS) {
				if (thread->recover) {
					ssp->save_srr0 = thread->recover;
					thread->recover = (vm_offset_t)NULL;
				} else {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
				}
			}
			else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}

			break;

		case T_INSTRUCTION_ACCESS:

#if	MACH_KDB
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
#endif	/* MACH_KDB */

			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			map = kernel_map;

			code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
					PROT_EXEC, FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

			if (code != KERN_SUCCESS) {
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		/* Usually shandler handles all the system calls, but the
		 * atomic thread switcher may throwup (via thandler) and
		 * have to pass it up to the exception handler.
		 */

		case T_SYSTEM_CALL:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_AST:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		}
	} else {

		/*
		 * Processing for user state traps with interrupt enabled
		 * For T_AST, interrupts are enabled in the AST delivery
		 */
		if (trapno != T_AST)
			ml_set_interrupts_enabled(TRUE);

#ifdef	MACH_BSD
		{
			get_procrustime(&tv);
		}
#endif /* MACH_BSD */


		/*
		 * Trap came from user task
		 */

		switch (trapno) {

		case T_PREEMPT:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		/*
		 * These trap types should never be seen by trap()
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_SYSTEM_MANAGEMENT:
		case T_RESERVED:
		case T_IO_ERROR:

		default:

			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */

			panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
			       cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
			break;


		/*
		 *	Here we handle a machine check in user state
		 */

		case T_MACHINE_CHECK:
			handleMck(ssp);			/* Common to both user and kernel */
			break;

		case T_RESET:
			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */
			if (!Call_Debugger(trapno, ssp))
				panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
					ssp->save_srr0, ssp->save_srr1);
			break;						/* We just ignore these */

		case T_ALIGNMENT:
/*
*			If enaNotifyEMb is set, we get here, and
*			we have actually already emulated the unaligned access.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of unaligned accesses.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);

			if(ssp->save_hdr.save_misc3) {	/* Was it a handled exception? */
				exception = EXC_BAD_ACCESS;	/* Yes, throw exception */
				code = EXC_PPC_UNALIGNED;
				subcode = (unsigned int)dar;
			}
			break;

		case T_EMULATE:
/*
*			If enaNotifyEMb is set we get here, and
*			we have actually already emulated the instruction.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of emulated instructions.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_TRACE:			/* Real PPC chips */
			if (be_tracing()) {
				add_pcbuffer();
				return ssp;
			}
			/* fall through */

		case T_INSTRUCTION_BKPT:
			exception = EXC_BREAKPOINT;
			code = EXC_PPC_TRACE;
			subcode = (unsigned int)ssp->save_srr0;
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
				/* Floating-point enabled exception: flush live FP
				   state so save_fpscr below is current. */
				fpu_save(thread->machine.curctx);
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_ARITHMETIC;
				code = EXC_ARITHMETIC;

				mp_disable_preemption();
				subcode = ssp->save_fpscr;
				mp_enable_preemption();
			}
			else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {

				UPDATE_PPC_EXCEPTION_STATE
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_UNIPL_INST;
				subcode = (unsigned int)ssp->save_srr0;
			} else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {

				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_PRIVINST;
				subcode = (unsigned int)ssp->save_srr0;
			} else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				unsigned int inst;
				//char *iaddr;

				//iaddr = CAST_DOWN(char *, ssp->save_srr0);   /* Trim from long long and make a char pointer */
				if (copyin(ssp->save_srr0, (char *) &inst, 4 )) panic("copyin failed\n");

				if(dgWork.dgFlags & enaDiagTrap) {	/* Is the diagnostic trap enabled? */
					if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) {	/* Is this a TWI 31,R31,0xFFFx? */
						if(diagTrap(ssp, inst & 0xF)) {	/* Call the trap code */
							ssp->save_srr0 += 4ULL;	/* If we eat the trap, bump pc */
							exception = 0;		/* Clear exception */
							break;			/* All done here */
						}
					}
				}

				UPDATE_PPC_EXCEPTION_STATE;

				/* 0x7FE00008 is the unconditional trap instruction
				   (tw 31,0,0) — treat it as a breakpoint. */
				if (inst == 0x7FE00008) {
					exception = EXC_BREAKPOINT;
					code = EXC_PPC_BREAKPOINT;
				} else {
					exception = EXC_SOFTWARE;
					code = EXC_PPC_TRAP;
				}
				subcode = (unsigned int)ssp->save_srr0;
			}
			break;

		case T_ALTIVEC_ASSIST:
			UPDATE_PPC_EXCEPTION_STATE;
			exception = EXC_ARITHMETIC;
			code = EXC_PPC_ALTIVECASSIST;
			subcode = (unsigned int)ssp->save_srr0;
			break;

		case T_DATA_ACCESS:
			map = thread->map;

			if(ssp->save_dsisr & dsiInvMode) {	/* Did someone try to reserve cache inhibited? */
				UPDATE_PPC_EXCEPTION_STATE;	/* Don't even bother VM with this one */
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)dar;
				break;
			}

			code = vm_fault(map, vm_map_trunc_page(dar),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)dar;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_INSTRUCTION_ACCESS:
			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */
			map = thread->map;

			code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
					PROT_EXEC, FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)ssp->save_srr0;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_AST:
			/* AST delivery is done below */
			break;

		}
#ifdef	MACH_BSD
		{
			bsd_uprofil(&tv, ssp->save_srr0);
		}
#endif /* MACH_BSD */
	}

	if (exception) {
		/* if this is the init task, save the exception information */
		/* this probably is a fatal exception */
#if 0
		if(bsd_init_task == current_task()) {
			char *buf;
			int i;

			buf = init_task_failure_data;


			buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
			buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n"
				, dsisr, dar);

			for (i=0; i<32; i++) {
				if ((i % 8) == 0) {
					buf += sprintf(buf, "\n%4d :",i);
				}
				buf += sprintf(buf, " %08x",*(&ssp->save_r0+i));
			}

			buf += sprintf(buf, "\n\n");
			buf += sprintf(buf, "cr        = 0x%08X\t\t",ssp->save_cr);
			buf += sprintf(buf, "xer       = 0x%08X\n",ssp->save_xer);
			buf += sprintf(buf, "lr        = 0x%016llX\t\t",ssp->save_lr);
			buf += sprintf(buf, "ctr       = 0x%016llX\n",ssp->save_ctr);
			buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t",ssp->save_srr0);
			buf += sprintf(buf, "srr1(msr) = 0x%016llX\n",ssp->save_srr1,
				       "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
				       "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
			buf += sprintf(buf, "\n\n");

			/* generate some stack trace */
			buf += sprintf(buf, "Application level back trace:\n");
			if (ssp->save_srr1 & MASK(MSR_PR)) {
				char *addr = (char*)ssp->save_r1;
				unsigned int stack_buf[3];
				for (i = 0; i < 8; i++) {
					if (addr == (char*)NULL)
						break;
					if (!copyin(ssp->save_r1,(char*)stack_buf,
						    3 * sizeof(int))) {
						buf += sprintf(buf, "0x%08X : 0x%08X\n"
							,addr,stack_buf[2]);
						addr = (char*)stack_buf[0];
					} else {
						break;
					}
				}
			}
			buf[0] = '\0';
		}
#endif
		doexception(exception, code, subcode);
	}
	/* AST delivery
	 * Check to see if we need an AST, if so take care of it here
	 */
	ml_set_interrupts_enabled(FALSE);

	if (USER_MODE(ssp->save_srr1)) {
		myast = ast_pending();
		while (*myast & AST_ALL) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
			myast = ast_pending();
		}
	}

	return ssp;
}
704
705 /* This routine is called from assembly before each and every system call.
706 * It must preserve r3.
707 */
708
709 extern int syscall_trace(int, struct savearea *);
710
711
712 extern int pmdebug;
713
714 int syscall_trace(int retval, struct savearea *ssp)
715 {
716 int i, argc;
717 int kdarg[3];
718 /* Always prepare to trace mach system calls */
719
720 kdarg[0]=0;
721 kdarg[1]=0;
722 kdarg[2]=0;
723
724 argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count;
725
726 if (argc > 3)
727 argc = 3;
728
729 for (i=0; i < argc; i++)
730 kdarg[i] = (int)*(&ssp->save_r3 + i);
731
732 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
733 kdarg[0], kdarg[1], kdarg[2], 0, 0);
734
735 return retval;
736 }
737
738 /* This routine is called from assembly after each mach system call
739 * It must preserve r3.
740 */
741
742 extern int syscall_trace_end(int, struct savearea *);
743
744 int syscall_trace_end(int retval, struct savearea *ssp)
745 {
746 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-((unsigned int)ssp->save_r0))) | DBG_FUNC_END,
747 retval, 0, 0, 0, 0);
748 return retval;
749 }
750
751 /*
752 * called from syscall if there is an error
753 */
754
755 int syscall_error(
756 int exception,
757 int code,
758 int subcode,
759 struct savearea *ssp)
760 {
761 register thread_t thread;
762
763 thread = current_thread();
764
765 if (thread == 0)
766 panic("syscall error in boot phase");
767
768 if (!USER_MODE(ssp->save_srr1))
769 panic("system call called from kernel");
770
771 doexception(exception, code, subcode);
772
773 return 0;
774 }
775
776 /* Pass up a server syscall/exception */
777 void
778 doexception(
779 int exc,
780 int code,
781 int sub)
782 {
783 exception_data_type_t codes[EXCEPTION_CODE_MAX];
784
785 codes[0] = code;
786 codes[1] = sub;
787 exception_triage(exc, codes, 2);
788 }
789
/*
 * Human-readable names for the PowerPC exception vectors, indexed by
 * trapno / T_VECTOR_SIZE (see unresolved_kernel_trap).  "INVALID
 * EXCEPTION" marks vector slots not expected to occur.
 */
char *trap_type[] = {
	"Unknown",
	"0x100 - System reset",
	"0x200 - Machine check",
	"0x300 - Data access",
	"0x400 - Inst access",
	"0x500 - Ext int",
	"0x600 - Alignment",
	"0x700 - Program",
	"0x800 - Floating point",
	"0x900 - Decrementer",
	"0xA00 - n/a",
	"0xB00 - n/a",
	"0xC00 - System call",
	"0xD00 - Trace",
	"0xE00 - FP assist",
	"0xF00 - Perf mon",
	"0xF20 - VMX",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"0x1300 - Inst bkpnt",
	"0x1400 - Sys mgmt",
	"0x1600 - Altivec Assist",
	"0x1700 - Thermal",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"Emulate",
	"0x2000 - Run Mode/Trace",
	"Signal Processor",
	"Preemption",
	"Context Switch",
	"Shutdown",
	"System Failure"
};
/* Number of entries in trap_type[] */
int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
832
833 void unresolved_kernel_trap(int trapno,
834 struct savearea *ssp,
835 unsigned int dsisr,
836 addr64_t dar,
837 const char *message)
838 {
839 char *trap_name;
840 extern void print_backtrace(struct savearea *);
841 extern unsigned int debug_mode, disableDebugOuput;
842 extern unsigned long panic_caller;
843
844 ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */
845 lastTrace = LLTraceSet(0); /* Disable low-level tracing */
846
847 if( logPanicDataToScreen )
848 disableDebugOuput = FALSE;
849
850 debug_mode++;
851 if ((unsigned)trapno <= T_MAX)
852 trap_name = trap_type[trapno / T_VECTOR_SIZE];
853 else
854 trap_name = "???? unrecognized exception";
855 if (message == NULL)
856 message = trap_name;
857
858 kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
859 cpu_number(), trap_name, dar, ssp->save_srr0);
860
861 print_backtrace(ssp);
862
863 panic_caller = (0xFFFF0000 | (trapno / T_VECTOR_SIZE) );
864 draw_panic_dialog();
865
866 if( panicDebugging )
867 (void *)Call_Debugger(trapno, ssp);
868 panic(message);
869 }
870
/* Correction-status strings, indexed by save_misc3 (0 = uncorrected, 1 = corrected) */
const char *corr[2] = {"uncorrected", "corrected  "};

/*
 * handleMck: common machine-check handler for both user and kernel state.
 * Logs the machine-check detail registers captured in the savearea;
 * returns if the hardware reported the error as corrected
 * (save_misc3 != 0), otherwise panics with the full fault state.
 */
void handleMck(struct savearea *ssp) {					/* Common machine check handler */

	int cpu;

	cpu = cpu_number();

	printf("Machine check (%d) - %s - pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n",
		cpu, corr[ssp->save_hdr.save_misc3], ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar);	/* Tell us about it */
	printf("Machine check (%d) - AsyncSrc = %016llX, CoreFIR = %016llx\n", cpu, ssp->save_xdat0, ssp->save_xdat1);
	printf("Machine check (%d) - L2FIR = %016llX, BusFir = %016llx\n", cpu, ssp->save_xdat2, ssp->save_xdat3);

	if(ssp->save_hdr.save_misc3) return;	/* Leave if the machine check was recovered */

	panic("Uncorrectable machine check: pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n"
	      "                            AsyncSrc = %016llX, CoreFIR = %016llx\n"
	      "                            L2FIR = %016llX, BusFir = %016llx\n",
		  ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar,
		  ssp->save_xdat0, ssp->save_xdat1, ssp->save_xdat2, ssp->save_xdat3);

	return;
}
894
895 void
896 thread_syscall_return(
897 kern_return_t ret)
898 {
899 register thread_t thread = current_thread();
900 register struct savearea *regs = USER_REGS(thread);
901
902 if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) {
903 /* Mach trap */
904 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END,
905 ret, 0, 0, 0, 0);
906 }
907 regs->save_r3 = ret;
908
909 thread_exception_return();
910 /*NOTREACHED*/
911 }
912
913
914 #if MACH_KDB
915 void
916 thread_kdb_return(void)
917 {
918 register thread_t thread = current_thread();
919 register struct savearea *regs = USER_REGS(thread);
920
921 Call_Debugger(thread->machine.pcb->save_exception, regs);
922 thread_exception_return();
923 /*NOTREACHED*/
924 }
925 #endif /* MACH_KDB */