/*
 * Source: apple/xnu — osfmk/ppc/trap.c
 * (git blob 774ab23dad0a733f042da24911698c7ffd42b426, via git.saurik.com)
 */
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25
26 #include <mach_kdb.h>
27 #include <mach_kdp.h>
28 #include <debug.h>
29
30 #include <mach/mach_types.h>
31 #include <mach/mach_traps.h>
32 #include <mach/thread_status.h>
33
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/exception.h>
37 #include <kern/syscall_sw.h>
38 #include <kern/cpu_data.h>
39 #include <kern/debug.h>
40
41 #include <vm/vm_fault.h>
42 #include <vm/vm_kern.h> /* For kernel_map */
43
44 #include <ppc/misc_protos.h>
45 #include <ppc/trap.h>
46 #include <ppc/exception.h>
47 #include <ppc/proc_reg.h> /* for SR_xxx definitions */
48 #include <ppc/pmap.h>
49 #include <ppc/mem.h>
50 #include <ppc/mappings.h>
51 #include <ppc/Firmware.h>
52 #include <ppc/low_trace.h>
53 #include <ppc/Diagnostics.h>
54 #include <ppc/hw_perfmon.h>
55
56 #include <sys/kdebug.h>
57
/* CHUD performance-tool hooks.  When non-NULL, trap() gives these first
 * crack at every trap / pending CHUD AST before normal dispatch. */
perfCallback perfTrapHook = 0;				/* Pointer to CHUD trap hook routine */
perfCallback perfASTHook = 0;				/* Pointer to CHUD AST hook routine */
60
#if	MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>

/* Unless set, data/instruction access faults taken while the debugger is
 * active are forced into kdb rather than being handed to vm_fault(). */
boolean_t let_ddb_vm_fault = FALSE;
/* NOTE(review): not referenced in this file — presumably consulted by the
 * ddb glue elsewhere; confirm before removing. */
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;



#endif	/* MACH_KDB */
76
extern task_t bsd_init_task;
extern char init_task_failure_data[];
/* Zero while a KDP kernel core dump is in progress — see the T_DATA_ACCESS
 * kernel path in trap(), which routes faults to kdp_dump_trap() then. */
extern int not_in_kdp;

/* Shorthand protection masks used for the vm_fault() calls below */
#define	PROT_EXEC	(VM_PROT_EXECUTE)
#define	PROT_RO		(VM_PROT_READ)
#define	PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)


/* A useful macro to update the ppc_exception_state in the PCB
 * before calling doexception.  Relies on `dar', `dsisr' and `trapno'
 * being in scope at the expansion site (i.e. inside trap()).
 */
#define UPDATE_PPC_EXCEPTION_STATE { \
	thread_t _thread = current_thread(); \
	_thread->machine.pcb->save_dar = (uint64_t)dar; \
	_thread->machine.pcb->save_dsisr = dsisr; \
	_thread->machine.pcb->save_exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \
}

void unresolved_kernel_trap(int trapno,
			    struct savearea *ssp,
			    unsigned int dsisr,
			    addr64_t dar,
			    const char *message);

static void handleMck(struct savearea *ssp);	/* Common machine check handler */

#ifdef MACH_BSD
extern void get_procrustime(time_value_t *);
extern void bsd_uprofil(time_value_t *, user_addr_t);
#endif /* MACH_BSD */
108
109
/*
 * trap() — common PowerPC trap/exception dispatcher.
 *
 * Called from the low-level handlers for both kernel-state and user-state
 * exceptions.
 *
 *	trapno	exception vector code (scaled by T_VECTOR_SIZE)
 *	ssp	savearea holding the interrupted context's registers
 *	dsisr	fault status register at the time of the exception
 *	dar	faulting address (64-bit) where applicable
 *
 * Returns the savearea to resume with (normally ssp).  Entered with
 * interruptions disabled; they are selectively re-enabled below when the
 * interrupted context had them enabled.
 */
struct savearea *trap(int trapno,
		      struct savearea *ssp,
		      unsigned int dsisr,
		      addr64_t dar)
{
	int exception;
	int code;
	int subcode;
	vm_map_t map;
	unsigned int sp;			/* NOTE(review): unused — retained as found */
	unsigned int space, space2;		/* NOTE(review): unused — retained as found */
	vm_map_offset_t offset;
	thread_t thread = current_thread();
	boolean_t intr;
	ast_t *myast;

#ifdef	MACH_BSD
	time_value_t tv;
#endif /* MACH_BSD */

	/*
	 * Deliver any pending CHUD AST through the registered hook; with no
	 * hook registered, discard stale CHUD AST bits so they cannot fire
	 * later with no handler.
	 */
	myast = ast_pending();
	if(perfASTHook) {
		if(*myast & AST_CHUD_ALL) {
			perfASTHook(trapno, ssp, dsisr, (unsigned int)dar);
		}
	} else {
		*myast &= ~AST_CHUD_ALL;
	}

	if(perfTrapHook) {			/* Is there a hook? */
		if(perfTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}

#if 0
	{
		/* NOTE(review): declares fctx_text() but calls fctx_test() —
		 * dead code under #if 0, left as found. */
		extern void fctx_text(void);
		fctx_test();
	}
#endif

	exception = 0;				/* Clear exception for now */

	/*
	 * Remember that we are disabled for interruptions when we come in here.
	 * Because of latency concerns, we want to re-enable interruptions (if
	 * the interrupted context had them enabled itself) as soon as we can.
	 */

	intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0;	/* Remember if we were enabled */

	/* Handle kernel traps first */

	if (!USER_MODE(ssp->save_srr1)) {
		/*
		 * Trap came from kernel
		 */
		switch (trapno) {

		case T_PREEMPT:			/* Handle a preempt trap */
			ast_taken(AST_PREEMPTION, FALSE);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		case T_RESET:			/* Reset interruption */
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;			/* We just ignore these */

		/*
		 * These trap types should never be seen by trap()
		 * in kernel mode, anyway.
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere. Some could happen but are
		 * considered to be fatal in kernel mode.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_SYSTEM_MANAGEMENT:
		case T_ALTIVEC_ASSIST:
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_IO_ERROR:
		case T_RESERVED:
		default:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;


		/*
		 * Here we handle a machine check in the kernel
		 */

		case T_MACHINE_CHECK:
			handleMck(ssp);		/* Common to both user and kernel */
			break;


		case T_ALIGNMENT:
			/*
			 * If enaNotifyEMb is set, we get here, and
			 * we have actually already emulated the unaligned access.
			 * All that we want to do here is to ignore the interrupt. This is to allow logging or
			 * tracing of unaligned accesses.
			 */

			if(ssp->save_hdr.save_misc3) {	/* Was it a handled exception? */
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);	/* Go panic */
				break;
			}
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_EMULATE:
			/*
			 * If enaNotifyEMb is set we get here, and
			 * we have actually already emulated the instruction.
			 * All that we want to do here is to ignore the interrupt. This is to allow logging or
			 * tracing of emulated instructions.
			 */

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;




		case T_TRACE:
		case T_RUNMODE_TRACE:
		case T_INSTRUCTION_BKPT:
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				/* Kernel-mode trap instruction: give the debugger a shot */
				if (!Call_Debugger(trapno, ssp))
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				unresolved_kernel_trap(trapno, ssp,
							dsisr, dar, NULL);
			}
			break;

		case T_DATA_ACCESS:
#if	MACH_KDB
			mp_disable_preemption();
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
			mp_enable_preemption();
#endif /* MACH_KDB */
			/* can we take this during normal panic dump operation? */
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !not_in_kdp) {
				/*
				 * Access fault while in kernel core dump.
				 */
				kdp_dump_trap(trapno, ssp);
			}


			if(ssp->save_dsisr & dsiInvMode) {	/* Did someone try to reserve cache inhibited? */
				panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar);
			}

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			/* Segments 0xE/0xF hold the per-thread user memory window */
			if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF)) {	/* User memory window access? */

				offset = (vm_map_offset_t)dar;	/* Set the failing address */
				map = kernel_map;		/* No, this is a normal kernel access */

				/*
				 * Note: Some ROM device drivers will access page 0 when they start.  The IOKit will
				 * set a flag to tell us to ignore any access fault on page 0.  After the driver is
				 * opened, it will clear the flag.
				 */
				if((0 == (offset & -PAGE_SIZE)) &&	/* Check for access of page 0 and */
				   ((thread->machine.specFlags) & ignoreZeroFault)) {	/* special case of ignoring page zero faults */
					ssp->save_srr0 += 4;		/* Point to next instruction */
					break;
				}

				code = vm_fault(map, vm_map_trunc_page(offset),
						dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
						FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

				if (code != KERN_SUCCESS) {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
				} else {
					ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
					ssp->save_dsisr = (ssp->save_dsisr &
						~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
				}
				break;
			}

			/* If we get here, the fault was due to a user memory window access */

			map = thread->map;

			offset = (vm_map_offset_t)(thread->machine.umwRelo + dar);	/* Compute the user space address */

			code = vm_fault(map, vm_map_trunc_page(offset),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

			/* If we failed, there should be a recovery
			 * spot to rfi to.
			 */
			if (code != KERN_SUCCESS) {
				if (thread->recover) {
					ssp->save_srr0 = thread->recover;
					thread->recover = (vm_offset_t)NULL;
				} else {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
				}
			}
			else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}

			break;

		case T_INSTRUCTION_ACCESS:

#if	MACH_KDB
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
#endif /* MACH_KDB */

			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			map = kernel_map;

			code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
					(PROT_EXEC | PROT_RO), FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

			if (code != KERN_SUCCESS) {
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		/* Usually shandler handles all the system calls, but the
		 * atomic thread switcher may throwup (via thandler) and
		 * have to pass it up to the exception handler.
		 */

		case T_SYSTEM_CALL:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_AST:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		}
	} else {

		/*
		 * Processing for user state traps with interrupt enabled
		 * For T_AST, interrupts are enabled in the AST delivery
		 */
		if (trapno != T_AST)
			ml_set_interrupts_enabled(TRUE);

#ifdef	MACH_BSD
		{
			get_procrustime(&tv);
		}
#endif /* MACH_BSD */


		/*
		 * Trap came from user task
		 */

		switch (trapno) {

		case T_PREEMPT:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		/*
		 * These trap types should never be seen by trap()
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_SYSTEM_MANAGEMENT:
		case T_RESERVED:
		case T_IO_ERROR:

		default:

			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */

			panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
			      cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
			break;


		/*
		 * Here we handle a machine check in user state
		 */

		case T_MACHINE_CHECK:
			handleMck(ssp);		/* Common to both user and kernel */
			break;

		case T_RESET:
			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */
			if (!Call_Debugger(trapno, ssp))
				panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
				      ssp->save_srr0, ssp->save_srr1);
			break;			/* We just ignore these */

		case T_ALIGNMENT:
			/*
			 * If enaNotifyEMb is set, we get here, and
			 * we have actually already emulated the unaligned access.
			 * All that we want to do here is to ignore the interrupt. This is to allow logging or
			 * tracing of unaligned accesses.
			 */

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);

			if(ssp->save_hdr.save_misc3) {	/* Was it a handled exception? */
				exception = EXC_BAD_ACCESS;	/* Yes, throw exception */
				code = EXC_PPC_UNALIGNED;
				subcode = (unsigned int)dar;
			}
			break;

		case T_EMULATE:
			/*
			 * If enaNotifyEMb is set we get here, and
			 * we have actually already emulated the instruction.
			 * All that we want to do here is to ignore the interrupt. This is to allow logging or
			 * tracing of emulated instructions.
			 */

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_TRACE:			/* Real PPC chips */
			if (be_tracing()) {
				add_pcbuffer();
				return ssp;
			}
			/* fall through */

		case T_INSTRUCTION_BKPT:
			exception = EXC_BREAKPOINT;
			code = EXC_PPC_TRACE;
			subcode = (unsigned int)ssp->save_srr0;
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
				/* Floating point exception: flush FP state before reading fpscr */
				fpu_save(thread->machine.curctx);
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_ARITHMETIC;
				code = EXC_ARITHMETIC;

				mp_disable_preemption();
				subcode = ssp->save_fpscr;
				mp_enable_preemption();
			}
			else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {

				UPDATE_PPC_EXCEPTION_STATE
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_UNIPL_INST;
				subcode = (unsigned int)ssp->save_srr0;
			} else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {

				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_PRIVINST;
				subcode = (unsigned int)ssp->save_srr0;
			} else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				unsigned int inst;
				//char *iaddr;

				//iaddr = CAST_DOWN(char *, ssp->save_srr0);	/* Trim from long long and make a char pointer */
				if (copyin(ssp->save_srr0, (char *) &inst, 4 )) panic("copyin failed\n");

				if(dgWork.dgFlags & enaDiagTrap) {	/* Is the diagnostic trap enabled? */
					if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) {	/* Is this a TWI 31,R31,0xFFFx? */
						if(diagTrap(ssp, inst & 0xF)) {	/* Call the trap code */
							ssp->save_srr0 += 4ULL;	/* If we eat the trap, bump pc */
							exception = 0;		/* Clear exception */
							break;			/* All done here */
						}
					}
				}

				UPDATE_PPC_EXCEPTION_STATE;

				/* 0x7FE00008 is an unconditional trap (tw 31,r0,r0) — the breakpoint instruction */
				if (inst == 0x7FE00008) {
					exception = EXC_BREAKPOINT;
					code = EXC_PPC_BREAKPOINT;
				} else {
					exception = EXC_SOFTWARE;
					code = EXC_PPC_TRAP;
				}
				subcode = (unsigned int)ssp->save_srr0;
			}
			break;

		case T_ALTIVEC_ASSIST:
			UPDATE_PPC_EXCEPTION_STATE;
			exception = EXC_ARITHMETIC;
			code = EXC_PPC_ALTIVECASSIST;
			subcode = (unsigned int)ssp->save_srr0;
			break;

		case T_DATA_ACCESS:
			map = thread->map;

			if(ssp->save_dsisr & dsiInvMode) {	/* Did someone try to reserve cache inhibited? */
				UPDATE_PPC_EXCEPTION_STATE;	/* Don't even bother VM with this one */
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)dar;
				break;
			}

			code = vm_fault(map, vm_map_trunc_page(dar),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)dar;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_INSTRUCTION_ACCESS:
			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */
			map = thread->map;

			code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
					(PROT_EXEC | PROT_RO), FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)ssp->save_srr0;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_AST:
			/* AST delivery is done below */
			break;

		}
#ifdef	MACH_BSD
		{
			bsd_uprofil(&tv, ssp->save_srr0);
		}
#endif /* MACH_BSD */
	}

	if (exception) {
		/* if this is the init task, save the exception information */
		/* this probably is a fatal exception */
#if 0
		if(bsd_init_task == current_task()) {
			char *buf;
			int i;

			buf = init_task_failure_data;


			buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
			buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n"
							, dsisr, dar);

			for (i=0; i<32; i++) {
				if ((i % 8) == 0) {
					buf += sprintf(buf, "\n%4d :",i);
				}
				buf += sprintf(buf, " %08x",*(&ssp->save_r0+i));
			}

			buf += sprintf(buf, "\n\n");
			buf += sprintf(buf, "cr = 0x%08X\t\t",ssp->save_cr);
			buf += sprintf(buf, "xer = 0x%08X\n",ssp->save_xer);
			buf += sprintf(buf, "lr = 0x%016llX\t\t",ssp->save_lr);
			buf += sprintf(buf, "ctr = 0x%016llX\n",ssp->save_ctr);
			buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t",ssp->save_srr0);
			buf += sprintf(buf, "srr1(msr) = 0x%016llX\n",ssp->save_srr1,
						   "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
						   "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
			buf += sprintf(buf, "\n\n");

			/* generate some stack trace */
			buf += sprintf(buf, "Application level back trace:\n");
			if (ssp->save_srr1 & MASK(MSR_PR)) {
				char *addr = (char*)ssp->save_r1;
				unsigned int stack_buf[3];
				for (i = 0; i < 8; i++) {
					if (addr == (char*)NULL)
						break;
					if (!copyin(ssp->save_r1,(char*)stack_buf,
							3 * sizeof(int))) {
						buf += sprintf(buf, "0x%08X : 0x%08X\n"
								,addr,stack_buf[2]);
						addr = (char*)stack_buf[0];
					} else {
						break;
					}
				}
			}
			buf[0] = '\0';
		}
#endif
		doexception(exception, code, subcode);
	}
	/* AST delivery
	 * Check to see if we need an AST, if so take care of it here
	 */
	ml_set_interrupts_enabled(FALSE);

	if (USER_MODE(ssp->save_srr1)) {
		myast = ast_pending();
		while (*myast & AST_ALL) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
			myast = ast_pending();
		}
	}

	return ssp;
}
699
700 /* This routine is called from assembly before each and every system call.
701 * It must preserve r3.
702 */
703
704 extern int syscall_trace(int, struct savearea *);
705
706
707 extern int pmdebug;
708
709 int syscall_trace(int retval, struct savearea *ssp)
710 {
711 int i, argc;
712 int kdarg[3];
713 /* Always prepare to trace mach system calls */
714
715 kdarg[0]=0;
716 kdarg[1]=0;
717 kdarg[2]=0;
718
719 argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count;
720
721 if (argc > 3)
722 argc = 3;
723
724 for (i=0; i < argc; i++)
725 kdarg[i] = (int)*(&ssp->save_r3 + i);
726
727 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
728 kdarg[0], kdarg[1], kdarg[2], 0, 0);
729
730 return retval;
731 }
732
733 /* This routine is called from assembly after each mach system call
734 * It must preserve r3.
735 */
736
737 extern int syscall_trace_end(int, struct savearea *);
738
739 int syscall_trace_end(int retval, struct savearea *ssp)
740 {
741 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-((unsigned int)ssp->save_r0))) | DBG_FUNC_END,
742 retval, 0, 0, 0, 0);
743 return retval;
744 }
745
746 /*
747 * called from syscall if there is an error
748 */
749
750 int syscall_error(
751 int exception,
752 int code,
753 int subcode,
754 struct savearea *ssp)
755 {
756 register thread_t thread;
757
758 thread = current_thread();
759
760 if (thread == 0)
761 panic("syscall error in boot phase");
762
763 if (!USER_MODE(ssp->save_srr1))
764 panic("system call called from kernel");
765
766 doexception(exception, code, subcode);
767
768 return 0;
769 }
770
771 /* Pass up a server syscall/exception */
/* Pass up a server syscall/exception: package the (code, subcode) pair
 * and hand it to exception_triage() for delivery to the exception port.
 */
void
doexception(
	int exc,
	int code,
	int sub)
{
	exception_data_type_t codes[EXCEPTION_CODE_MAX];

	codes[0] = code;			/* primary exception code */
	codes[1] = sub;				/* subcode */
	exception_triage(exc, codes, 2);	/* two entries are valid */
}
784
/*
 * Human-readable names for the exception vectors, indexed by
 * trapno / T_VECTOR_SIZE (used by unresolved_kernel_trap()).
 */
char *trap_type[] = {
	"Unknown",
	"0x100 - System reset",
	"0x200 - Machine check",
	"0x300 - Data access",
	"0x400 - Inst access",
	"0x500 - Ext int",
	"0x600 - Alignment",
	"0x700 - Program",
	"0x800 - Floating point",
	"0x900 - Decrementer",
	"0xA00 - n/a",
	"0xB00 - n/a",
	"0xC00 - System call",
	"0xD00 - Trace",
	"0xE00 - FP assist",
	"0xF00 - Perf mon",
	"0xF20 - VMX",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"0x1300 - Inst bkpnt",
	"0x1400 - Sys mgmt",
	"0x1600 - Altivec Assist",
	"0x1700 - Thermal",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"Emulate",
	"0x2000 - Run Mode/Trace",
	"Signal Processor",
	"Preemption",
	"Context Switch",
	"Shutdown",
	"System Failure"
};
int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);	/* Number of entries above */
827
828 void unresolved_kernel_trap(int trapno,
829 struct savearea *ssp,
830 unsigned int dsisr,
831 addr64_t dar,
832 const char *message)
833 {
834 char *trap_name;
835 extern void print_backtrace(struct savearea *);
836 extern unsigned int debug_mode, disableDebugOuput;
837 extern unsigned long panic_caller;
838
839 ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */
840 lastTrace = LLTraceSet(0); /* Disable low-level tracing */
841
842 if( logPanicDataToScreen )
843 disableDebugOuput = FALSE;
844
845 debug_mode++;
846 if ((unsigned)trapno <= T_MAX)
847 trap_name = trap_type[trapno / T_VECTOR_SIZE];
848 else
849 trap_name = "???? unrecognized exception";
850 if (message == NULL)
851 message = trap_name;
852
853 kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
854 cpu_number(), trap_name, dar, ssp->save_srr0);
855
856 print_backtrace(ssp);
857
858 panic_caller = (0xFFFF0000 | (trapno / T_VECTOR_SIZE) );
859 draw_panic_dialog();
860
861 if( panicDebugging )
862 (void *)Call_Debugger(trapno, ssp);
863 panic(message);
864 }
865
/* Indexed by save_hdr.save_misc3: non-zero means the check was recovered */
const char *corr[2] = {"uncorrected", "corrected "};

/* Common machine check handler, called from trap() for both user- and
 * kernel-state machine checks (declared static earlier in this file).
 * Logs the fault registers; if the low-level handler marked the check as
 * recovered (save_misc3 non-zero) we simply return, otherwise the machine
 * check is fatal and we panic with the full register dump.
 */
void handleMck(struct savearea *ssp) {	/* Common machine check handler */

	int cpu;

	cpu = cpu_number();

	printf("Machine check (%d) - %s - pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n",
		cpu, corr[ssp->save_hdr.save_misc3], ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar);	/* Tell us about it */
	printf("Machine check (%d) - AsyncSrc = %016llX, CoreFIR = %016llx\n", cpu, ssp->save_xdat0, ssp->save_xdat1);
	printf("Machine check (%d) - L2FIR = %016llX, BusFir = %016llx\n", cpu, ssp->save_xdat2, ssp->save_xdat3);

	if(ssp->save_hdr.save_misc3) return;	/* Leave if the machine check was recovered */

	panic("Uncorrectable machine check: pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n"
	      "  AsyncSrc = %016llX, CoreFIR = %016llx\n"
	      "  L2FIR = %016llX, BusFir = %016llx\n",
	      ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar,
	      ssp->save_xdat0, ssp->save_xdat1, ssp->save_xdat2, ssp->save_xdat3);

	return;		/* NOTREACHED — panic() does not return */
}
889
890 void
891 thread_syscall_return(
892 kern_return_t ret)
893 {
894 register thread_t thread = current_thread();
895 register struct savearea *regs = USER_REGS(thread);
896
897 if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) {
898 /* Mach trap */
899 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END,
900 ret, 0, 0, 0, 0);
901 }
902 regs->save_r3 = ret;
903
904 thread_exception_return();
905 /*NOTREACHED*/
906 }
907
908
#if	MACH_KDB
/* Re-enter the debugger with the current thread's saved user register
 * state and exception number, then resume user mode.  Never returns.
 */
void
thread_kdb_return(void)
{
	register thread_t thread = current_thread();
	register struct savearea *regs = USER_REGS(thread);

	Call_Debugger(thread->machine.pcb->save_exception, regs);
	thread_exception_return();
	/*NOTREACHED*/
}
#endif	/* MACH_KDB */