/*
 * Provenance: Apple XNU source release xnu-1504.3.12, file osfmk/ppc/trap.c
 * (captured from the git.saurik.com mirror of apple/xnu.git; the decimal
 * prefixes on the lines below are the original listing's line numbers).
 */
1 /*
2 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31
32 #include <mach_kdb.h>
33 #include <mach_kdp.h>
34 #include <debug.h>
35
36 #include <mach/mach_types.h>
37 #include <mach/mach_traps.h>
38 #include <mach/thread_status.h>
39
40 #include <kern/processor.h>
41 #include <kern/thread.h>
42 #include <kern/exception.h>
43 #include <kern/syscall_sw.h>
44 #include <kern/cpu_data.h>
45 #include <kern/debug.h>
46
47 #include <vm/vm_fault.h>
48 #include <vm/vm_kern.h> /* For kernel_map */
49
50 #include <ppc/misc_protos.h>
51 #include <ppc/trap.h>
52 #include <ppc/exception.h>
53 #include <ppc/proc_reg.h> /* for SR_xxx definitions */
54 #include <ppc/pmap.h>
55 #include <ppc/mem.h>
56 #include <ppc/mappings.h>
57 #include <ppc/Firmware.h>
58 #include <ppc/low_trace.h>
59 #include <ppc/Diagnostics.h>
60 #include <ppc/hw_perfmon.h>
61 #include <ppc/fpu_protos.h>
62
63 #include <sys/kdebug.h>
64
65 volatile perfCallback perfTrapHook; /* Pointer to CHUD trap hook routine */
66 volatile perfCallback perfASTHook; /* Pointer to CHUD AST hook routine */
67
68 #if CONFIG_DTRACE
69 extern kern_return_t dtrace_user_probe(ppc_saved_state_t *sv);
70
71 /* See <rdar://problem/4613924> */
72 perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
73
74 extern boolean_t dtrace_tally_fault(user_addr_t);
75 #endif
76
77 #if MACH_KDB
78 #include <ddb/db_watch.h>
79 #include <ddb/db_run.h>
80 #include <ddb/db_break.h>
81 #include <ddb/db_trap.h>
82
83 boolean_t let_ddb_vm_fault = FALSE;
84 boolean_t debug_all_traps_with_kdb = FALSE;
85 extern struct db_watchpoint *db_watchpoint_list;
86 extern boolean_t db_watchpoints_inserted;
87 extern boolean_t db_breakpoints_inserted;
88
89
90
91 #endif /* MACH_KDB */
92
93 extern task_t bsd_init_task;
94 extern char init_task_failure_data[];
95 extern int not_in_kdp;
96
/* Page-protection shorthands passed to vm_fault() below. */
97 #define PROT_EXEC (VM_PROT_EXECUTE)
98 #define PROT_RO (VM_PROT_READ)
99 #define PROT_RW (VM_PROT_READ|VM_PROT_WRITE)
100
101
102 /* A useful macro to update the ppc_exception_state in the PCB
103 * before calling doexception
104 */
/* Records the faulting address (DAR), fault status (DSISR) and vector
 * number in the current thread's PCB so the exception state visible to
 * user-level handlers matches this trap. trapno is divided by
 * T_VECTOR_SIZE to recover the raw PowerPC vector number. */
105 #define UPDATE_PPC_EXCEPTION_STATE { \
106 thread_t _thread = current_thread(); \
107 _thread->machine.pcb->save_dar = (uint64_t)dar; \
108 _thread->machine.pcb->save_dsisr = dsisr; \
109 _thread->machine.pcb->save_exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \
110 }
111
112 void unresolved_kernel_trap(int trapno,
113 struct savearea *ssp,
114 unsigned int dsisr,
115 addr64_t dar,
116 const char *message);
117
118 static void handleMck(struct savearea *ssp); /* Common machine check handler */
119
120 #ifdef MACH_BSD
121 extern void get_procrustime(time_value_t *);
122 extern void bsd_uprofil(time_value_t *, user_addr_t);
123 #endif /* MACH_BSD */
124
125
/*
 * trap() - common PowerPC trap/exception dispatcher, entered from the
 * low-level trap vectors with interrupts disabled.
 *
 *   trapno - exception vector offset (T_xxx, a multiple of T_VECTOR_SIZE)
 *   ssp    - savearea holding the interrupted context; returned to the
 *            caller, possibly with SAVredrive set so the low level
 *            re-drives the faulting access
 *   dsisr  - DSISR contents at fault time (data-access status bits)
 *   dar    - DAR contents at fault time (faulting data address, 64-bit)
 *
 * CHUD/DTrace hooks get first refusal on every trap. Kernel-mode faults
 * are either resolved (vm_fault redrive, copyin/out recovery point,
 * ignored page-0 probe) or end in unresolved_kernel_trap(); user-mode
 * faults are converted to Mach exceptions via doexception(). Pending
 * ASTs for user threads are delivered before returning.
 */
126 struct savearea *trap(int trapno,
127 struct savearea *ssp,
128 unsigned int dsisr,
129 addr64_t dar)
130 {
131 int exception;
132 mach_exception_code_t code = 0;
133 mach_exception_subcode_t subcode = 0;
134 vm_map_t map;
135 vm_map_offset_t offset;
136 thread_t thread = current_thread();
137 boolean_t intr;
138 ast_t *myast;
139 int ret;
140
141 #ifdef MACH_BSD
142 time_value_t tv;
143 #endif /* MACH_BSD */
144
145 myast = ast_pending();
146 perfCallback fn = perfASTHook;
147 if(fn) {
148 if(*myast & AST_CHUD_ALL) {
149 fn(trapno, ssp, dsisr, (unsigned int)dar);
150 }
151 } else {
152 *myast &= ~AST_CHUD_ALL;
153 }
154
155 fn = perfTrapHook;
156 if(fn) { /* Is there a hook? */
157 if(fn(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp; /* If it succeeds, we are done... */
158 }
159
160 #if CONFIG_DTRACE
161 if(tempDTraceTrapHook) { /* Is there a hook? */
162 if(tempDTraceTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp; /* If it succeeds, we are done... */
163 }
164 #endif
165
166 #if 0
167 {
/* NOTE(review): declares fctx_text() but calls fctx_test() — dead (#if 0)
 * debug scaffolding; would not compile if enabled. */
168 extern void fctx_text(void);
169 fctx_test();
170 }
171 #endif
172
173 exception = 0; /* Clear exception for now */
174
175 /*
176 * Remember that we are disabled for interruptions when we come in here. Because
177 * of latency concerns, we need to re-enable interruptions, if the interrupted
178 * context had them enabled itself, as soon as we can.
179 */
180
181 intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0; /* Remember if we were enabled */
182
183 /* Handle kernel traps first */
184
185 if (!USER_MODE(ssp->save_srr1)) {
186 /*
187 * Trap came from kernel
188 */
189 switch (trapno) {
190
191 case T_PREEMPT: /* Handle a preempt trap */
192 ast_taken(AST_PREEMPTION, FALSE);
193 break;
194
195 case T_PERF_MON:
196 perfmon_handle_pmi(ssp);
197 break;
198
199 case T_RESET: /* Reset interruption */
200 if (!Call_Debugger(trapno, ssp))
201 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
202 break; /* We just ignore these */
203
204 /*
205 * These trap types should never be seen by trap()
206 * in kernel mode, anyway.
207 * Some are interrupts that should be seen by
208 * interrupt() others just don't happen because they
209 * are handled elsewhere. Some could happen but are
210 * considered to be fatal in kernel mode.
211 */
212 case T_DECREMENTER:
213 case T_IN_VAIN: /* Shouldn't ever see this, lowmem_vectors eats it */
214 case T_SYSTEM_MANAGEMENT:
215 case T_ALTIVEC_ASSIST:
216 case T_INTERRUPT:
217 case T_FP_UNAVAILABLE:
218 case T_IO_ERROR:
219 case T_RESERVED:
220 default:
221 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
222 break;
223
224
225 /*
226 * Here we handle a machine check in the kernel
227 */
228
229 case T_MACHINE_CHECK:
230 handleMck(ssp); /* Common to both user and kernel */
231 break;
232
233
234 case T_ALIGNMENT:
235 /*
236 * If enaNotifyEMb is set, we get here, and
237 * we have actually already emulated the unaligned access.
238 * All that we want to do here is to ignore the interrupt. This is to allow logging or
239 * tracing of unaligned accesses.
240 */
241
/* NOTE(review): here non-zero save_misc3 means the alignment fault was NOT
 * handled by the emulator, so we panic; compare the user-mode T_ALIGNMENT
 * case below where it raises EXC_BAD_ACCESS instead. */
242 if(ssp->save_hdr.save_misc3) { /* Was it a handled exception? */
243 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); /* Go panic */
244 break;
245 }
246 KERNEL_DEBUG_CONSTANT(
247 MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
248 (int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
249 break;
250
251 case T_EMULATE:
252 /*
253 * If enaNotifyEMb is set we get here, and
254 * we have actually already emulated the instruction.
255 * All that we want to do here is to ignore the interrupt. This is to allow logging or
256 * tracing of emulated instructions.
257 */
258
259 KERNEL_DEBUG_CONSTANT(
260 MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
261 (int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
262 break;
263
264
265
266
267
268 case T_TRACE:
269 case T_RUNMODE_TRACE:
270 case T_INSTRUCTION_BKPT:
271 if (!Call_Debugger(trapno, ssp))
272 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
273 break;
274
275 case T_PROGRAM:
276 if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
277 if (!Call_Debugger(trapno, ssp))
278 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
279 } else {
280 unresolved_kernel_trap(trapno, ssp,
281 dsisr, dar, NULL);
282 }
283 break;
284
285 case T_DATA_ACCESS:
286 #if MACH_KDB
287 mp_disable_preemption();
288 if (debug_mode
289 && getPerProc()->debugger_active
290 && !let_ddb_vm_fault) {
291 /*
292 * Force kdb to handle this one.
293 */
294 kdb_trap(trapno, ssp);
295 }
296 mp_enable_preemption();
297 #endif /* MACH_KDB */
298 /* can we take this during normal panic dump operation? */
299 if (debug_mode
300 && getPerProc()->debugger_active
301 && !not_in_kdp) {
302 /*
303 * Access fault while in kernel core dump.
304 */
305 kdp_dump_trap(trapno, ssp);
306 }
307
308
309 if(ssp->save_dsisr & dsiInvMode) { /* Did someone try to reserve cache inhibited? */
310 panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar);
311 }
312
313 if(intr) ml_set_interrupts_enabled(TRUE); /* Enable if we were */
314
/* NOTE(review): addresses whose top nibble (bits 60-63 of the effective
 * address >> 28) is 0xE or 0xF are treated as the user memory window
 * (copyin/copyout region); everything else is a plain kernel access.
 * Bitwise '|' of the two comparisons is intentional-looking but behaves
 * like '||' here since both operands are 0/1. */
315 if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF)) { /* User memory window access? */
316
317 offset = (vm_map_offset_t)dar; /* Set the failing address */
318 map = kernel_map; /* No, this is a normal kernel access */
319
320 /*
321 * Note: Some ROM device drivers will access page 0 when they start. The IOKit will
322 * set a flag to tell us to ignore any access fault on page 0. After the driver is
323 * opened, it will clear the flag.
324 */
325 if((0 == (offset & -PAGE_SIZE)) && /* Check for access of page 0 and */
326 ((thread->machine.specFlags) & ignoreZeroFault)) { /* special case of ignoring page zero faults */
327 ssp->save_srr0 += 4; /* Point to next instruction */
328 break;
329 }
330
331 #if CONFIG_DTRACE
332 if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
333 if (dtrace_tally_fault(dar)) { /* Should a fault under dtrace be ignored? */
334 ssp->save_srr0 += 4; /* Point to next instruction */
335 break;
336 } else {
337 unresolved_kernel_trap(trapno, ssp, dsisr, dar, "Unexpected page fault under dtrace_probe");
338 }
339 }
340 #endif
341
342 code = vm_fault(map, vm_map_trunc_page(offset),
343 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
344 FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));
345
346 if (code != KERN_SUCCESS) {
347 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
348 } else {
349 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
350 ssp->save_dsisr = (ssp->save_dsisr &
351 ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH); /* Make sure this is marked as a miss */
352 }
353 break;
354 }
355
356 /* If we get here, the fault was due to a user memory window access */
357
358 #if CONFIG_DTRACE
359 if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
360 if (dtrace_tally_fault(dar)) { /* Should a user memory window access fault under dtrace be ignored? */
361 if (thread->recover) {
362 ssp->save_srr0 = thread->recover;
363 thread->recover = (vm_offset_t)NULL;
364 } else {
365 unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
366 }
367 break;
368 } else {
369 unresolved_kernel_trap(trapno, ssp, dsisr, dar, "Unexpected UMW page fault under dtrace_probe");
370 }
371 }
372 #endif
373
374 map = thread->map;
375
376 offset = (vm_map_offset_t)(thread->machine.umwRelo + dar); /* Compute the user space address */
377
378 code = vm_fault(map, vm_map_trunc_page(offset),
379 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
380 FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));
381
382 /* If we failed, there should be a recovery
383 * spot to rfi to.
384 */
385 if (code != KERN_SUCCESS) {
386 if (thread->recover) {
387 ssp->save_srr0 = thread->recover;
388 thread->recover = (vm_offset_t)NULL;
389 } else {
390 unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
391 }
392 }
393 else {
394 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
395 ssp->save_dsisr = (ssp->save_dsisr &
396 ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH); /* Make sure this is marked as a miss */
397 }
398
399 break;
400
401 case T_INSTRUCTION_ACCESS:
402
403 #if MACH_KDB
404 if (debug_mode
405 && getPerProc()->debugger_active
406 && !let_ddb_vm_fault) {
407 /*
408 * Force kdb to handle this one.
409 */
410 kdb_trap(trapno, ssp);
411 }
412 #endif /* MACH_KDB */
413
414 /* Same as for data access, except fault type
415 * is PROT_EXEC and addr comes from srr0
416 */
417
418 if(intr) ml_set_interrupts_enabled(TRUE); /* Enable if we were */
419
420 map = kernel_map;
421
422 code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
423 (PROT_EXEC | PROT_RO), FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));
424
425 if (code != KERN_SUCCESS) {
426 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
427 } else {
428 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
429 ssp->save_srr1 = (ssp->save_srr1 &
430 ~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH); /* Make sure this is marked as a miss */
431 }
432 break;
433
434 /* Usually shandler handles all the system calls, but the
435 * atomic thread switcher may throwup (via thandler) and
436 * have to pass it up to the exception handler.
437 */
438
439 case T_SYSTEM_CALL:
440 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
441 break;
442
443 case T_AST:
444 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
445 break;
446 }
447 } else {
448
449 /*
450 * Processing for user state traps with interrupt enabled
451 * For T_AST, interrupts are enabled in the AST delivery
452 */
453 if (trapno != T_AST)
454 ml_set_interrupts_enabled(TRUE);
455
456 #ifdef MACH_BSD
457 {
458 get_procrustime(&tv);
459 }
460 #endif /* MACH_BSD */
461
462
463 /*
464 * Trap came from user task
465 */
466
467 switch (trapno) {
468
469 case T_PREEMPT:
470 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
471 break;
472
473 case T_PERF_MON:
474 perfmon_handle_pmi(ssp);
475 break;
476
477 /*
478 * These trap types should never be seen by trap()
479 * Some are interrupts that should be seen by
480 * interrupt() others just don't happen because they
481 * are handled elsewhere.
482 */
483 case T_DECREMENTER:
484 case T_IN_VAIN: /* Shouldn't ever see this, lowmem_vectors eats it */
485 case T_INTERRUPT:
486 case T_FP_UNAVAILABLE:
487 case T_SYSTEM_MANAGEMENT:
488 case T_RESERVED:
489 case T_IO_ERROR:
490
491 default:
492
493 ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */
494
495 panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
496 cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
497 break;
498
499
500 /*
501 * Here we handle a machine check in user state
502 */
503
504 case T_MACHINE_CHECK:
505 handleMck(ssp); /* Common to both user and kernel */
506 break;
507
508 case T_RESET:
509 ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */
510 if (!Call_Debugger(trapno, ssp))
511 panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
512 ssp->save_srr0, ssp->save_srr1);
513 break; /* We just ignore these */
514
515 case T_ALIGNMENT:
516 /*
517 * If enaNotifyEMb is set, we get here, and
518 * we have actually already emulated the unaligned access.
519 * All that we want to do here is to ignore the interrupt. This is to allow logging or
520 * tracing of unaligned accesses.
521 */
522
523 KERNEL_DEBUG_CONSTANT(
524 MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
525 (int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
526
527 if(ssp->save_hdr.save_misc3) { /* Was it a handled exception? */
528 exception = EXC_BAD_ACCESS; /* Yes, throw exception */
529 code = EXC_PPC_UNALIGNED;
530 subcode = dar;
531 }
532 break;
533
534 case T_EMULATE:
535 /*
536 * If enaNotifyEMb is set we get here, and
537 * we have actually already emulated the instruction.
538 * All that we want to do here is to ignore the interrupt. This is to allow logging or
539 * tracing of emulated instructions.
540 */
541
542 KERNEL_DEBUG_CONSTANT(
543 MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
544 (int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
545 break;
546
547 case T_TRACE: /* Real PPC chips */
548 case T_INSTRUCTION_BKPT:
549 exception = EXC_BREAKPOINT;
550 code = EXC_PPC_TRACE;
551 subcode = ssp->save_srr0;
552 break;
553
554 case T_PROGRAM:
555 if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
556 fpu_save(thread->machine.curctx);
557 UPDATE_PPC_EXCEPTION_STATE;
558 exception = EXC_ARITHMETIC;
559 code = EXC_ARITHMETIC;
560
561 mp_disable_preemption();
562 subcode = ssp->save_fpscr;
563 mp_enable_preemption();
564 }
/* NOTE(review): the next UPDATE_PPC_EXCEPTION_STATE invocation has no
 * trailing semicolon — harmless because the macro body is a braced
 * statement, but inconsistent with the other call sites. */
565 else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {
566
567 UPDATE_PPC_EXCEPTION_STATE
568 exception = EXC_BAD_INSTRUCTION;
569 code = EXC_PPC_UNIPL_INST;
570 subcode = ssp->save_srr0;
571 } else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {
572
573 UPDATE_PPC_EXCEPTION_STATE;
574 exception = EXC_BAD_INSTRUCTION;
575 code = EXC_PPC_PRIVINST;
576 subcode = ssp->save_srr0;
577 } else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
578 unsigned int inst;
579
580 if (copyin(ssp->save_srr0, (char *) &inst, 4 )) panic("copyin failed\n");
581
582 if(dgWork.dgFlags & enaDiagTrap) { /* Is the diagnostic trap enabled? */
583 if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) { /* Is this a TWI 31,R31,0xFFFx? */
584 if(diagTrap(ssp, inst & 0xF)) { /* Call the trap code */
585 ssp->save_srr0 += 4ULL; /* If we eat the trap, bump pc */
586 exception = 0; /* Clear exception */
587 break; /* All done here */
588 }
589 }
590 }
591
592 #if CONFIG_DTRACE
593 if(inst == 0x0FFFDDDD) { /* Is this the dtrace trap? */
594 ret = dtrace_user_probe((ppc_saved_state_t *)ssp); /* Go check if it is for real and process if so... */
595 if(ret == KERN_SUCCESS) { /* Was it really? */
596 exception = 0; /* Clear the exception */
597 break; /* Go flow through and out... */
598 }
599 }
600 #endif
601
602 UPDATE_PPC_EXCEPTION_STATE;
603
604 if (inst == 0x7FE00008) {
605 exception = EXC_BREAKPOINT;
606 code = EXC_PPC_BREAKPOINT;
607 } else {
608 exception = EXC_SOFTWARE;
609 code = EXC_PPC_TRAP;
610 }
611 subcode = ssp->save_srr0;
612 }
613 break;
614
615 #if CONFIG_DTRACE
616 case T_DTRACE_RET: /* Are we returning from a dtrace injection? */
617 ret = dtrace_user_probe((ppc_saved_state_t *)ssp); /* Call the probe function if so... */
618 if(ret == KERN_SUCCESS) { /* Did this actually work? */
619 exception = 0; /* Clear the exception */
620 break; /* Go flow through and out... */
621 }
622 break;
623 #endif
624
625 case T_ALTIVEC_ASSIST:
626 UPDATE_PPC_EXCEPTION_STATE;
627 exception = EXC_ARITHMETIC;
628 code = EXC_PPC_ALTIVECASSIST;
629 subcode = ssp->save_srr0;
630 break;
631
632 case T_DATA_ACCESS:
633 map = thread->map;
634
635 if(ssp->save_dsisr & dsiInvMode) { /* Did someone try to reserve cache inhibited? */
636 UPDATE_PPC_EXCEPTION_STATE; /* Don't even bother VM with this one */
637 exception = EXC_BAD_ACCESS;
638 subcode = dar;
639 break;
640 }
641
642 code = vm_fault(map, vm_map_trunc_page(dar),
643 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
644 FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));
645
646 if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
647 UPDATE_PPC_EXCEPTION_STATE;
648 exception = EXC_BAD_ACCESS;
649 subcode = dar;
650 } else {
651 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to retry fault */
652 ssp->save_dsisr = (ssp->save_dsisr &
653 ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH); /* Make sure this is marked as a miss */
654 }
655 break;
656
657 case T_INSTRUCTION_ACCESS:
658 /* Same as for data access, except fault type
659 * is PROT_EXEC and addr comes from srr0
660 */
661 map = thread->map;
662
663 code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
664 (PROT_EXEC | PROT_RO), FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));
665
666 if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
667 UPDATE_PPC_EXCEPTION_STATE;
668 exception = EXC_BAD_ACCESS;
669 subcode = ssp->save_srr0;
670 } else {
671 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
672 ssp->save_srr1 = (ssp->save_srr1 &
673 ~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH); /* Make sure this is marked as a miss */
674 }
675 break;
676
677 case T_AST:
678 /* AST delivery is done below */
679 break;
680
681 }
682
683 #ifdef MACH_BSD
684 {
685 bsd_uprofil(&tv, ssp->save_srr0);
686 }
687 #endif /* MACH_BSD */
688 }
689
690 if (exception) {
691 /* if this is the init task, save the exception information */
692 /* this probably is a fatal exception */
693 #if 0
694 if(bsd_init_task == current_task()) {
695 char *buf;
696 int i;
697
698 buf = init_task_failure_data;
699
700
701 buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
702 buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n"
703 , dsisr, dar);
704
705 for (i=0; i<32; i++) {
706 if ((i % 8) == 0) {
707 buf += sprintf(buf, "\n%4d :",i);
708 }
709 buf += sprintf(buf, " %08x",*(&ssp->save_r0+i));
710 }
711
712 buf += sprintf(buf, "\n\n");
713 buf += sprintf(buf, "cr = 0x%08X\t\t",ssp->save_cr);
714 buf += sprintf(buf, "xer = 0x%08X\n",ssp->save_xer);
715 buf += sprintf(buf, "lr = 0x%016llX\t\t",ssp->save_lr);
716 buf += sprintf(buf, "ctr = 0x%016llX\n",ssp->save_ctr);
717 buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t",ssp->save_srr0);
718 buf += sprintf(buf, "srr1(msr) = 0x%016llX\n",ssp->save_srr1,
719 "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
720 "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
721 buf += sprintf(buf, "\n\n");
722
723 /* generate some stack trace */
724 buf += sprintf(buf, "Application level back trace:\n");
725 if (ssp->save_srr1 & MASK(MSR_PR)) {
726 char *addr = (char*)ssp->save_r1;
727 unsigned int stack_buf[3];
728 for (i = 0; i < 8; i++) {
729 if (addr == (char*)NULL)
730 break;
731 if (!copyin(ssp->save_r1,(char*)stack_buf,
732 3 * sizeof(int))) {
733 buf += sprintf(buf, "0x%08X : 0x%08X\n"
734 ,addr,stack_buf[2]);
735 addr = (char*)stack_buf[0];
736 } else {
737 break;
738 }
739 }
740 }
741 buf[0] = '\0';
742 }
743 #endif
744 doexception(exception, code, subcode);
745 }
746 /* AST delivery
747 * Check to see if we need an AST, if so take care of it here
748 */
749 ml_set_interrupts_enabled(FALSE);
750
751 if (USER_MODE(ssp->save_srr1)) {
752 myast = ast_pending();
753 while (*myast & AST_ALL) {
754 ast_taken(AST_ALL, intr);
755 ml_set_interrupts_enabled(FALSE);
756 myast = ast_pending();
757 }
758 }
759
760 return ssp;
761 }
762
763 /* This routine is called from assembly before each and every system call.
764 * It must preserve r3.
765 */
766
767 extern int syscall_trace(int, struct savearea *);
768
769
770 extern int pmdebug;
771
772 int syscall_trace(int retval, struct savearea *ssp)
773 {
774 int i, argc;
775 int kdarg[3];
776 /* Always prepare to trace mach system calls */
777
778 kdarg[0]=0;
779 kdarg[1]=0;
780 kdarg[2]=0;
781
782 argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count;
783
784 if (argc > 3)
785 argc = 3;
786
787 for (i=0; i < argc; i++)
788 kdarg[i] = (int)*(&ssp->save_r3 + i);
789
790 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
791 kdarg[0], kdarg[1], kdarg[2], 0, 0);
792
793 return retval;
794 }
795
796 /* This routine is called from assembly after each mach system call
797 * It must preserve r3.
798 */
799
800 extern int syscall_trace_end(int, struct savearea *);
801
802 int syscall_trace_end(int retval, struct savearea *ssp)
803 {
804 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-((unsigned int)ssp->save_r0))) | DBG_FUNC_END,
805 retval, 0, 0, 0, 0);
806 return retval;
807 }
808
809 /*
810 * called from syscall if there is an error
811 */
812
813 int syscall_error(
814 int exception,
815 mach_exception_code_t code,
816 mach_exception_subcode_t subcode,
817 struct savearea *ssp)
818 {
819 register thread_t thread;
820
821 thread = current_thread();
822
823 if (thread == 0)
824 panic("syscall error in boot phase");
825
826 if (!USER_MODE(ssp->save_srr1))
827 panic("system call called from kernel");
828
829 doexception(exception, code, subcode);
830
831 return 0;
832 }
833
834 /* Pass up a server syscall/exception */
835 void
836 doexception(
837 int exc,
838 mach_exception_code_t code,
839 mach_exception_subcode_t sub)
840 {
841 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];
842
843 codes[0] = code;
844 codes[1] = sub;
845 exception_triage(exc, codes, 2);
846 }
847
/* Human-readable names for the PowerPC exception vectors, indexed by
 * trapno / T_VECTOR_SIZE (see unresolved_kernel_trap). Entries past the
 * architected vectors name the software-defined pseudo-traps. */
848 const char *trap_type[] = {
849 "Unknown",
850 "0x100 - System reset",
851 "0x200 - Machine check",
852 "0x300 - Data access",
853 "0x400 - Inst access",
854 "0x500 - Ext int",
855 "0x600 - Alignment",
856 "0x700 - Program",
857 "0x800 - Floating point",
858 "0x900 - Decrementer",
859 "0xA00 - n/a",
860 "0xB00 - n/a",
861 "0xC00 - System call",
862 "0xD00 - Trace",
863 "0xE00 - FP assist",
864 "0xF00 - Perf mon",
865 "0xF20 - VMX",
866 "INVALID EXCEPTION",
867 "INVALID EXCEPTION",
868 "INVALID EXCEPTION",
869 "0x1300 - Inst bkpnt",
870 "0x1400 - Sys mgmt",
871 "0x1600 - Altivec Assist",
872 "0x1700 - Thermal",
873 "INVALID EXCEPTION",
874 "INVALID EXCEPTION",
875 "INVALID EXCEPTION",
876 "INVALID EXCEPTION",
877 "INVALID EXCEPTION",
878 "INVALID EXCEPTION",
879 "INVALID EXCEPTION",
880 "INVALID EXCEPTION",
881 "Emulate",
882 "0x2000 - Run Mode/Trace",
883 "Signal Processor",
884 "Preemption",
885 "Context Switch",
886 "Shutdown",
887 "System Failure"
888 };
/* Number of entries in trap_type[]. */
889 int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
890
/*
 * unresolved_kernel_trap() - terminal path for kernel-mode exceptions
 * that could not be resolved. Disables interrupts and low-level tracing,
 * prints the trap name, DAR and PC plus a backtrace, commits the panic
 * log, optionally enters the debugger, and finally panics with `message`
 * (or the trap name when message is NULL). Does not return.
 */
891 void unresolved_kernel_trap(int trapno,
892 struct savearea *ssp,
893 __unused unsigned int dsisr,
894 addr64_t dar,
895 const char *message)
896 {
897 const char *trap_name;
898
899 ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */
900 lastTrace = LLTraceSet(0); /* Disable low-level tracing */
901
902 #if 0
903 {
904 struct per_proc_info *pp;
905 kprintf(" srr0: %016llX\n", ssp->save_srr0); /* (TEST/DEBUG) */
906 kprintf(" srr1: %016llX\n", ssp->save_srr1); /* (TEST/DEBUG) */
907 kprintf(" dar: %016llX\n", ssp->save_dar); /* (TEST/DEBUG) */
908 kprintf(" xcp: %08X\n", ssp->save_exception); /* (TEST/DEBUG) */
909 kprintf(" ins0: %08X\n", ssp->save_instr[0]); /* (TEST/DEBUG) */
910 kprintf(" ins1: %08X\n", ssp->save_instr[1]); /* (TEST/DEBUG) */
911 kprintf(" ins2: %08X\n", ssp->save_instr[2]); /* (TEST/DEBUG) */
912 kprintf(" ins3: %08X\n", ssp->save_instr[3]); /* (TEST/DEBUG) */
913 kprintf(" ins4: %08X\n", ssp->save_instr[4]); /* (TEST/DEBUG) */
914 kprintf(" ins5: %08X\n", ssp->save_instr[5]); /* (TEST/DEBUG) */
915 kprintf(" ins6: %08X\n", ssp->save_instr[6]); /* (TEST/DEBUG) */
916 kprintf(" ins7: %08X\n", ssp->save_instr[7]); /* (TEST/DEBUG) */
917 pp = getPerProc(); /* (TEST/DEBUG) */
918 kprintf("ijsave: %016llX\n", pp->ijsave); /* (TEST/DEBUG) */
919 }
920 #endif
921
922 if( logPanicDataToScreen )
923 disable_debug_output = FALSE;
924
925 debug_mode++;
/* Map the vector offset back to an index into trap_type[]. */
926 if ((unsigned)trapno <= T_MAX)
927 trap_name = trap_type[trapno / T_VECTOR_SIZE];
928 else
929 trap_name = "???? unrecognized exception";
930 if (message == NULL)
931 message = trap_name;
932
933 kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
934 cpu_number(), trap_name, dar, ssp->save_srr0);
935
936 print_backtrace(ssp);
937
938 panic_caller = (0xFFFF0000 | (trapno / T_VECTOR_SIZE) );
939 /* Commit the panic log buffer to NVRAM, unless otherwise
940 * specified via a boot-arg.
941 */
942 if (panicDebugging)
943 commit_paniclog();
944
945 draw_panic_dialog();
946 /* XXX: This is yet another codepath into the debugger, which should
947 * be reworked to enter the primary panic codepath instead.
948 * The idea appears to be to enter the debugger (performing a
949 * stack switch) as soon as possible, but we do have a
950 * savearea encapsulating state (accessible by walking the savearea
951 * chain), so that's superfluous.
952 */
953 if( panicDebugging )
954 (void)Call_Debugger(trapno, ssp);
955 panic_plain("%s", message);
956 }
957
/* Adjective for the machine-check banner, indexed by save_misc3
 * (0 = uncorrected, 1 = corrected). */
958 const char *corr[2] = {"uncorrected", "corrected "};

/*
 * handleMck() - common machine-check handler for user and kernel traps.
 * Logs the fault state and diagnostic FIR registers; returns quietly if
 * the low-level handler flagged the check as recovered (save_misc3),
 * otherwise panics.
 *
 * NOTE(review): save_misc3 is used both as the corr[] index (so it is
 * assumed to be exactly 0 or 1) and as the "recovered" flag — confirm
 * against the low-level machine-check code that sets it.
 */
960 void handleMck(struct savearea *ssp) { /* Common machine check handler */
961
962 int cpu;
963
964 cpu = cpu_number();
965
966 printf("Machine check (%d) - %s - pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n",
967 cpu, corr[ssp->save_hdr.save_misc3], ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar); /* Tell us about it */
968 printf("Machine check (%d) - AsyncSrc = %016llX, CoreFIR = %016llx\n", cpu, ssp->save_xdat0, ssp->save_xdat1);
969 printf("Machine check (%d) - L2FIR = %016llX, BusFir = %016llx\n", cpu, ssp->save_xdat2, ssp->save_xdat3);
970
971 if(ssp->save_hdr.save_misc3) return; /* Leave if the machine check was recovered */
972
973 panic("Uncorrectable machine check: pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n"
974 " AsyncSrc = %016llX, CoreFIR = %016llx\n"
975 " L2FIR = %016llX, BusFir = %016llx\n",
976 ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar,
977 ssp->save_xdat0, ssp->save_xdat1, ssp->save_xdat2, ssp->save_xdat3);
978
979 return;
980 }
981
982 void
983 thread_syscall_return(
984 kern_return_t ret)
985 {
986 register thread_t thread = current_thread();
987 register struct savearea *regs = USER_REGS(thread);
988
989 if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) {
990 /* Mach trap */
991 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END,
992 ret, 0, 0, 0, 0);
993 }
994 regs->save_r3 = ret;
995
996 thread_exception_return();
997 /*NOTREACHED*/
998 }
999
1000
#if MACH_KDB
/*
 * Re-enter the kernel debugger on behalf of the current thread, using
 * the exception number recorded in its PCB and its saved user register
 * state, then resume user mode. Never returns.
 */
void
thread_kdb_return(void)
{
	thread_t self = current_thread();
	struct savearea *regs = USER_REGS(self);

	Call_Debugger(self->machine.pcb->save_exception, regs);
	thread_exception_return();
	/*NOTREACHED*/
}
#endif /* MACH_KDB */