/*
 * Source: apple/xnu.git — osfmk/ppc/trap.c
 * (blob fd3514628b93b89d0542acb0e1168e8d82e8ad5d, via git.saurik.com mirror)
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28
29 #include <mach_kdb.h>
30 #include <mach_kdp.h>
31 #include <debug.h>
32 #include <cpus.h>
33 #include <kern/thread.h>
34 #include <kern/exception.h>
35 #include <kern/syscall_sw.h>
36 #include <kern/cpu_data.h>
37 #include <kern/debug.h>
38 #include <mach/thread_status.h>
39 #include <vm/vm_fault.h>
40 #include <vm/vm_kern.h> /* For kernel_map */
41 #include <ppc/misc_protos.h>
42 #include <ppc/trap.h>
43 #include <ppc/exception.h>
44 #include <ppc/proc_reg.h> /* for SR_xxx definitions */
45 #include <ppc/pmap.h>
46 #include <ppc/mem.h>
47 #include <ppc/mappings.h>
48 #include <ppc/Firmware.h>
49 #include <ppc/low_trace.h>
50 #include <ppc/Diagnostics.h>
51 #include <ppc/hw_perfmon.h>
52
53 #include <sys/kdebug.h>
54
55 perfTrap perfTrapHook = 0; /* Pointer to performance trap hook routine */
56
57 #if MACH_KDB
58 #include <ddb/db_watch.h>
59 #include <ddb/db_run.h>
60 #include <ddb/db_break.h>
61 #include <ddb/db_trap.h>
62
63 boolean_t let_ddb_vm_fault = FALSE;
64 boolean_t debug_all_traps_with_kdb = FALSE;
65 extern struct db_watchpoint *db_watchpoint_list;
66 extern boolean_t db_watchpoints_inserted;
67 extern boolean_t db_breakpoints_inserted;
68
69
70
71 #endif /* MACH_KDB */
72
73 extern int debugger_active[NCPUS];
74 extern task_t bsd_init_task;
75 extern char init_task_failure_data[];
76
77
78 #define PROT_EXEC (VM_PROT_EXECUTE)
79 #define PROT_RO (VM_PROT_READ)
80 #define PROT_RW (VM_PROT_READ|VM_PROT_WRITE)
81
/* A useful macro to update the ppc_exception_state in the PCB
 * before calling doexception.  Captures the faulting DAR/DSISR and the
 * exception vector (converted back from a byte offset to a PowerPC
 * vector number) into the current activation's PCB, so a user-level
 * exception handler can read them via thread_get_state().
 * Relies on `dar`, `dsisr`, and `trapno` being in scope at the use site.
 */
#define UPDATE_PPC_EXCEPTION_STATE { \
	thread_act_t thr_act = current_act(); \
	thr_act->mact.pcb->save_dar = (uint64_t)dar; \
	thr_act->mact.pcb->save_dsisr = dsisr; \
	thr_act->mact.pcb->save_exception = trapno / T_VECTOR_SIZE;	/* back to powerpc */ \
}
91
92 static void unresolved_kernel_trap(int trapno,
93 struct savearea *ssp,
94 unsigned int dsisr,
95 addr64_t dar,
96 char *message);
97
/*
 * trap() — high-level handler for PowerPC exceptions that were not
 * disposed of entirely in the low-level vectors (thandler path).
 *
 *	trapno	exception vector byte offset (T_xxx constant)
 *	ssp	savearea holding the interrupted register state
 *	dsisr	DSISR contents (data-access fault status bits)
 *	dar	DAR contents (faulting data address)
 *
 * Returns the savearea to resume from (normally ssp).  Entered with
 * interruptions disabled; re-enables them where noted below when the
 * interrupted context had them enabled.
 */
struct savearea *trap(int trapno,
			struct savearea *ssp,
			unsigned int dsisr,
			addr64_t dar)
{
	int exception;
	int code;
	int subcode;
	vm_map_t map;
	unsigned int sp;
	unsigned int space, space2;
	unsigned int offset;
	thread_act_t thr_act;
	boolean_t intr;

#ifdef MACH_BSD
	time_value_t tv;
#endif /* MACH_BSD */

	if(perfTrapHook) {						/* Is there a hook? */
		if(perfTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}

#if 0
	{
		/* NOTE(review): declares fctx_text but calls fctx_test —
		 * mismatched names; dead code under #if 0. */
		extern void fctx_text(void);
		fctx_test();
	}
#endif

	thr_act = current_act();					/* Get current activation */
	exception = 0;							/* Clear exception for now */

/*
 *	Remember that we are disabled for interruptions when we come in here.  Because
 *	of latency concerns, we need to re-enable interruptions as soon as we can
 *	if the interrupted process had them enabled itself.
 */

	intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0;	/* Remember if we were enabled */

	/* Handle kernel traps first */

	if (!USER_MODE(ssp->save_srr1)) {
		/*
		 * Trap came from kernel
		 */
		switch (trapno) {

		case T_PREEMPT:			/* Handle a preempt trap */
			ast_taken(AST_PREEMPT, FALSE);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		case T_RESET:					/* Reset interruption */
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;						/* We just ignore these */

		/*
		 * These trap types should never be seen by trap()
		 * in kernel mode, anyway.
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere. Some could happen but are
		 * considered to be fatal in kernel mode.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_MACHINE_CHECK:
		case T_SYSTEM_MANAGEMENT:
		case T_ALTIVEC_ASSIST:
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_IO_ERROR:
		case T_RESERVED:
		default:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;


		case T_ALIGNMENT:
/*
*			If enaNotifyEMb is set, we get here, and
*			we have actually already emulated the unaligned access.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of unaligned accesses.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_EMULATE:
/*
*			If enaNotifyEMb is set we get here, and
*			we have actually already emulated the instruction.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of emulated instructions.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;





		case T_TRACE:
		case T_RUNMODE_TRACE:
		case T_INSTRUCTION_BKPT:
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				/* Kernel-mode trap instruction: give the debugger a shot */
				if (!Call_Debugger(trapno, ssp))
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				unresolved_kernel_trap(trapno, ssp,
							dsisr, dar, NULL);
			}
			break;

		case T_DATA_ACCESS:

#if	MACH_KDB
			mp_disable_preemption();
			if (debug_mode
			    && debugger_active[cpu_number()]
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
			mp_enable_preemption();
#endif	/* MACH_KDB */

			if(ssp->save_dsisr & dsiInvMode) {	/* Did someone try to reserve cache inhibited? */
				panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar);
			}

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			/* Segments 0xE and 0xF are reserved for copyin/out windows;
			 * anything else is a plain kernel-space fault.
			 * NOTE(review): bitwise '|' on boolean operands — equivalent
			 * to '||' here since both comparisons yield 0/1. */
			if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF)) {	/* Is this a copy in/out? */

				offset = (unsigned int)dar;		/* Set the failing address */
				map = kernel_map;			/* No, this is a normal kernel access */

/*
 *	Note: Some ROM device drivers will access page 0 when they start.  The IOKit will
 *	set a flag to tell us to ignore any access fault on page 0.  After the driver is
 *	opened, it will clear the flag.
 */
				if((0 == (offset & -PAGE_SIZE)) &&	/* Check for access of page 0 and */
				   ((thr_act->mact.specFlags) & ignoreZeroFault)) {	/* special case of ignoring page zero faults */
					ssp->save_srr0 += 4;		/* Point to next instruction */
					break;
				}

				code = vm_fault(map, trunc_page_32(offset),
						dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
						FALSE, THREAD_UNINT, NULL, 0);

				if (code != KERN_SUCCESS) {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
				} else {
					ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
					ssp->save_dsisr = (ssp->save_dsisr &
						~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
				}
				break;
			}

			/* If we get here, the fault was due to a copyin/out */

			map = thr_act->map;

			offset = (unsigned int)(thr_act->mact.cioRelo + dar);	/* Compute the user space address */

			code = vm_fault(map, trunc_page_32(offset),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_UNINT, NULL, 0);

			/* If we failed, there should be a recovery
			 * spot to rfi to.
			 */
			if (code != KERN_SUCCESS) {

				if (thr_act->thread->recover) {

					act_lock_thread(thr_act);
					ssp->save_srr0 = thr_act->thread->recover;
					thr_act->thread->recover =
						(vm_offset_t)NULL;	/* consume the recovery point */
					act_unlock_thread(thr_act);
				} else {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
				}
			}
			else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}

			break;

		case T_INSTRUCTION_ACCESS:

#if	MACH_KDB
			if (debug_mode
			    && debugger_active[cpu_number()]
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
#endif	/* MACH_KDB */

			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			map = kernel_map;

			code = vm_fault(map, trunc_page_64(ssp->save_srr0),
					PROT_EXEC, FALSE, THREAD_UNINT, NULL, 0);

			if (code != KERN_SUCCESS) {
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		/* Usually shandler handles all the system calls, but the
		 * atomic thread switcher may throwup (via thandler) and
		 * have to pass it up to the exception handler.
		 */

		case T_SYSTEM_CALL:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_AST:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		}
	} else {

		ml_set_interrupts_enabled(TRUE);	/* Processing for user state traps is always enabled */

#ifdef	MACH_BSD
		{
		void get_procrustime(time_value_t *);

		get_procrustime(&tv);
		}
#endif /* MACH_BSD */


	/*
	 * Trap came from user task
	 */

		switch (trapno) {

		case T_PREEMPT:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		/*
		 * These trap types should never be seen by trap()
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_MACHINE_CHECK:
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_SYSTEM_MANAGEMENT:
		case T_RESERVED:
		case T_IO_ERROR:

		default:

			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */

			panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
			       cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
			break;

		case T_RESET:
			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */
			if (!Call_Debugger(trapno, ssp))
				panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
					ssp->save_srr0, ssp->save_srr1);
			break;						/* We just ignore these */

		case T_ALIGNMENT:
/*
*			If enaNotifyEMb is set, we get here, and
*			we have actually already emulated the unaligned access.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of unaligned accesses.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_EMULATE:
/*
*			If enaNotifyEMb is set we get here, and
*			we have actually already emulated the instruction.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of emulated instructions.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_TRACE:			/* Real PPC chips */
			if (be_tracing()) {
				add_pcbuffer();
				return ssp;
			}
			/* fall through */

		case T_INSTRUCTION_BKPT:
			exception = EXC_BREAKPOINT;
			code = EXC_PPC_TRACE;
			subcode = (unsigned int)ssp->save_srr0;
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
				/* Floating-point enabled exception */
				fpu_save(thr_act->mact.curctx);
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_ARITHMETIC;
				code = EXC_ARITHMETIC;

				mp_disable_preemption();
				subcode = ssp->save_fpscr;
				mp_enable_preemption();
			}
			else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {
				/* Illegal instruction */
				UPDATE_PPC_EXCEPTION_STATE
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_UNIPL_INST;
				subcode = (unsigned int)ssp->save_srr0;
			} else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {
				/* Privileged instruction in user mode */
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_PRIVINST;
				subcode = (unsigned int)ssp->save_srr0;
			} else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				unsigned int inst;
				char *iaddr;

				iaddr = (char *)ssp->save_srr0;		/* Trim from long long and make a char pointer */
				if (copyin(iaddr, (char *) &inst, 4 )) panic("copyin failed\n");

				if(dgWork.dgFlags & enaDiagTrap) {	/* Is the diagnostic trap enabled? */
					if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) {	/* Is this a TWI 31,R31,0xFFFx? */
						if(diagTrap(ssp, inst & 0xF)) {	/* Call the trap code */
							ssp->save_srr0 += 4ULL;	/* If we eat the trap, bump pc */
							exception = 0;		/* Clear exception */
							break;			/* All done here */
						}
					}
				}

				UPDATE_PPC_EXCEPTION_STATE;

				/* 0x7FE00008 is the unconditional trap instruction
				 * (tw 31,0,0), used by debuggers as a breakpoint */
				if (inst == 0x7FE00008) {
					exception = EXC_BREAKPOINT;
					code = EXC_PPC_BREAKPOINT;
				} else {
					exception = EXC_SOFTWARE;
					code = EXC_PPC_TRAP;
				}
				subcode = (unsigned int)ssp->save_srr0;
			}
			break;

		case T_ALTIVEC_ASSIST:
			UPDATE_PPC_EXCEPTION_STATE;
			exception = EXC_ARITHMETIC;
			code = EXC_PPC_ALTIVECASSIST;
			subcode = (unsigned int)ssp->save_srr0;
			break;

		case T_DATA_ACCESS:
			map = thr_act->map;

			if(ssp->save_dsisr & dsiInvMode) {	/* Did someone try to reserve cache inhibited? */
				UPDATE_PPC_EXCEPTION_STATE;	/* Don't even bother VM with this one */
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)dar;
				break;
			}

			code = vm_fault(map, trunc_page_64(dar),
				 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
				 FALSE, THREAD_ABORTSAFE, NULL, 0);

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)dar;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_INSTRUCTION_ACCESS:
			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */
			map = thr_act->map;

			code = vm_fault(map, trunc_page_64(ssp->save_srr0),
					PROT_EXEC, FALSE, THREAD_ABORTSAFE, NULL, 0);

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)ssp->save_srr0;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_AST:
			ml_set_interrupts_enabled(FALSE);
			ast_taken(AST_ALL, intr);
			break;

		}
#ifdef	MACH_BSD
		{
		void bsd_uprofil(time_value_t *, unsigned int);

		bsd_uprofil(&tv, ssp->save_srr0);
		}
#endif /* MACH_BSD */
	}

	if (exception) {
		/* if this is the init task, save the exception information */
		/* this probably is a fatal exception */
#if 0
		if(bsd_init_task == current_task()) {
			char *buf;
			int i;

			buf = init_task_failure_data;


			buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
			buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n"
				, dsisr, dar);

			for (i=0; i<32; i++) {
				if ((i % 8) == 0) {
					buf += sprintf(buf, "\n%4d :",i);
				}
				buf += sprintf(buf, " %08x",*(&ssp->save_r0+i));
			}

			buf += sprintf(buf, "\n\n");
			buf += sprintf(buf, "cr        = 0x%08X\t\t",ssp->save_cr);
			buf += sprintf(buf, "xer       = 0x%08X\n",ssp->save_xer);
			buf += sprintf(buf, "lr        = 0x%016llX\t\t",ssp->save_lr);
			buf += sprintf(buf, "ctr       = 0x%016llX\n",ssp->save_ctr);
			buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t",ssp->save_srr0);
			/* NOTE(review): the two trailing string arguments below
			 * have no matching format directives — dead code under #if 0 */
			buf += sprintf(buf, "srr1(msr) = 0x%016llX\n",ssp->save_srr1,
				   "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
				   "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
			buf += sprintf(buf, "\n\n");

			/* generate some stack trace */
			buf += sprintf(buf, "Application level back trace:\n");
			if (ssp->save_srr1 & MASK(MSR_PR)) {
				char *addr = (char*)ssp->save_r1;
				unsigned int stack_buf[3];
				for (i = 0; i < 8; i++) {
					if (addr == (char*)NULL)
						break;
					if (!copyin(addr,(char*)stack_buf,
						    3 * sizeof(int))) {
						buf += sprintf(buf, "0x%08X : 0x%08X\n"
							,addr,stack_buf[2]);
						addr = (char*)stack_buf[0];
					} else {
						break;
					}
				}
			}
			buf[0] = '\0';
		}
#endif
		doexception(exception, code, subcode);
	}
	/* AST delivery
	 * Check to see if we need an AST, if so take care of it here
	 */
	ml_set_interrupts_enabled(FALSE);
	if (USER_MODE(ssp->save_srr1))
		while (ast_needed(cpu_number())) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
		}

	return ssp;
}
644
645 /* This routine is called from assembly before each and every system call.
646 * It must preserve r3.
647 */
648
649 extern int syscall_trace(int, struct savearea *);
650
651
652 extern int pmdebug;
653
654 int syscall_trace(int retval, struct savearea *ssp)
655 {
656 int i, argc;
657 int kdarg[3];
658 /* Always prepare to trace mach system calls */
659
660 kdarg[0]=0;
661 kdarg[1]=0;
662 kdarg[2]=0;
663
664 argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count;
665
666 if (argc > 3)
667 argc = 3;
668
669 for (i=0; i < argc; i++)
670 kdarg[i] = (int)*(&ssp->save_r3 + i);
671
672 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
673 kdarg[0], kdarg[1], kdarg[2], 0, 0);
674
675 return retval;
676 }
677
678 /* This routine is called from assembly after each mach system call
679 * It must preserve r3.
680 */
681
682 extern int syscall_trace_end(int, struct savearea *);
683
/*
 * syscall_trace_end() — called from assembly after each Mach system call;
 * emits the matching DBG_FUNC_END kdebug record carrying the return value.
 * Must preserve and return retval (r3) unchanged.
 */
int syscall_trace_end(int retval, struct savearea *ssp)
{
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-((unsigned int)ssp->save_r0))) | DBG_FUNC_END,
		retval, 0, 0, 0, 0);
	return retval;
}
690
691 /*
692 * called from syscall if there is an error
693 */
694
695 int syscall_error(
696 int exception,
697 int code,
698 int subcode,
699 struct savearea *ssp)
700 {
701 register thread_t thread;
702
703 thread = current_thread();
704
705 if (thread == 0)
706 panic("syscall error in boot phase");
707
708 if (!USER_MODE(ssp->save_srr1))
709 panic("system call called from kernel");
710
711 doexception(exception, code, subcode);
712
713 return 0;
714 }
715
716 /* Pass up a server syscall/exception */
717 void
718 doexception(
719 int exc,
720 int code,
721 int sub)
722 {
723 exception_data_type_t codes[EXCEPTION_CODE_MAX];
724
725 codes[0] = code;
726 codes[1] = sub;
727 exception(exc, codes, 2);
728 }
729
/*
 * Human-readable names for each exception vector, indexed by
 * trapno / T_VECTOR_SIZE (see unresolved_kernel_trap()).
 */
char *trap_type[] = {
	"Unknown",
	"0x100 - System reset",
	"0x200 - Machine check",
	"0x300 - Data access",
	"0x400 - Inst access",
	"0x500 - Ext int",
	"0x600 - Alignment",
	"0x700 - Program",
	"0x800 - Floating point",
	"0x900 - Decrementer",
	"0xA00 - n/a",
	"0xB00 - n/a",
	"0xC00 - System call",
	"0xD00 - Trace",
	"0xE00 - FP assist",
	"0xF00 - Perf mon",
	"0xF20 - VMX",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"0x1300 - Inst bkpnt",
	"0x1400 - Sys mgmt",
	"0x1600 - Altivec Assist",
	"0x1700 - Thermal",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"Emulate",
	"0x2000 - Run Mode/Trace",
	"Signal Processor",
	"Preemption",
	"Context Switch",
	"Shutdown",
	"System Failure"
};
/* Number of entries in trap_type[] */
int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
772
773 void unresolved_kernel_trap(int trapno,
774 struct savearea *ssp,
775 unsigned int dsisr,
776 addr64_t dar,
777 char *message)
778 {
779 char *trap_name;
780 extern void print_backtrace(struct savearea *);
781 extern unsigned int debug_mode, disableDebugOuput;
782
783 ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */
784 lastTrace = LLTraceSet(0); /* Disable low-level tracing */
785
786 if( logPanicDataToScreen )
787 disableDebugOuput = FALSE;
788
789 debug_mode++;
790 if ((unsigned)trapno <= T_MAX)
791 trap_name = trap_type[trapno / T_VECTOR_SIZE];
792 else
793 trap_name = "???? unrecognized exception";
794 if (message == NULL)
795 message = trap_name;
796
797 kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
798 cpu_number(), trap_name, dar, ssp->save_srr0);
799
800 print_backtrace(ssp);
801
802 draw_panic_dialog();
803
804 if( panicDebugging )
805 (void *)Call_Debugger(trapno, ssp);
806 panic(message);
807 }
808
809 void
810 thread_syscall_return(
811 kern_return_t ret)
812 {
813 register thread_act_t thr_act = current_act();
814 register struct savearea *regs = USER_REGS(thr_act);
815
816 if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) {
817 /* Mach trap */
818 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END,
819 ret, 0, 0, 0, 0);
820 }
821 regs->save_r3 = ret;
822
823 thread_exception_return();
824 /*NOTREACHED*/
825 }
826
827
#if	MACH_KDB
/*
 * thread_kdb_return() — re-enter the kernel debugger for the current
 * activation using the exception recorded in its PCB and its saved user
 * register state, then resume user mode.  Does not return.
 */
void
thread_kdb_return(void)
{
	register thread_act_t thr_act = current_act();
	register thread_t cur_thr = current_thread();
	register struct savearea *regs = USER_REGS(thr_act);

	Call_Debugger(thr_act->mact.pcb->save_exception, regs);
#if		MACH_LDEBUG
	assert(cur_thr->mutex_count == 0);	/* must not hold mutexes when returning to user mode */
#endif		/* MACH_LDEBUG */
	check_simple_locks();
	thread_exception_return();
	/*NOTREACHED*/
}
#endif	/* MACH_KDB */