]> git.saurik.com Git - apple/xnu.git/blame_incremental - osfmk/ppc/trap.c
xnu-517.tar.gz
[apple/xnu.git] / osfmk / ppc / trap.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * @OSF_COPYRIGHT@
27 */
28
29#include <mach_kdb.h>
30#include <mach_kdp.h>
31#include <debug.h>
32#include <cpus.h>
33#include <kern/thread.h>
34#include <kern/exception.h>
35#include <kern/syscall_sw.h>
36#include <kern/cpu_data.h>
37#include <kern/debug.h>
38#include <mach/thread_status.h>
39#include <vm/vm_fault.h>
40#include <vm/vm_kern.h> /* For kernel_map */
41#include <ppc/misc_protos.h>
42#include <ppc/trap.h>
43#include <ppc/exception.h>
44#include <ppc/proc_reg.h> /* for SR_xxx definitions */
45#include <ppc/pmap.h>
46#include <ppc/mem.h>
47#include <ppc/mappings.h>
48#include <ppc/Firmware.h>
49#include <ppc/low_trace.h>
50#include <ppc/Diagnostics.h>
51#include <ppc/hw_perfmon.h>
52
53#include <sys/kdebug.h>
54
/* Optional performance-monitor hook; when non-zero, trap() calls it first
 * and returns immediately if the hook reports KERN_SUCCESS. */
perfTrap perfTrapHook = 0; /* Pointer to performance trap hook routine */
56
57#if MACH_KDB
58#include <ddb/db_watch.h>
59#include <ddb/db_run.h>
60#include <ddb/db_break.h>
61#include <ddb/db_trap.h>
62
63boolean_t let_ddb_vm_fault = FALSE;
64boolean_t debug_all_traps_with_kdb = FALSE;
65extern struct db_watchpoint *db_watchpoint_list;
66extern boolean_t db_watchpoints_inserted;
67extern boolean_t db_breakpoints_inserted;
68
69
70
71#endif /* MACH_KDB */
72
73extern int debugger_active[NCPUS];
74extern task_t bsd_init_task;
75extern char init_task_failure_data[];
76extern int not_in_kdp;
77
78#define PROT_EXEC (VM_PROT_EXECUTE)
79#define PROT_RO (VM_PROT_READ)
80#define PROT_RW (VM_PROT_READ|VM_PROT_WRITE)
81
/* A useful macro to update the ppc_exception_state in the PCB
 * before calling doexception.  Records the faulting address (DAR),
 * the DSISR, and the PowerPC exception number (trapno is scaled by
 * T_VECTOR_SIZE) in the current activation's pcb.
 *
 * NOTE: deliberately a bare block, not do { } while (0) — at least one
 * call site in trap() invokes this macro without a trailing semicolon,
 * so converting it would break compilation.  Relies on trapno, dar and
 * dsisr being in scope at the expansion site.
 */
#define UPDATE_PPC_EXCEPTION_STATE { \
	thread_act_t thr_act = current_act(); \
	thr_act->mact.pcb->save_dar = (uint64_t)dar; \
	thr_act->mact.pcb->save_dsisr = dsisr; \
	thr_act->mact.pcb->save_exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \
}
91
92static void unresolved_kernel_trap(int trapno,
93 struct savearea *ssp,
94 unsigned int dsisr,
95 addr64_t dar,
96 char *message);
97
/*
 * trap - common exception dispatcher, entered from the low-level trap
 * glue with interruptions disabled.
 *
 * trapno: trap code, scaled by T_VECTOR_SIZE (see ppc/trap.h / ppc/exception.h)
 * ssp:    savearea holding the interrupted context's register state
 * dsisr:  DSISR contents at the time of the exception
 * dar:    DAR (faulting data address) at the time of the exception
 *
 * Returns the savearea to resume with (always ssp on these paths).
 *
 * Kernel-mode exceptions are either resolved in place (VM faults are
 * redriven via SAVredrive, debugger traps go to Call_Debugger) or end in
 * unresolved_kernel_trap(), which panics.  User-mode exceptions are
 * converted into Mach exceptions and raised through doexception().
 */
struct savearea *trap(int trapno,
			struct savearea *ssp,
			unsigned int dsisr,
			addr64_t dar)
{
	int exception;
	int code;
	int subcode;
	vm_map_t map;
	unsigned int sp;		/* NOTE(review): sp, space, space2 are never used in this function */
	unsigned int space, space2;
	unsigned int offset;
	thread_act_t thr_act;
	boolean_t intr;

#ifdef MACH_BSD
	time_value_t tv;
#endif /* MACH_BSD */

	if(perfTrapHook) {						/* Is there a hook? */
		if(perfTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}

#if 0
	{
		/* NOTE(review): dead code — declares fctx_text() but calls fctx_test();
		 * would not compile if the #if 0 were ever enabled. */
		extern void fctx_text(void);
		fctx_test();
	}
#endif

	thr_act = current_act();				/* Get current activation */
	exception = 0;						/* Clear exception for now */

/*
 * Remember that we are disabled for interruptions when we come in here. Because
 * of latency concerns, we need to enable interruptions if the interrupted process
 * was enabled itself as soon as we can.
 */

	intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0;		/* Remember if we were enabled */

	/* Handle kernel traps first */

	if (!USER_MODE(ssp->save_srr1)) {
		/*
		 * Trap came from kernel
		 */
		switch (trapno) {

		case T_PREEMPT:			/* Handle a preempt trap */
			ast_taken(AST_PREEMPTION, FALSE);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		case T_RESET:			/* Reset interruption */
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;			/* We just ignore these */

		/*
		 * These trap types should never be seen by trap()
		 * in kernel mode, anyway.
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere. Some could happen but are
		 * considered to be fatal in kernel mode.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_MACHINE_CHECK:
		case T_SYSTEM_MANAGEMENT:
		case T_ALTIVEC_ASSIST:
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_IO_ERROR:
		case T_RESERVED:
		default:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;


		case T_ALIGNMENT:
/*
* If enaNotifyEMb is set, we get here, and
* we have actually already emulated the unaligned access.
* All that we want to do here is to ignore the interrupt. This is to allow logging or
* tracing of unaligned accesses.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_EMULATE:
/*
* If enaNotifyEMb is set we get here, and
* we have actually already emulated the instruction.
* All that we want to do here is to ignore the interrupt. This is to allow logging or
* tracing of emulated instructions.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;





		case T_TRACE:
		case T_RUNMODE_TRACE:
		case T_INSTRUCTION_BKPT:
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				/* Kernel-mode trap instruction: let the debugger have it */
				if (!Call_Debugger(trapno, ssp))
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				unresolved_kernel_trap(trapno, ssp,
							dsisr, dar, NULL);
			}
			break;

		case T_DATA_ACCESS:
#if MACH_KDB
			mp_disable_preemption();
			if (debug_mode
			    && debugger_active[cpu_number()]
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
			mp_enable_preemption();
#endif /* MACH_KDB */
			/* can we take this during normal panic dump operation? */
			if (debug_mode
			    && debugger_active[cpu_number()]
			    && !not_in_kdp) {
				/*
				 * Access fault while in kernel core dump.
				 */
				kdp_dump_trap(trapno, ssp);
			}


			if(ssp->save_dsisr & dsiInvMode) {	/* Did someone try to reserve cache inhibited? */
				panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar);
			}

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			/* Segments 0xE-0xF are the kernel copyin/copyout window;
			 * anything outside that range is an ordinary kernel access.
			 * (NOTE(review): bitwise '|' appears intentional here — both
			 * operands are already 0/1 comparison results.) */
			if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF)) {	/* Is this a copy in/out? */

				offset = (unsigned int)dar;		/* Set the failing address */
				map = kernel_map;			/* No, this is a normal kernel access */

/*
 *	Note: Some ROM device drivers will access page 0 when they start.  The IOKit will
 *	set a flag to tell us to ignore any access fault on page 0.  After the driver is
 *	opened, it will clear the flag.
 */
				if((0 == (offset & -PAGE_SIZE)) &&	/* Check for access of page 0 and */
				   ((thr_act->mact.specFlags) & ignoreZeroFault)) {	/* special case of ignoring page zero faults */
					ssp->save_srr0 += 4;		/* Point to next instruction */
					break;
				}

				code = vm_fault(map, trunc_page_32(offset),
						dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
						FALSE, THREAD_UNINT, NULL, 0);

				if (code != KERN_SUCCESS) {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
				} else {
					ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
					ssp->save_dsisr = (ssp->save_dsisr &
						~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
				}
				break;
			}

			/* If we get here, the fault was due to a copyin/out */

			map = thr_act->map;

			offset = (unsigned int)(thr_act->mact.cioRelo + dar);	/* Compute the user space address */

			code = vm_fault(map, trunc_page_32(offset),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_UNINT, NULL, 0);

			/* If we failed, there should be a recovery
			 * spot to rfi to.
			 */
			if (code != KERN_SUCCESS) {

				if (thr_act->thread->recover) {

					act_lock_thread(thr_act);
					ssp->save_srr0 = thr_act->thread->recover;
					thr_act->thread->recover =
							(vm_offset_t)NULL;
					act_unlock_thread(thr_act);
				} else {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
				}
			}
			else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}

			break;

		case T_INSTRUCTION_ACCESS:

#if MACH_KDB
			if (debug_mode
			    && debugger_active[cpu_number()]
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
#endif /* MACH_KDB */

			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			map = kernel_map;

			code = vm_fault(map, trunc_page_64(ssp->save_srr0),
					PROT_EXEC, FALSE, THREAD_UNINT, NULL, 0);

			if (code != KERN_SUCCESS) {
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		/* Usually shandler handles all the system calls, but the
		 * atomic thread switcher may throwup (via thandler) and
		 * have to pass it up to the exception handler.
		 */

		case T_SYSTEM_CALL:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_AST:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		}
	} else {

		ml_set_interrupts_enabled(TRUE);	/* Processing for user state traps is always enabled */

#ifdef MACH_BSD
		{
			void get_procrustime(time_value_t *);

			get_procrustime(&tv);
		}
#endif /* MACH_BSD */


	/*
	 * Trap came from user task
	 */

		switch (trapno) {

		case T_PREEMPT:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		/*
		 * These trap types should never be seen by trap()
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_MACHINE_CHECK:
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_SYSTEM_MANAGEMENT:
		case T_RESERVED:
		case T_IO_ERROR:

		default:

			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */

			panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
			       cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
			break;

		case T_RESET:
			ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */
			if (!Call_Debugger(trapno, ssp))
				panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
					ssp->save_srr0, ssp->save_srr1);
			break;			/* We just ignore these */

		case T_ALIGNMENT:
/*
* If enaNotifyEMb is set, we get here, and
* we have actually already emulated the unaligned access.
* All that we want to do here is to ignore the interrupt. This is to allow logging or
* tracing of unaligned accesses.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_EMULATE:
/*
* If enaNotifyEMb is set we get here, and
* we have actually already emulated the instruction.
* All that we want to do here is to ignore the interrupt. This is to allow logging or
* tracing of emulated instructions.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_TRACE:			/* Real PPC chips */
			if (be_tracing()) {
				add_pcbuffer();
				return ssp;
			}
			/* fall through */

		case T_INSTRUCTION_BKPT:
			exception = EXC_BREAKPOINT;
			code = EXC_PPC_TRACE;
			subcode = (unsigned int)ssp->save_srr0;
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
				/* Floating point exception: flush live FP state
				 * so the pcb's fpscr is current before reporting. */
				fpu_save(thr_act->mact.curctx);
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_ARITHMETIC;
				code = EXC_ARITHMETIC;

				mp_disable_preemption();
				subcode = ssp->save_fpscr;
				mp_enable_preemption();
			}
			else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {

				UPDATE_PPC_EXCEPTION_STATE
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_UNIPL_INST;
				subcode = (unsigned int)ssp->save_srr0;
			} else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {

				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_INSTRUCTION;
				code = EXC_PPC_PRIVINST;
				subcode = (unsigned int)ssp->save_srr0;
			} else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				unsigned int inst;
				char *iaddr;

				iaddr = CAST_DOWN(char *, ssp->save_srr0);	/* Trim from long long and make a char pointer */
				if (copyin(iaddr, (char *) &inst, 4 )) panic("copyin failed\n");

				if(dgWork.dgFlags & enaDiagTrap) {	/* Is the diagnostic trap enabled? */
					if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) {	/* Is this a TWI 31,R31,0xFFFx? */
						if(diagTrap(ssp, inst & 0xF)) {	/* Call the trap code */
							ssp->save_srr0 += 4ULL;	/* If we eat the trap, bump pc */
							exception = 0;		/* Clear exception */
							break;			/* All done here */
						}
					}
				}

				UPDATE_PPC_EXCEPTION_STATE;

				/* 0x7FE00008 is the unconditional trap instruction
				 * ("tw 31,0,0"), the conventional breakpoint opcode. */
				if (inst == 0x7FE00008) {
					exception = EXC_BREAKPOINT;
					code = EXC_PPC_BREAKPOINT;
				} else {
					exception = EXC_SOFTWARE;
					code = EXC_PPC_TRAP;
				}
				subcode = (unsigned int)ssp->save_srr0;
			}
			break;

		case T_ALTIVEC_ASSIST:
			UPDATE_PPC_EXCEPTION_STATE;
			exception = EXC_ARITHMETIC;
			code = EXC_PPC_ALTIVECASSIST;
			subcode = (unsigned int)ssp->save_srr0;
			break;

		case T_DATA_ACCESS:
			map = thr_act->map;

			if(ssp->save_dsisr & dsiInvMode) {	/* Did someone try to reserve cache inhibited? */
				UPDATE_PPC_EXCEPTION_STATE;	/* Don't even bother VM with this one */
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)dar;
				break;
			}

			code = vm_fault(map, trunc_page_64(dar),
				 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
				 FALSE, THREAD_ABORTSAFE, NULL, 0);

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)dar;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_INSTRUCTION_ACCESS:
			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */
			map = thr_act->map;

			code = vm_fault(map, trunc_page_64(ssp->save_srr0),
					PROT_EXEC, FALSE, THREAD_ABORTSAFE, NULL, 0);

			if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_BAD_ACCESS;
				subcode = (unsigned int)ssp->save_srr0;
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}
			break;

		case T_AST:
			ml_set_interrupts_enabled(FALSE);
			ast_taken(AST_ALL, intr);
			break;

		}
#ifdef MACH_BSD
		{
			void bsd_uprofil(time_value_t *, unsigned int);

			bsd_uprofil(&tv, ssp->save_srr0);
		}
#endif /* MACH_BSD */
	}

	if (exception) {
		/* if this is the init task, save the exception information */
		/* this probably is a fatal exception */
#if 0
		if(bsd_init_task == current_task()) {
			char *buf;
			int i;

			buf = init_task_failure_data;


			buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
			buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n"
					, dsisr, dar);

			for (i=0; i<32; i++) {
				if ((i % 8) == 0) {
					buf += sprintf(buf, "\n%4d :",i);
				}
				buf += sprintf(buf, " %08x",*(&ssp->save_r0+i));
			}

			buf += sprintf(buf, "\n\n");
			buf += sprintf(buf, "cr        = 0x%08X\t\t",ssp->save_cr);
			buf += sprintf(buf, "xer       = 0x%08X\n",ssp->save_xer);
			buf += sprintf(buf, "lr        = 0x%016llX\t\t",ssp->save_lr);
			buf += sprintf(buf, "ctr       = 0x%016llX\n",ssp->save_ctr);
			buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t",ssp->save_srr0);
			buf += sprintf(buf, "srr1(msr) = 0x%016llX\n",ssp->save_srr1,
				   "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
				   "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
			buf += sprintf(buf, "\n\n");

			/* generate some stack trace */
			buf += sprintf(buf, "Application level back trace:\n");
			if (ssp->save_srr1 & MASK(MSR_PR)) {
				char *addr = (char*)ssp->save_r1;
				unsigned int stack_buf[3];
				for (i = 0; i < 8; i++) {
					if (addr == (char*)NULL)
						break;
					if (!copyin(addr,(char*)stack_buf,
						    3 * sizeof(int))) {
						buf += sprintf(buf, "0x%08X : 0x%08X\n"
							,addr,stack_buf[2]);
						addr = (char*)stack_buf[0];
					} else {
						break;
					}
				}
			}
			buf[0] = '\0';
		}
#endif
		doexception(exception, code, subcode);
	}
	/* AST delivery
	 * Check to see if we need an AST, if so take care of it here
	 */
	ml_set_interrupts_enabled(FALSE);
	if (USER_MODE(ssp->save_srr1))
		while (ast_needed(cpu_number())) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
		}

	return ssp;
}
653
654/* This routine is called from assembly before each and every system call.
655 * It must preserve r3.
656 */
657
658extern int syscall_trace(int, struct savearea *);
659
660
661extern int pmdebug;
662
663int syscall_trace(int retval, struct savearea *ssp)
664{
665 int i, argc;
666 int kdarg[3];
667/* Always prepare to trace mach system calls */
668
669 kdarg[0]=0;
670 kdarg[1]=0;
671 kdarg[2]=0;
672
673 argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count;
674
675 if (argc > 3)
676 argc = 3;
677
678 for (i=0; i < argc; i++)
679 kdarg[i] = (int)*(&ssp->save_r3 + i);
680
681 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
682 kdarg[0], kdarg[1], kdarg[2], 0, 0);
683
684 return retval;
685}
686
687/* This routine is called from assembly after each mach system call
688 * It must preserve r3.
689 */
690
691extern int syscall_trace_end(int, struct savearea *);
692
693int syscall_trace_end(int retval, struct savearea *ssp)
694{
695 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-((unsigned int)ssp->save_r0))) | DBG_FUNC_END,
696 retval, 0, 0, 0, 0);
697 return retval;
698}
699
700/*
701 * called from syscall if there is an error
702 */
703
704int syscall_error(
705 int exception,
706 int code,
707 int subcode,
708 struct savearea *ssp)
709{
710 register thread_t thread;
711
712 thread = current_thread();
713
714 if (thread == 0)
715 panic("syscall error in boot phase");
716
717 if (!USER_MODE(ssp->save_srr1))
718 panic("system call called from kernel");
719
720 doexception(exception, code, subcode);
721
722 return 0;
723}
724
725/* Pass up a server syscall/exception */
726void
727doexception(
728 int exc,
729 int code,
730 int sub)
731{
732 exception_data_type_t codes[EXCEPTION_CODE_MAX];
733
734 codes[0] = code;
735 codes[1] = sub;
736 exception(exc, codes, 2);
737}
738
/* Human-readable names for each exception vector, indexed by
 * trapno / T_VECTOR_SIZE (see unresolved_kernel_trap).  Entries must
 * stay in vector order; unassigned vectors are "INVALID EXCEPTION". */
char *trap_type[] = {
	"Unknown",
	"0x100 - System reset",
	"0x200 - Machine check",
	"0x300 - Data access",
	"0x400 - Inst access",
	"0x500 - Ext int",
	"0x600 - Alignment",
	"0x700 - Program",
	"0x800 - Floating point",
	"0x900 - Decrementer",
	"0xA00 - n/a",
	"0xB00 - n/a",
	"0xC00 - System call",
	"0xD00 - Trace",
	"0xE00 - FP assist",
	"0xF00 - Perf mon",
	"0xF20 - VMX",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"0x1300 - Inst bkpnt",
	"0x1400 - Sys mgmt",
	"0x1600 - Altivec Assist",
	"0x1700 - Thermal",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"Emulate",
	"0x2000 - Run Mode/Trace",
	"Signal Processor",
	"Preemption",
	"Context Switch",
	"Shutdown",
	"System Failure"
};
/* Number of entries in trap_type[]. */
int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
781
782void unresolved_kernel_trap(int trapno,
783 struct savearea *ssp,
784 unsigned int dsisr,
785 addr64_t dar,
786 char *message)
787{
788 char *trap_name;
789 extern void print_backtrace(struct savearea *);
790 extern unsigned int debug_mode, disableDebugOuput;
791
792 ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */
793 lastTrace = LLTraceSet(0); /* Disable low-level tracing */
794
795 if( logPanicDataToScreen )
796 disableDebugOuput = FALSE;
797
798 debug_mode++;
799 if ((unsigned)trapno <= T_MAX)
800 trap_name = trap_type[trapno / T_VECTOR_SIZE];
801 else
802 trap_name = "???? unrecognized exception";
803 if (message == NULL)
804 message = trap_name;
805
806 kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
807 cpu_number(), trap_name, dar, ssp->save_srr0);
808
809 print_backtrace(ssp);
810
811 draw_panic_dialog();
812
813 if( panicDebugging )
814 (void *)Call_Debugger(trapno, ssp);
815 panic(message);
816}
817
818void
819thread_syscall_return(
820 kern_return_t ret)
821{
822 register thread_act_t thr_act = current_act();
823 register struct savearea *regs = USER_REGS(thr_act);
824
825 if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) {
826 /* Mach trap */
827 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END,
828 ret, 0, 0, 0, 0);
829 }
830 regs->save_r3 = ret;
831
832 thread_exception_return();
833 /*NOTREACHED*/
834}
835
836
837#if MACH_KDB
/*
 * thread_kdb_return - re-enter the kernel debugger for the exception
 * recorded in the current activation's pcb, then return to user space.
 * Sanity-checks that no mutexes or simple locks are held on the way
 * out.  Does not return to the caller.
 */
void
thread_kdb_return(void)
{
	register thread_act_t thr_act = current_act();
	register thread_t cur_thr = current_thread();
	register struct savearea *regs = USER_REGS(thr_act);

	Call_Debugger(thr_act->mact.pcb->save_exception, regs);
#if MACH_LDEBUG
	assert(cur_thr->mutex_count == 0);	/* must not hold mutexes when returning to user */
#endif /* MACH_LDEBUG */
	check_simple_locks();
	thread_exception_return();
	/*NOTREACHED*/
}
853#endif /* MACH_KDB */