/*
 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <mach_kdb.h>
#include <mach_kdp.h>
#include <debug.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>

#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/exception.h>
#include <kern/syscall_sw.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>

#include <vm/vm_fault.h>
#include <vm/vm_kern.h>     /* For kernel_map */

#include <ppc/misc_protos.h>
#include <ppc/trap.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>   /* for SR_xxx definitions */
#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/mappings.h>
#include <ppc/Firmware.h>
#include <ppc/low_trace.h>
#include <ppc/Diagnostics.h>
#include <ppc/hw_perfmon.h>
#include <ppc/fpu_protos.h>

#include <sys/kdebug.h>

perfCallback perfTrapHook;  /* Pointer to CHUD trap hook routine */
perfCallback perfASTHook;   /* Pointer to CHUD AST hook routine */

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(ppc_saved_state_t *sv);

/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */

extern boolean_t dtrace_tally_fault(user_addr_t);
#endif

#if MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>

boolean_t let_ddb_vm_fault = FALSE;
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

#endif /* MACH_KDB */

extern task_t bsd_init_task;
extern char init_task_failure_data[];
extern int not_in_kdp;

#define PROT_EXEC   (VM_PROT_EXECUTE)
#define PROT_RO     (VM_PROT_READ)
#define PROT_RW     (VM_PROT_READ|VM_PROT_WRITE)


/* A useful macro to update the ppc_exception_state in the PCB
 * before calling doexception
 */
#define UPDATE_PPC_EXCEPTION_STATE { \
    thread_t _thread = current_thread(); \
    _thread->machine.pcb->save_dar = (uint64_t)dar; \
    _thread->machine.pcb->save_dsisr = dsisr; \
    _thread->machine.pcb->save_exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \
}
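/*
 * Note: the dar/dsisr/exception values stashed in the PCB here are
 * essentially what a Mach exception handler or debugger later sees as
 * the thread's PPC_EXCEPTION_STATE for the fault being reported.
 */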

void unresolved_kernel_trap(int trapno,
                            struct savearea *ssp,
                            unsigned int dsisr,
                            addr64_t dar,
                            const char *message);

static void handleMck(struct savearea *ssp);    /* Common machine check handler */

#ifdef MACH_BSD
extern void get_procrustime(time_value_t *);
extern void bsd_uprofil(time_value_t *, user_addr_t);
#endif /* MACH_BSD */

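/*
 * trap() is the common high-level handler for synchronous exceptions.
 * The low-level exception path hands it the trap code, the savearea of
 * the interrupted context, and the captured DSISR/DAR; the savearea it
 * returns is the one used to resume.  Kernel-mode and user-mode traps
 * are dispatched separately below.
 */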
struct savearea *trap(int trapno,
                      struct savearea *ssp,
                      unsigned int dsisr,
                      addr64_t dar)
{
    int exception;
    mach_exception_code_t code = 0;
    mach_exception_subcode_t subcode = 0;
    vm_map_t map;
    vm_map_offset_t offset;
    thread_t thread = current_thread();
    boolean_t intr;
    ast_t *myast;
    int ret;

#ifdef MACH_BSD
    time_value_t tv;
#endif /* MACH_BSD */

    myast = ast_pending();
    if(perfASTHook) {
        if(*myast & AST_CHUD_ALL) {
            perfASTHook(trapno, ssp, dsisr, (unsigned int)dar);
        }
    } else {
        *myast &= ~AST_CHUD_ALL;
    }

    if(perfTrapHook) {                          /* Is there a hook? */
        if(perfTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp; /* If it succeeds, we are done... */
    }

#if CONFIG_DTRACE
    if(tempDTraceTrapHook) {                    /* Is there a hook? */
        if(tempDTraceTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp;   /* If it succeeds, we are done... */
    }
#endif

#if 0
    {
        extern void fctx_test(void);
        fctx_test();
    }
#endif

    exception = 0;                              /* Clear exception for now */

/*
 *  Remember that we are disabled for interruptions when we come in here.
 *  Because of latency concerns, we want to re-enable interruptions as
 *  soon as we can, provided the interrupted context had them enabled.
 */

9bccf70c | 179 | intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0; /* Remember if we were enabled */ |
1c79356b A |
180 | |
181 | /* Handle kernel traps first */ | |
182 | ||
9bccf70c | 183 | if (!USER_MODE(ssp->save_srr1)) { |
1c79356b A |
184 | /* |
185 | * Trap came from kernel | |
186 | */ | |
2d21ac55 | 187 | switch (trapno) { |
1c79356b A |
188 | |
189 | case T_PREEMPT: /* Handle a preempt trap */ | |
55e303ae | 190 | ast_taken(AST_PREEMPTION, FALSE); |
1c79356b A |
191 | break; |
192 | ||
55e303ae A |
193 | case T_PERF_MON: |
194 | perfmon_handle_pmi(ssp); | |
195 | break; | |
196 | ||
1c79356b | 197 | case T_RESET: /* Reset interruption */ |
55e303ae A |
198 | if (!Call_Debugger(trapno, ssp)) |
199 | unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); | |
1c79356b A |
200 | break; /* We just ignore these */ |
201 | ||
        /*
         * These trap types should never be seen by trap()
         * in kernel mode, anyway.  Some are interrupts that
         * should be seen by interrupt(); others just don't
         * happen because they are handled elsewhere.  Some
         * could happen but are considered to be fatal in
         * kernel mode.
         */
        case T_DECREMENTER:
        case T_IN_VAIN:         /* Shouldn't ever see this, lowmem_vectors eats it */
        case T_SYSTEM_MANAGEMENT:
        case T_ALTIVEC_ASSIST:
        case T_INTERRUPT:
        case T_FP_UNAVAILABLE:
        case T_IO_ERROR:
        case T_RESERVED:
        default:
            unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;

        /*
         * Here we handle a machine check in the kernel
         */

        case T_MACHINE_CHECK:
            handleMck(ssp);         /* Common to both user and kernel */
            break;

        case T_ALIGNMENT:
            /*
             * If enaNotifyEMb is set, we get here, and
             * we have actually already emulated the unaligned access.
             * All that we want to do here is to ignore the interrupt. This is to allow logging or
             * tracing of unaligned accesses.
             */

            if(ssp->save_hdr.save_misc3) {  /* Was it a handled exception? */
                unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);  /* Go panic */
                break;
            }
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
                (int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
            break;

        case T_EMULATE:
            /*
             * If enaNotifyEMb is set we get here, and
             * we have actually already emulated the instruction.
             * All that we want to do here is to ignore the interrupt. This is to allow logging or
             * tracing of emulated instructions.
             */

            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
                (int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
            break;

        case T_TRACE:
        case T_RUNMODE_TRACE:
        case T_INSTRUCTION_BKPT:
            if (!Call_Debugger(trapno, ssp))
                unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;

        case T_PROGRAM:
            if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
                if (!Call_Debugger(trapno, ssp))
                    unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            } else {
                unresolved_kernel_trap(trapno, ssp,
                    dsisr, dar, NULL);
            }
            break;

        case T_DATA_ACCESS:
#if MACH_KDB
            mp_disable_preemption();
            if (debug_mode
                && getPerProc()->debugger_active
                && !let_ddb_vm_fault) {
                /*
                 * Force kdb to handle this one.
                 */
                kdb_trap(trapno, ssp);
            }
            mp_enable_preemption();
#endif /* MACH_KDB */
            /* can we take this during normal panic dump operation? */
            if (debug_mode
                && getPerProc()->debugger_active
                && !not_in_kdp) {
                /*
                 * Access fault while in kernel core dump.
                 */
                kdp_dump_trap(trapno, ssp);
            }

            if(ssp->save_dsisr & dsiInvMode) {  /* Did someone try to reserve cache inhibited? */
                panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar);
            }

            if(intr) ml_set_interrupts_enabled(TRUE);   /* Enable if we were */

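            /*
             * Addresses in segments 0xE and 0xF of the kernel's address
             * space are the per-thread user memory window used by
             * copyin/copyout; anything outside that range is handled as
             * an ordinary kernel_map fault right here.
             */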
            if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF)) { /* User memory window access? */

                offset = (vm_map_offset_t)dar;  /* Set the failing address */
                map = kernel_map;               /* No, this is a normal kernel access */

                /*
                 * Note: Some ROM device drivers will access page 0 when they start.  The IOKit will
                 * set a flag to tell us to ignore any access fault on page 0.  After the driver is
                 * opened, it will clear the flag.
                 */
                if((0 == (offset & -PAGE_SIZE)) &&      /* Check for access of page 0 and */
                   ((thread->machine.specFlags) & ignoreZeroFault)) {   /* special case of ignoring page zero faults */
                    ssp->save_srr0 += 4;        /* Point to next instruction */
                    break;
                }

#if CONFIG_DTRACE
                if (thread->options & TH_OPT_DTRACE) {  /* Executing under dtrace_probe? */
                    if (dtrace_tally_fault(dar)) {      /* Should a fault under dtrace be ignored? */
                        ssp->save_srr0 += 4;    /* Point to next instruction */
                        break;
                    } else {
                        unresolved_kernel_trap(trapno, ssp, dsisr, dar, "Unexpected page fault under dtrace_probe");
                    }
                }
#endif

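                /*
                 * If vm_fault() resolves the page we do not restart the
                 * instruction directly; SAVredrive is set so the low-level
                 * DSI handler re-drives the access, and DSISR is rewritten
                 * so the retry looks like a plain hash-table miss.
                 */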
                code = vm_fault(map, vm_map_trunc_page(offset),
                        dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
                        FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

                if (code != KERN_SUCCESS) {
                    unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
                } else {
                    ssp->save_hdr.save_flags |= SAVredrive;     /* Tell low-level to re-try fault */
                    ssp->save_dsisr = (ssp->save_dsisr &
                        ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);   /* Make sure this is marked as a miss */
                }
                break;
            }

            /* If we get here, the fault was due to a user memory window access */

#if CONFIG_DTRACE
            if (thread->options & TH_OPT_DTRACE) {      /* Executing under dtrace_probe? */
                if (dtrace_tally_fault(dar)) {  /* Should a user memory window access fault under dtrace be ignored? */
                    if (thread->recover) {
                        ssp->save_srr0 = thread->recover;
                        thread->recover = (vm_offset_t)NULL;
                    } else {
                        unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
                    }
                    break;
                } else {
                    unresolved_kernel_trap(trapno, ssp, dsisr, dar, "Unexpected UMW page fault under dtrace_probe");
                }
            }
#endif

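            /*
             * Fault was in the user memory window: convert the window
             * address back to the corresponding user-space address by
             * adding the per-thread relocation value (machine.umwRelo),
             * then resolve it against the user's map instead of kernel_map.
             */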
            map = thread->map;

            offset = (vm_map_offset_t)(thread->machine.umwRelo + dar);  /* Compute the user space address */

            code = vm_fault(map, vm_map_trunc_page(offset),
                    dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
                    FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

            /* If we failed, there should be a recovery
             * spot to rfi to.
             */
            if (code != KERN_SUCCESS) {
                if (thread->recover) {
                    ssp->save_srr0 = thread->recover;
                    thread->recover = (vm_offset_t)NULL;
                } else {
                    unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
                }
            }
            else {
                ssp->save_hdr.save_flags |= SAVredrive;     /* Tell low-level to re-try fault */
                ssp->save_dsisr = (ssp->save_dsisr &
                    ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);   /* Make sure this is marked as a miss */
            }

            break;

        case T_INSTRUCTION_ACCESS:

#if MACH_KDB
            if (debug_mode
                && getPerProc()->debugger_active
                && !let_ddb_vm_fault) {
                /*
                 * Force kdb to handle this one.
                 */
                kdb_trap(trapno, ssp);
            }
#endif /* MACH_KDB */

            /* Same as for data access, except fault type
             * is PROT_EXEC and addr comes from srr0
             */

            if(intr) ml_set_interrupts_enabled(TRUE);   /* Enable if we were */

            map = kernel_map;

            code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
                    (PROT_EXEC | PROT_RO), FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

            if (code != KERN_SUCCESS) {
                unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            } else {
                ssp->save_hdr.save_flags |= SAVredrive;     /* Tell low-level to re-try fault */
                ssp->save_srr1 = (ssp->save_srr1 &
                    ~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);   /* Make sure this is marked as a miss */
            }
            break;

        /* Usually shandler handles all the system calls, but the
         * atomic thread switcher may throw up (via thandler) and
         * have to pass it up to the exception handler.
         */

        case T_SYSTEM_CALL:
            unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;

        case T_AST:
            unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;
        }
    } else {

        /*
         * Processing for user state traps with interrupt enabled
         * For T_AST, interrupts are enabled in the AST delivery
         */
        if (trapno != T_AST)
            ml_set_interrupts_enabled(TRUE);

#ifdef MACH_BSD
        {
            get_procrustime(&tv);
        }
#endif /* MACH_BSD */


        /*
         * Trap came from user task
         */

        switch (trapno) {

        case T_PREEMPT:
            unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
            break;

        case T_PERF_MON:
            perfmon_handle_pmi(ssp);
            break;

        /*
         * These trap types should never be seen by trap().
         * Some are interrupts that should be seen by
         * interrupt(); others just don't happen because they
         * are handled elsewhere.
         */
        case T_DECREMENTER:
        case T_IN_VAIN:         /* Shouldn't ever see this, lowmem_vectors eats it */
        case T_INTERRUPT:
        case T_FP_UNAVAILABLE:
        case T_SYSTEM_MANAGEMENT:
        case T_RESERVED:
        case T_IO_ERROR:

        default:

            ml_set_interrupts_enabled(FALSE);   /* Turn off interruptions */

            panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
                  cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
            break;


        /*
         * Here we handle a machine check in user state
         */

        case T_MACHINE_CHECK:
            handleMck(ssp);         /* Common to both user and kernel */
            break;

        case T_RESET:
            ml_set_interrupts_enabled(FALSE);   /* Turn off interruptions */
            if (!Call_Debugger(trapno, ssp))
                panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
                      ssp->save_srr0, ssp->save_srr1);
            break;                  /* We just ignore these */

        case T_ALIGNMENT:
            /*
             * If enaNotifyEMb is set, we get here, and
             * we have actually already emulated the unaligned access.
             * All that we want to do here is to ignore the interrupt. This is to allow logging or
             * tracing of unaligned accesses.
             */

            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
                (int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);

            if(ssp->save_hdr.save_misc3) {      /* Was it a handled exception? */
                exception = EXC_BAD_ACCESS;     /* Yes, throw exception */
                code = EXC_PPC_UNALIGNED;
                subcode = dar;
            }
            break;

        case T_EMULATE:
            /*
             * If enaNotifyEMb is set we get here, and
             * we have actually already emulated the instruction.
             * All that we want to do here is to ignore the interrupt. This is to allow logging or
             * tracing of emulated instructions.
             */

            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
                (int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
            break;

        case T_TRACE:           /* Real PPC chips */
        case T_INSTRUCTION_BKPT:
            exception = EXC_BREAKPOINT;
            code = EXC_PPC_TRACE;
            subcode = ssp->save_srr0;
            break;

        case T_PROGRAM:
            if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
                fpu_save(thread->machine.curctx);
                UPDATE_PPC_EXCEPTION_STATE;
                exception = EXC_ARITHMETIC;
                code = EXC_ARITHMETIC;

                mp_disable_preemption();
                subcode = ssp->save_fpscr;
                mp_enable_preemption();
            }
            else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {

                UPDATE_PPC_EXCEPTION_STATE
                exception = EXC_BAD_INSTRUCTION;
                code = EXC_PPC_UNIPL_INST;
                subcode = ssp->save_srr0;
            } else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {

                UPDATE_PPC_EXCEPTION_STATE;
                exception = EXC_BAD_INSTRUCTION;
                code = EXC_PPC_PRIVINST;
                subcode = ssp->save_srr0;
            } else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
                unsigned int inst;

                if (copyin(ssp->save_srr0, (char *) &inst, 4 )) panic("copyin failed\n");

                if(dgWork.dgFlags & enaDiagTrap) {      /* Is the diagnostic trap enabled? */
                    if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) {     /* Is this a TWI 31,R31,0xFFFx? */
                        if(diagTrap(ssp, inst & 0xF)) {         /* Call the trap code */
                            ssp->save_srr0 += 4ULL;     /* If we eat the trap, bump pc */
                            exception = 0;              /* Clear exception */
                            break;                      /* All done here */
                        }
                    }
                }

#if CONFIG_DTRACE
                if(inst == 0x0FFFDDDD) {        /* Is this the dtrace trap? */
                    ret = dtrace_user_probe((ppc_saved_state_t *)ssp);  /* Go check if it is for real and process if so... */
                    if(ret == KERN_SUCCESS) {   /* Was it really? */
                        exception = 0;          /* Clear the exception */
                        break;                  /* Go flow through and out... */
                    }
                }
#endif

                UPDATE_PPC_EXCEPTION_STATE;

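                /*
                 * 0x7FE00008 is "tw 31,0,0", the unconditional trap used
                 * as a breakpoint; it is reported as EXC_BREAKPOINT, while
                 * any other trap instruction becomes a generic EXC_SOFTWARE
                 * trap.
                 */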
                if (inst == 0x7FE00008) {
                    exception = EXC_BREAKPOINT;
                    code = EXC_PPC_BREAKPOINT;
                } else {
                    exception = EXC_SOFTWARE;
                    code = EXC_PPC_TRAP;
                }
                subcode = ssp->save_srr0;
            }
            break;

#if CONFIG_DTRACE
        case T_DTRACE_RET:      /* Are we returning from a dtrace injection? */
            ret = dtrace_user_probe((ppc_saved_state_t *)ssp);  /* Call the probe function if so... */
            if(ret == KERN_SUCCESS) {   /* Did this actually work? */
                exception = 0;          /* Clear the exception */
                break;                  /* Go flow through and out... */
            }
            break;
#endif

        case T_ALTIVEC_ASSIST:
            UPDATE_PPC_EXCEPTION_STATE;
            exception = EXC_ARITHMETIC;
            code = EXC_PPC_ALTIVECASSIST;
            subcode = ssp->save_srr0;
            break;

        case T_DATA_ACCESS:
            map = thread->map;

            if(ssp->save_dsisr & dsiInvMode) {  /* Did someone try to reserve cache inhibited? */
                UPDATE_PPC_EXCEPTION_STATE;     /* Don't even bother VM with this one */
                exception = EXC_BAD_ACCESS;
                subcode = dar;
                break;
            }

            code = vm_fault(map, vm_map_trunc_page(dar),
                    dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
                    FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

            if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
                UPDATE_PPC_EXCEPTION_STATE;
                exception = EXC_BAD_ACCESS;
                subcode = dar;
            } else {
                ssp->save_hdr.save_flags |= SAVredrive;     /* Tell low-level to retry fault */
                ssp->save_dsisr = (ssp->save_dsisr &
                    ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);   /* Make sure this is marked as a miss */
            }
            break;

        case T_INSTRUCTION_ACCESS:
            /* Same as for data access, except fault type
             * is PROT_EXEC and addr comes from srr0
             */
            map = thread->map;

            code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
                    (PROT_EXEC | PROT_RO), FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

            if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
                UPDATE_PPC_EXCEPTION_STATE;
                exception = EXC_BAD_ACCESS;
                subcode = ssp->save_srr0;
            } else {
                ssp->save_hdr.save_flags |= SAVredrive;     /* Tell low-level to re-try fault */
                ssp->save_srr1 = (ssp->save_srr1 &
                    ~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);   /* Make sure this is marked as a miss */
            }
            break;

        case T_AST:
            /* AST delivery is done below */
            break;

        }

#ifdef MACH_BSD
        {
            bsd_uprofil(&tv, ssp->save_srr0);
        }
#endif /* MACH_BSD */
    }

    if (exception) {
        /* if this is the init task, save the exception information */
        /* this probably is a fatal exception */
#if 0
        if(bsd_init_task == current_task()) {
            char *buf;
            int i;

            buf = init_task_failure_data;


            buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
            buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n"
                , dsisr, dar);

            for (i=0; i<32; i++) {
                if ((i % 8) == 0) {
                    buf += sprintf(buf, "\n%4d :",i);
                }
                buf += sprintf(buf, " %08x",*(&ssp->save_r0+i));
            }

            buf += sprintf(buf, "\n\n");
            buf += sprintf(buf, "cr = 0x%08X\t\t",ssp->save_cr);
            buf += sprintf(buf, "xer = 0x%08X\n",ssp->save_xer);
            buf += sprintf(buf, "lr = 0x%016llX\t\t",ssp->save_lr);
            buf += sprintf(buf, "ctr = 0x%016llX\n",ssp->save_ctr);
            buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t",ssp->save_srr0);
            buf += sprintf(buf, "srr1(msr) = 0x%016llX\n",ssp->save_srr1,
                           "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
                           "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
            buf += sprintf(buf, "\n\n");

            /* generate some stack trace */
            buf += sprintf(buf, "Application level back trace:\n");
            if (ssp->save_srr1 & MASK(MSR_PR)) {
                char *addr = (char*)ssp->save_r1;
                unsigned int stack_buf[3];
                for (i = 0; i < 8; i++) {
                    if (addr == (char*)NULL)
                        break;
                    if (!copyin(ssp->save_r1,(char*)stack_buf,
                                3 * sizeof(int))) {
                        buf += sprintf(buf, "0x%08X : 0x%08X\n"
                            ,addr,stack_buf[2]);
                        addr = (char*)stack_buf[0];
                    } else {
                        break;
                    }
                }
            }
            buf[0] = '\0';
        }
#endif
        doexception(exception, code, subcode);
    }
    /* AST delivery
     * Check to see if we need an AST, if so take care of it here
     */
    ml_set_interrupts_enabled(FALSE);

    if (USER_MODE(ssp->save_srr1)) {
        myast = ast_pending();
        while (*myast & AST_ALL) {
            ast_taken(AST_ALL, intr);
            ml_set_interrupts_enabled(FALSE);
            myast = ast_pending();
        }
    }

    return ssp;
}

/* This routine is called from assembly before each and every system call.
 * It must preserve r3.
 */

extern int syscall_trace(int, struct savearea *);


extern int pmdebug;

int syscall_trace(int retval, struct savearea *ssp)
{
    int i, argc;
    int kdarg[3];
    /* Always prepare to trace mach system calls */

    kdarg[0]=0;
    kdarg[1]=0;
    kdarg[2]=0;

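    /* Mach traps arrive with a negative call number in r0, so negate it
     * to index mach_trap_table and pick up the argument count. */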
    argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count;

    if (argc > 3)
        argc = 3;

    for (i=0; i < argc; i++)
        kdarg[i] = (int)*(&ssp->save_r3 + i);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
        kdarg[0], kdarg[1], kdarg[2], 0, 0);

    return retval;
}

/* This routine is called from assembly after each mach system call
 * It must preserve r3.
 */

extern int syscall_trace_end(int, struct savearea *);

int syscall_trace_end(int retval, struct savearea *ssp)
{
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-((unsigned int)ssp->save_r0))) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);
    return retval;
}

/*
 * called from syscall if there is an error
 */

int syscall_error(
    int exception,
    mach_exception_code_t code,
    mach_exception_subcode_t subcode,
    struct savearea *ssp)
{
    register thread_t thread;

    thread = current_thread();

    if (thread == 0)
        panic("syscall error in boot phase");

    if (!USER_MODE(ssp->save_srr1))
        panic("system call called from kernel");

    doexception(exception, code, subcode);

    return 0;
}

/* Pass up a server syscall/exception */
void
doexception(
    int exc,
    mach_exception_code_t code,
    mach_exception_subcode_t sub)
{
    mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];

    codes[0] = code;
    codes[1] = sub;
    exception_triage(exc, codes, 2);
}

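/*
 * Human-readable names for the exception vectors, indexed by
 * trapno / T_VECTOR_SIZE; used by unresolved_kernel_trap() below.
 */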
const char *trap_type[] = {
    "Unknown",
    "0x100 - System reset",
    "0x200 - Machine check",
    "0x300 - Data access",
    "0x400 - Inst access",
    "0x500 - Ext int",
    "0x600 - Alignment",
    "0x700 - Program",
    "0x800 - Floating point",
    "0x900 - Decrementer",
    "0xA00 - n/a",
    "0xB00 - n/a",
    "0xC00 - System call",
    "0xD00 - Trace",
    "0xE00 - FP assist",
    "0xF00 - Perf mon",
    "0xF20 - VMX",
    "INVALID EXCEPTION",
    "INVALID EXCEPTION",
    "INVALID EXCEPTION",
    "0x1300 - Inst bkpnt",
    "0x1400 - Sys mgmt",
    "0x1600 - Altivec Assist",
    "0x1700 - Thermal",
    "INVALID EXCEPTION",
    "INVALID EXCEPTION",
    "INVALID EXCEPTION",
    "INVALID EXCEPTION",
    "INVALID EXCEPTION",
    "INVALID EXCEPTION",
    "INVALID EXCEPTION",
    "INVALID EXCEPTION",
    "Emulate",
    "0x2000 - Run Mode/Trace",
    "Signal Processor",
    "Preemption",
    "Context Switch",
    "Shutdown",
    "System Failure"
};
int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);

void unresolved_kernel_trap(int trapno,
                            struct savearea *ssp,
                            __unused unsigned int dsisr,
                            addr64_t dar,
                            const char *message)
{
    const char *trap_name;

    ml_set_interrupts_enabled(FALSE);   /* Turn off interruptions */
    lastTrace = LLTraceSet(0);          /* Disable low-level tracing */

#if 0
    {
        struct per_proc_info *pp;
        kprintf(" srr0: %016llX\n", ssp->save_srr0);    /* (TEST/DEBUG) */
        kprintf(" srr1: %016llX\n", ssp->save_srr1);    /* (TEST/DEBUG) */
        kprintf(" dar: %016llX\n", ssp->save_dar);      /* (TEST/DEBUG) */
        kprintf(" xcp: %08X\n", ssp->save_exception);   /* (TEST/DEBUG) */
        kprintf(" ins0: %08X\n", ssp->save_instr[0]);   /* (TEST/DEBUG) */
        kprintf(" ins1: %08X\n", ssp->save_instr[1]);   /* (TEST/DEBUG) */
        kprintf(" ins2: %08X\n", ssp->save_instr[2]);   /* (TEST/DEBUG) */
        kprintf(" ins3: %08X\n", ssp->save_instr[3]);   /* (TEST/DEBUG) */
        kprintf(" ins4: %08X\n", ssp->save_instr[4]);   /* (TEST/DEBUG) */
        kprintf(" ins5: %08X\n", ssp->save_instr[5]);   /* (TEST/DEBUG) */
        kprintf(" ins6: %08X\n", ssp->save_instr[6]);   /* (TEST/DEBUG) */
        kprintf(" ins7: %08X\n", ssp->save_instr[7]);   /* (TEST/DEBUG) */
        pp = getPerProc();                              /* (TEST/DEBUG) */
        kprintf("ijsave: %016llX\n", pp->ijsave);       /* (TEST/DEBUG) */
    }
#endif

    if( logPanicDataToScreen )
        disable_debug_output = FALSE;

    debug_mode++;
    if ((unsigned)trapno <= T_MAX)
        trap_name = trap_type[trapno / T_VECTOR_SIZE];
    else
        trap_name = "???? unrecognized exception";
    if (message == NULL)
        message = trap_name;

    kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
               cpu_number(), trap_name, dar, ssp->save_srr0);

    print_backtrace(ssp);

    panic_caller = (0xFFFF0000 | (trapno / T_VECTOR_SIZE) );
    /* Commit the panic log buffer to NVRAM, unless otherwise
     * specified via a boot-arg.
     */
    if (panicDebugging)
        commit_paniclog();

    draw_panic_dialog();
    /* XXX: This is yet another codepath into the debugger, which should
     * be reworked to enter the primary panic codepath instead.
     * The idea appears to be to enter the debugger (performing a
     * stack switch) as soon as possible, but we do have a
     * savearea encapsulating state (accessible by walking the savearea
     * chain), so that's superfluous.
     */
    if( panicDebugging )
        (void)Call_Debugger(trapno, ssp);
    panic_plain(message);
}

const char *corr[2] = {"uncorrected", "corrected "};

void handleMck(struct savearea *ssp) {          /* Common machine check handler */

    int cpu;

    cpu = cpu_number();

    printf("Machine check (%d) - %s - pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n",
        cpu, corr[ssp->save_hdr.save_misc3], ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar);   /* Tell us about it */
    printf("Machine check (%d) - AsyncSrc = %016llX, CoreFIR = %016llx\n", cpu, ssp->save_xdat0, ssp->save_xdat1);
    printf("Machine check (%d) - L2FIR = %016llX, BusFir = %016llx\n", cpu, ssp->save_xdat2, ssp->save_xdat3);

    if(ssp->save_hdr.save_misc3) return;        /* Leave if the machine check was corrected */

    panic("Uncorrectable machine check: pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n"
          " AsyncSrc = %016llX, CoreFIR = %016llx\n"
          " L2FIR = %016llX, BusFir = %016llx\n",
          ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar,
          ssp->save_xdat0, ssp->save_xdat1, ssp->save_xdat2, ssp->save_xdat3);

    return;
}

void
thread_syscall_return(
    kern_return_t ret)
{
    register thread_t thread = current_thread();
    register struct savearea *regs = USER_REGS(thread);

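    /* A high-bit-set (negative) r0 means the call was a Mach trap; emit the
     * matching kdebug end event before handing the result back in r3. */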
    if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) {
        /* Mach trap */
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END,
            ret, 0, 0, 0, 0);
    }
    regs->save_r3 = ret;

    thread_exception_return();
    /*NOTREACHED*/
}


#if MACH_KDB
void
thread_kdb_return(void)
{
    register thread_t thread = current_thread();
    register struct savearea *regs = USER_REGS(thread);

    Call_Debugger(thread->machine.pcb->save_exception, regs);
    thread_exception_return();
    /*NOTREACHED*/
}
#endif /* MACH_KDB */