[apple/xnu.git] osfmk/i386/trap.c (xnu-792.22.5)
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * Hardware trap/fault handler.
 */

#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h> /* panic_io_port_read() */

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#include <sys/kdebug.h>

#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif /* MACH_KGDB */

#if MACH_KDB
#include <debug.h>
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif /* MACH_KDB */

#include <string.h>

#include <i386/io_emulate.h>
#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#include <i386/machine_check.h>
#include <mach/i386/syscall_sw.h>

/*
 * Forward declarations
 */
static void user_page_fault_continue(kern_return_t kret);
static void panic_trap(x86_saved_state32_t *saved_state);
static void set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip);

perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */
perfCallback perfASTHook  = NULL; /* Pointer to CHUD AST hook routine */

void
thread_syscall_return(
        kern_return_t ret)
{
        thread_t thr_act = current_thread();

        if (thread_is_64bit(thr_act)) {
                x86_saved_state64_t *regs;

                regs = USER_REGS64(thr_act);

                if (kdebug_enable && ((regs->rax & SYSCALL_CLASS_MASK) == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT))) {
                        /* Mach trap */
                        KERNEL_DEBUG_CONSTANT(
                                MACHDBG_CODE(DBG_MACH_EXCP_SC, ((int) (regs->rax & SYSCALL_NUMBER_MASK)))
                                | DBG_FUNC_END,
                                ret, 0, 0, 0, 0);
                }
                regs->rax = ret;

        } else {
                x86_saved_state32_t *regs;

                regs = USER_REGS32(thr_act);

                if (kdebug_enable && ((int) regs->eax < 0)) {
                        /* Mach trap */
                        KERNEL_DEBUG_CONSTANT(
                                MACHDBG_CODE(DBG_MACH_EXCP_SC, -((int) regs->eax))
                                | DBG_FUNC_END,
                                ret, 0, 0, 0, 0);
                }
                regs->eax = ret;
        }
        thread_exception_return();
        /*NOTREACHED*/
}
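
/*
 * A note on the two syscall-number encodings tested above, with constants
 * assumed to come from <mach/i386/syscall_sw.h>: the 32-bit ABI marks Mach
 * traps by negating the trap number in %eax (BSD syscalls are positive),
 * hence the "(int) regs->eax < 0" test; the 64-bit ABI instead packs a
 * class byte into the top bits of %rax.  An illustrative decode:
 *
 *      int num = (int) (regs->rax & SYSCALL_NUMBER_MASK);
 *      int cls = (int) ((regs->rax & SYSCALL_CLASS_MASK) >> SYSCALL_CLASS_SHIFT);
 *      if (cls == SYSCALL_CLASS_MACH)
 *              handle_mach_trap(num);          // hypothetical helper
 */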


#if MACH_KDB
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

void
thread_kdb_return(void)
{
        thread_t thr_act = current_thread();
        x86_saved_state_t *iss = USER_STATE(thr_act);

        if (is_saved_state64(iss)) {
                x86_saved_state64_t *regs;

                regs = saved_state64(iss);

                if (kdb_trap(regs->isf.trapno, (int)regs->isf.err, (void *)regs)) {
                        thread_exception_return();
                        /*NOTREACHED*/
                }

        } else {
                x86_saved_state32_t *regs;

                regs = saved_state32(iss);

                if (kdb_trap(regs->trapno, regs->err, (void *)regs)) {
                        thread_exception_return();
                        /*NOTREACHED*/
                }
        }
}

#endif /* MACH_KDB */

void
user_page_fault_continue(
        kern_return_t kr)
{
        thread_t thread = current_thread();
        x86_saved_state_t *regs = USER_STATE(thread);
        ast_t *myast;
        boolean_t intr;
        user_addr_t vaddr;
#if MACH_KDB
        int err;
        int trapno;
#endif

        assert((is_saved_state32(regs) && !thread_is_64bit(thread)) ||
               (is_saved_state64(regs) &&  thread_is_64bit(thread)));

        if (thread_is_64bit(thread)) {
                x86_saved_state64_t *uregs;

                uregs = USER_REGS64(thread);

#if MACH_KDB
                trapno = uregs->isf.trapno;
                err = uregs->isf.err;
#endif
                vaddr = (user_addr_t)uregs->cr2;
        } else {
                x86_saved_state32_t *uregs;

                uregs = USER_REGS32(thread);

#if MACH_KDB
                trapno = uregs->trapno;
                err = uregs->err;
#endif
                vaddr = uregs->cr2;
        }

        if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if MACH_KDB
                if (!db_breakpoints_inserted) {
                        db_set_breakpoints();
                }
                if (db_watchpoint_list &&
                    db_watchpoints_inserted &&
                    (err & T_PF_WRITE) &&
                    db_find_watchpoint(thread->map,
                                       (vm_offset_t)vaddr,
                                       regs))
                        kdb_trap(T_WATCHPOINT, 0, regs);
#endif /* MACH_KDB */
                intr = ml_set_interrupts_enabled(FALSE);
                myast = ast_pending();
                while (*myast & AST_ALL) {
                        ast_taken(AST_ALL, intr);
                        ml_set_interrupts_enabled(FALSE);
                        myast = ast_pending();
                }
                ml_set_interrupts_enabled(intr);

                thread_exception_return();
                /*NOTREACHED*/
        }

#if MACH_KDB
        if (debug_all_traps_with_kdb &&
            kdb_trap(trapno, err, regs)) {
                thread_exception_return();
                /*NOTREACHED*/
        }
#endif /* MACH_KDB */

        i386_exception(EXC_BAD_ACCESS, kr, vaddr);
        /*NOTREACHED*/
}

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
        uint32_t fault_addr;
        uint32_t recover_addr;
};

extern struct recovery recover_table[];
extern struct recovery recover_table_end[];
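
/*
 * Each entry pairs the address of an instruction that may fault while
 * touching user memory with the address of its fixup code; the table
 * itself is assumed to be emitted by the assembly implementations of the
 * copyin/copyout routines.  A sketch of the lookup kernel_trap() performs
 * when a fault hits one of those instructions:
 *
 *      struct recovery *rp;
 *
 *      for (rp = recover_table; rp < recover_table_end; rp++)
 *              if (kern_ip == rp->fault_addr) {
 *                      set_recovery_ip(saved_state, rp->recover_addr);
 *                      return;         // resume at the fixup code
 *              }
 */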

const char *trap_type[] = {TRAP_NAMES};
unsigned TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);

static inline void
reset_dr7(void)
{
        uint32_t dr7 = 0x400; /* magic dr7 reset value */
        __asm__ volatile("movl %0,%%dr7" : : "r" (dr7));
}
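
/*
 * 0x400 is simply bit 10 of DR7, which the architecture defines as
 * reserved and always set; writing that value alone clears every
 * local/global enable bit, disabling all four hardware breakpoints.
 */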
#if MACH_KDP
unsigned kdp_has_active_watchpoints = 0;
#endif
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  (The handler returns normally once the trap has been
 * handled; otherwise it panics.)
 */
void
kernel_trap(
        x86_saved_state_t *state)
{
        x86_saved_state32_t *saved_state;
        int code;
        user_addr_t vaddr;
        int type;
        vm_map_t map;
        kern_return_t result = KERN_FAILURE;
        thread_t thread;
        ast_t *myast;
        boolean_t intr;
        vm_prot_t prot;
        struct recovery *rp;
        vm_offset_t kern_ip;
        int fault_in_copy_window = -1;
        int is_user = 0;
#if MACH_KDB
        pt_entry_t *pte;
#endif /* MACH_KDB */

        thread = current_thread();

        if (is_saved_state64(state))
                panic("kernel_trap(%p) with 64-bit state", state);
        saved_state = saved_state32(state);

        vaddr = (user_addr_t)saved_state->cr2;
        type = saved_state->trapno;
        code = saved_state->err & 0xffff;
        intr = (saved_state->efl & EFL_IF) != 0;        /* state of ints at trap */

        kern_ip = (vm_offset_t)saved_state->eip;

        myast = ast_pending();

        if (perfASTHook) {
                if (*myast & AST_CHUD_ALL)
                        perfASTHook(type, NULL, 0, 0);
        } else
                *myast &= ~AST_CHUD_ALL;

        /*
         * Is there a hook?
         */
        if (perfTrapHook) {
                if (perfTrapHook(type, NULL, 0, 0) == KERN_SUCCESS) {
                        /*
                         * If it succeeds, we are done...
                         */
                        return;
                }
        }
        /*
         * we come here with interrupts off as we don't want to recurse
         * on preemption below, but we do want to re-enable interrupts
         * as soon as we possibly can to hold latency down
         */
        if (T_PREEMPT == type) {

                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
                                      0, 0, 0, kern_ip, 0);

                ast_taken(AST_PREEMPTION, FALSE);
                return;
        }

        if (T_PAGE_FAULT == type) {
                /*
                 * assume we're faulting in the kernel map
                 */
                map = kernel_map;

                if (thread != THREAD_NULL && thread->map != kernel_map) {
                        vm_offset_t copy_window_base;
                        vm_offset_t kvaddr;
                        int window_index;

                        kvaddr = (vm_offset_t)vaddr;
                        /*
                         * must determine if fault occurred in
                         * the copy window while pre-emption is
                         * disabled for this processor so that
                         * we only need to look at the window
                         * associated with this processor
                         */
                        copy_window_base = current_cpu_datap()->cpu_copywindow_base;

                        if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS)) ) {

                                window_index = (kvaddr - copy_window_base) / NBPDE;

                                if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) {

                                        kvaddr -= (copy_window_base + (NBPDE * window_index));
                                        vaddr = thread->machine.copy_window[window_index].user_base + kvaddr;

                                        map = thread->map;
                                        fault_in_copy_window = window_index;
                                }
                                is_user = -1;
                        }
                }
        }
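        /*
         * Worked example of the remap above (values hypothetical, non-PAE
         * NBPDE = 4MB assumed): with copy_window_base = 0xff800000, a fault
         * at kvaddr 0xffc01000 gives window_index = 0x401000 / NBPDE = 1;
         * subtracting that window's base leaves offset 0x1000, so the fault
         * is redirected to copy_window[1].user_base + 0x1000 in the user's
         * own map rather than being treated as a kernel_map fault.
         */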
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
                              (int)(vaddr >> 32), (int)vaddr, is_user, kern_ip, 0);


        (void) ml_set_interrupts_enabled(intr);

        switch (type) {

            case T_NO_FPU:
                fpnoextflt();
                return;

            case T_FPU_FAULT:
                fpextovrflt();
                return;

            case T_FLOATING_POINT_ERROR:
                fpexterrflt();
                return;

            case T_SSE_FLOAT_ERROR:
                fpSSEexterrflt();
                return;
            case T_DEBUG:
                if ((saved_state->efl & EFL_TF) == 0
                    && !kdp_has_active_watchpoints) {
                        /* We've somehow encountered a debug
                         * register match that does not belong
                         * to the kernel debugger.
                         * This isn't supposed to happen.
                         */
                        reset_dr7();
                        return;
                }
                goto debugger_entry;
            case T_PAGE_FAULT:
                /*
                 * If the current map is a submap of the kernel map,
                 * and the address is within that map, fault on that
                 * map.  If the same check is done in vm_fault
                 * (vm_map_lookup), we may deadlock on the kernel map
                 * lock.
                 */

                prot = VM_PROT_READ;

                if (code & T_PF_WRITE)
                        prot |= VM_PROT_WRITE;
#if PAE
                if (code & T_PF_EXECUTE)
                        prot |= VM_PROT_EXECUTE;
#endif
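                /*
                 * The hardware page-fault error code is a bit vector:
                 * bit 0 set means a protection violation (clear means
                 * page-not-present), bit 1 a write access, bit 2 a fault
                 * taken in user mode, and, with PAE/NX paging, bit 4 an
                 * instruction fetch -- hence the T_PF_WRITE/T_PF_EXECUTE
                 * tests mapping the error code onto vm_prot_t here.
                 */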

#if MACH_KDB
                /*
                 * Check for watchpoint on kernel static data.
                 * vm_fault would fail in this case
                 */
                if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted &&
                    (code & T_PF_WRITE) && vaddr < vm_map_max(map) &&
                    ((*(pte = pmap_pte(kernel_pmap, (vm_map_offset_t)vaddr))) & INTEL_PTE_WRITE) == 0) {
                        pmap_store_pte(
                                pte,
                                *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE);
                        /* XXX need invltlb here? */

                        result = KERN_SUCCESS;
                        goto look_for_watchpoints;
                }
#endif /* MACH_KDB */

                result = vm_fault(map,
                                  vm_map_trunc_page(vaddr),
                                  prot,
                                  FALSE,
                                  THREAD_UNINT, NULL, 0);

#if MACH_KDB
                if (result == KERN_SUCCESS) {
                        /*
                         * Look for watchpoints
                         */
look_for_watchpoints:
                        if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted && (code & T_PF_WRITE) &&
                            db_find_watchpoint(map, vaddr, saved_state))
                                kdb_trap(T_WATCHPOINT, 0, saved_state);
                }
#endif /* MACH_KDB */

                if (result == KERN_SUCCESS) {

                        if (fault_in_copy_window != -1) {
                                pt_entry_t *updp;
                                pt_entry_t *kpdp;

                                /*
                                 * in case there was no page table assigned
                                 * for the user base address and the pmap
                                 * got 'expanded' due to this fault, we'll
                                 * copy in the descriptor
                                 *
                                 * we're either setting the page table descriptor
                                 * to the same value or it was 0... no need
                                 * for a TLB flush in either case
                                 */

                                ml_set_interrupts_enabled(FALSE);
                                updp = pmap_pde(map->pmap, thread->machine.copy_window[fault_in_copy_window].user_base);
                                assert(updp);
                                if (0 == updp) panic("trap: updp 0"); /* XXX DEBUG */
                                kpdp = current_cpu_datap()->cpu_copywindow_pdp;
                                kpdp += fault_in_copy_window;

#if JOE_DEBUG
                                if (*kpdp && (*kpdp & PG_FRAME) != (*updp & PG_FRAME))
                                        panic("kernel_fault: user pdp doesn't match - updp = 0x%x, kpdp = 0x%x\n", updp, kpdp);
#endif
                                pmap_store_pte(kpdp, *updp);

                                (void) ml_set_interrupts_enabled(intr);
                        }
                        return;
                }
                /*
                 * fall through
                 */

            case T_GENERAL_PROTECTION:
                /*
                 * If there is a failure recovery address
                 * for this fault, go there.
                 */
                for (rp = recover_table; rp < recover_table_end; rp++) {
                        if (kern_ip == rp->fault_addr) {
                                set_recovery_ip(saved_state, rp->recover_addr);
                                return;
                        }
                }

                /*
                 * Check thread recovery address also.
                 */
                if (thread->recover) {
                        set_recovery_ip(saved_state, thread->recover);
                        thread->recover = 0;
                        return;
                }
                /*
                 * Unanticipated page-fault errors in kernel
                 * should not happen.
                 *
                 * fall through...
                 */

            default:
                /*
                 * Exception 15 is reserved but some chips may generate it
                 * spuriously. Seen at startup on AMD Athlon-64.
                 */
                if (type == 15) {
                        kprintf("kernel_trap() ignoring spurious trap 15\n");
                        return;
                }
debugger_entry:
                /* Ensure that the i386_kernel_state at the base of the
                 * current thread's stack (if any) is synchronized with the
                 * context at the moment of the trap, to facilitate
                 * access through the debugger.
                 */
                sync_iss_to_iks(saved_state);
#if MACH_KDB
restart_debugger:
#endif /* MACH_KDB */
#if MACH_KDP
                if (current_debugger != KDB_CUR_DB) {
                        if (kdp_i386_trap(type, saved_state, result, vaddr))
                                return;
                }
#endif /* MACH_KDP */
#if MACH_KDB
                else
                        if (kdb_trap(type, code, saved_state)) {
                                if (switch_debugger) {
                                        current_debugger = KDP_CUR_DB;
                                        switch_debugger = 0;
                                        goto restart_debugger;
                                }
                                return;
                        }
#endif /* MACH_KDB */
        }

        panic_trap(saved_state);
        /*
         * NO RETURN
         */
}


static void
set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip)
{
        saved_state->eip = ip;
}


static void
panic_trap(x86_saved_state32_t *regs)
{
        const char *trapname = "Unknown";
        uint32_t cr0 = get_cr0();
        uint32_t cr2 = get_cr2();
        uint32_t cr3 = get_cr3();
        uint32_t cr4 = get_cr4();

        panic_io_port_read();

        kprintf("panic trap number 0x%x, eip 0x%x\n", regs->trapno, regs->eip);
        kprintf("cr0 0x%08x cr2 0x%08x cr3 0x%08x cr4 0x%08x\n",
                cr0, cr2, cr3, cr4);

        if (regs->trapno < TRAP_TYPES)
                trapname = trap_type[regs->trapno];

        panic("Unresolved kernel trap (CPU %d, Type %d=%s), registers:\n"
              "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
              "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
              "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
              "EFL: 0x%08x, EIP: 0x%08x, CS: 0x%08x, DS: 0x%08x\n",
              cpu_number(), regs->trapno, trapname, cr0, cr2, cr3, cr4,
              regs->eax, regs->ebx, regs->ecx, regs->edx,
              regs->cr2, regs->ebp, regs->esi, regs->edi,
              regs->efl, regs->eip, regs->cs, regs->ds);
        /*
         * This next statement is not executed,
         * but it's needed to stop the compiler using tail call optimization
         * for the panic call - which confuses the subsequent backtrace.
         */
        cr0 = 0;
}

extern void kprintf_break_lock(void);


/*
 * Called from locore on a special reserved stack after a double-fault
 * is taken in kernel space.
 * Kernel stack overflow is one route here.
 */
void
panic_double_fault(int code)
{
        struct i386_tss *my_ktss = current_ktss();

        /* Set postcode (DEBUG only) */
        postcode(PANIC_DOUBLE_FAULT);

        /* Issue an I/O port read if one has been requested - this is an
         * event that logic analyzers can use as a trigger point.
         */
        panic_io_port_read();

        /*
         * Break kprintf lock in case of recursion,
         * and record originally faulted instruction address.
         */
        kprintf_break_lock();

#if MACH_KDP
        /*
         * Print backtrace leading to first fault:
         */
        panic_i386_backtrace((void *) my_ktss->ebp, 10);
#endif

        panic("Double fault (CPU:%d, thread:%p, code:0x%x),"
              "registers:\n"
              "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
              "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
              "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
              "EFL: 0x%08x, EIP: 0x%08x\n",
              cpu_number(), current_thread(), code,
              get_cr0(), get_cr2(), get_cr3(), get_cr4(),
              my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
              my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
              my_ktss->eflags, my_ktss->eip);
}


/*
 * Called from locore on a special reserved stack after a machine-check
 */
void
panic_machine_check(int code)
{
        struct i386_tss *my_ktss = current_ktss();

        /* Set postcode (DEBUG only) */
        postcode(PANIC_MACHINE_CHECK);

        /*
         * Break kprintf lock in case of recursion,
         * and record originally faulted instruction address.
         */
        kprintf_break_lock();

        /*
         * Dump the contents of the machine check MSRs (if any).
         */
        mca_dump();

        /*
         * And that's all folks, we don't attempt recovery...
         */
        panic("Machine-check (CPU:%d, thread:%p, code:0x%x),"
              "registers:\n"
              "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
              "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
              "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
              "EFL: 0x%08x, EIP: 0x%08x\n",
              cpu_number(), current_thread(), code,
              get_cr0(), get_cr2(), get_cr3(), get_cr4(),
              my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
              my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
              my_ktss->eflags, my_ktss->eip);
}

void
panic_double_fault64(x86_saved_state_t *esp)
{
        /* Set postcode (DEBUG only) */
        postcode(PANIC_DOUBLE_FAULT);

        /*
         * Break kprintf lock in case of recursion,
         * and record originally faulted instruction address.
         */
        kprintf_break_lock();

        /*
         * Dump the interrupt stack frame at last kernel entry.
         */
        if (is_saved_state64(esp)) {
                x86_saved_state64_t *ss64p = saved_state64(esp);
                panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx),"
                      "registers:\n"
                      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
                      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
                      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
                      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
                      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
                      "RFL: 0x%016qx, RIP: 0x%016qx, CR2: 0x%016qx\n",
                      cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
                      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
                      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
                      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
                      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
                      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
                      ss64p->isf.rflags, ss64p->isf.rip, ss64p->cr2);
        } else {
                x86_saved_state32_t *ss32p = saved_state32(esp);
                panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%x),"
                      "registers:\n"
                      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
                      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
                      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
                      "EFL: 0x%08x, EIP: 0x%08x\n",
                      cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
                      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
                      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
                      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
                      ss32p->efl, ss32p->eip);
        }
}

/*
 * Machine check handler for 64-bit.
 */
void
panic_machine_check64(x86_saved_state_t *esp)
{
        /* Set postcode (DEBUG only) */
        postcode(PANIC_MACHINE_CHECK);

        /*
         * Break kprintf lock in case of recursion,
         * and record originally faulted instruction address.
         */
        kprintf_break_lock();

        /*
         * Dump the contents of the machine check MSRs (if any).
         */
        mca_dump();

        /*
         * And that's all folks, we don't attempt recovery...
         */
        if (is_saved_state64(esp)) {
                x86_saved_state64_t *ss64p = saved_state64(esp);
                panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx),"
                      "registers:\n"
                      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
                      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
                      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
                      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
                      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
                      "RFL: 0x%016qx, RIP: 0x%016qx\n",
                      cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
                      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
                      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
                      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
                      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
                      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
                      ss64p->isf.rflags, ss64p->isf.rip);
        } else {
                x86_saved_state32_t *ss32p = saved_state32(esp);
                panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%x),"
                      "registers:\n"
                      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
                      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
                      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
                      "EFL: 0x%08x, EIP: 0x%08x\n",
                      cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
                      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
                      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
                      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
                      ss32p->efl, ss32p->eip);
        }
}

/*
 * Trap from user mode.
 */
void
user_trap(
        x86_saved_state_t *saved_state)
{
        int exc;
        int code;
        int err;
        unsigned int subcode;
        int type;
        user_addr_t vaddr;
        vm_prot_t prot;
        thread_t thread = current_thread();
        ast_t *myast;
        boolean_t intr;
        kern_return_t kret;
        user_addr_t rip;

        assert((is_saved_state32(saved_state) && !thread_is_64bit(thread)) ||
               (is_saved_state64(saved_state) &&  thread_is_64bit(thread)));

        if (is_saved_state64(saved_state)) {
                x86_saved_state64_t *regs;

                regs = saved_state64(saved_state);

                type = regs->isf.trapno;
                err = regs->isf.err & 0xffff;
                vaddr = (user_addr_t)regs->cr2;
                rip = (user_addr_t)regs->isf.rip;
        } else {
                x86_saved_state32_t *regs;

                regs = saved_state32(saved_state);

                type = regs->trapno;
                err = regs->err & 0xffff;
                vaddr = (user_addr_t)regs->cr2;
                rip = (user_addr_t)regs->eip;
        }

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
                              (int)(vaddr>>32), (int)vaddr, (int)(rip>>32), (int)rip, 0);

        code = 0;
        subcode = 0;
        exc = 0;

#if DEBUG_TRACE
        kprintf("user_trap(0x%08x) type=%d vaddr=0x%016llx\n",
                saved_state, type, vaddr);
#endif
        myast = ast_pending();
        if (perfASTHook) {
                if (*myast & AST_CHUD_ALL) {
                        perfASTHook(type, saved_state, 0, 0);
                }
        } else {
                *myast &= ~AST_CHUD_ALL;
        }

        /* Is there a hook? */
        if (perfTrapHook) {
                if (perfTrapHook(type, saved_state, 0, 0) == KERN_SUCCESS)
                        return; /* If it succeeds, we are done... */
        }

        switch (type) {

            case T_DIVIDE_ERROR:
                exc = EXC_ARITHMETIC;
                code = EXC_I386_DIV;
                break;

            case T_DEBUG:
                {
                        pcb_t pcb;
                        unsigned int clear = 0;
                        /*
                         * get dr6 and set it in the thread's pcb before
                         * returning to userland
                         */
                        pcb = thread->machine.pcb;
                        if (pcb->ids) {
                                /*
                                 * We can get and set the status register
                                 * in 32-bit mode even on a 64-bit thread
                                 * because the high order bits are not
                                 * used on x86_64
                                 */
                                if (thread_is_64bit(thread)) {
                                        uint32_t dr6;
                                        x86_debug_state64_t *ids = pcb->ids;
                                        dr6 = (uint32_t)ids->dr6;
                                        __asm__ volatile ("movl %%db6, %0" : "=r" (dr6));
                                        ids->dr6 = dr6;
                                } else { /* 32 bit thread */
                                        x86_debug_state32_t *ids = pcb->ids;
                                        __asm__ volatile ("movl %%db6, %0" : "=r" (ids->dr6));
                                }
                                __asm__ volatile ("movl %0, %%db6" : : "r" (clear));
                        }
                        exc = EXC_BREAKPOINT;
                        code = EXC_I386_SGL;
                        break;
                }
            case T_INT3:
                exc = EXC_BREAKPOINT;
                code = EXC_I386_BPT;
                break;

            case T_OVERFLOW:
                exc = EXC_ARITHMETIC;
                code = EXC_I386_INTO;
                break;

            case T_OUT_OF_BOUNDS:
                exc = EXC_SOFTWARE;
                code = EXC_I386_BOUND;
                break;

            case T_INVALID_OPCODE:
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_INVOP;
                break;

            case T_NO_FPU:
            case 32:            /* XXX */
                fpnoextflt();
                return;

            case T_FPU_FAULT:
                fpextovrflt();
                return;

            case 10:            /* invalid TSS == iret with NT flag set */
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_INVTSSFLT;
                subcode = err;
                break;

            case T_SEGMENT_NOT_PRESENT:
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_SEGNPFLT;
                subcode = err;
                break;

            case T_STACK_FAULT:
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_STKFLT;
                subcode = err;
                break;

            case T_GENERAL_PROTECTION:
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_GPFLT;
                subcode = err;
                break;

            case T_PAGE_FAULT:
                prot = VM_PROT_READ;

                if (err & T_PF_WRITE)
                        prot |= VM_PROT_WRITE;
#if PAE
                if (err & T_PF_EXECUTE)
                        prot |= VM_PROT_EXECUTE;
#endif
                kret = vm_fault(thread->map, vm_map_trunc_page(vaddr),
                                prot, FALSE,
                                THREAD_ABORTSAFE, NULL, 0);

                user_page_fault_continue(kret);

                /* NOTREACHED */
                break;

            case T_SSE_FLOAT_ERROR:
                fpSSEexterrflt();
                return;


            case T_FLOATING_POINT_ERROR:
                fpexterrflt();
                return;

            default:
#if MACH_KGDB
                Debugger("Unanticipated user trap");
                return;
#endif  /* MACH_KGDB */
#if MACH_KDB
                if (kdb_trap(type, err, saved_state))
                        return;
#endif  /* MACH_KDB */
                panic("user trap");
                return;
        }
        intr = ml_set_interrupts_enabled(FALSE);
        myast = ast_pending();
        while (*myast & AST_ALL) {
                ast_taken(AST_ALL, intr);
                ml_set_interrupts_enabled(FALSE);
                myast = ast_pending();
        }
        ml_set_interrupts_enabled(intr);

        i386_exception(exc, code, subcode);
        /*NOTREACHED*/
}


/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void log_thread_action (thread_t, char *);

void
i386_astintr(int preemption)
{
        ast_t mask = AST_ALL;
        spl_t s;

        if (preemption)
                mask = AST_PREEMPTION;

        s = splsched();

        ast_taken(mask, s);

        splx(s);
}

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
        int exc,
        int code,
        int subcode)
{
        exception_data_type_t codes[EXCEPTION_CODE_MAX];

        codes[0] = code;        /* new exception interface */
        codes[1] = subcode;
        exception_triage(exc, codes, 2);
        /*NOTREACHED*/
}


void
kernel_preempt_check(void)
{
        ast_t *myast;
        boolean_t intr;

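
            /*
             * A note on the DR6 handling above: the processor sets the
             * breakpoint-condition bits in DR6 but never clears them, so
             * the kernel must snapshot the register into the pcb for the
             * debugger's benefit and then zero it by hand, lest a later
             * #DB appear to match stale conditions.
             */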
        /*
         * disable interrupts both to prevent pre-emption
         * and to keep the ast state from changing via
         * an interrupt handler making something runnable
         */
        intr = ml_set_interrupts_enabled(FALSE);

        myast = ast_pending();

        if ((*myast & AST_URGENT) && intr == TRUE && get_interrupt_level() == 0) {
                /*
                 * can handle interrupts and preemptions
                 * at this point
                 */
                ml_set_interrupts_enabled(intr);

                /*
                 * now cause the PRE-EMPTION trap
                 */
                __asm__ volatile (" int $0xff");
        } else {
                /*
                 * if interrupts were already disabled or
                 * we're in an interrupt context, we can't
                 * preempt... of course if AST_URGENT
                 * isn't set we also don't want to
                 */
                ml_set_interrupts_enabled(intr);
        }
}
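
/*
 * A sketch of the round trip triggered above, assuming vector 0xff is
 * wired to T_PREEMPT (255) in the trap tables:
 *
 *      kernel_preempt_check()           caller saw AST_URGENT pending
 *          -> int $0xff                 software interrupt into the kernel
 *          -> kernel_trap(T_PREEMPT)    matches "if (T_PREEMPT == type)"
 *          -> ast_taken(AST_PREEMPTION) performs the preemption
 *
 * kernel_trap() takes that path before re-enabling interrupts, which is
 * why the T_PREEMPT case sits ahead of the main switch.
 */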

#if MACH_KDB

extern void db_i386_state(x86_saved_state32_t *regs);

#include <ddb/db_output.h>

void
db_i386_state(
        x86_saved_state32_t *regs)
{
        db_printf("eip  %8x\n", regs->eip);
        db_printf("trap %8x\n", regs->trapno);
        db_printf("err  %8x\n", regs->err);
        db_printf("efl  %8x\n", regs->efl);
        db_printf("ebp  %8x\n", regs->ebp);
        db_printf("esp  %8x\n", regs->cr2);
        db_printf("uesp %8x\n", regs->uesp);
        db_printf("cs   %8x\n", regs->cs & 0xff);
        db_printf("ds   %8x\n", regs->ds & 0xff);
        db_printf("es   %8x\n", regs->es & 0xff);
        db_printf("fs   %8x\n", regs->fs & 0xff);
        db_printf("gs   %8x\n", regs->gs & 0xff);
        db_printf("ss   %8x\n", regs->ss & 0xff);
        db_printf("eax  %8x\n", regs->eax);
        db_printf("ebx  %8x\n", regs->ebx);
        db_printf("ecx  %8x\n", regs->ecx);
        db_printf("edx  %8x\n", regs->edx);
        db_printf("esi  %8x\n", regs->esi);
        db_printf("edi  %8x\n", regs->edi);
}

#endif /* MACH_KDB */

/* Synchronize a thread's x86_kernel_state32 (if any) with the given
 * x86_saved_state32_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * an "MP_KDP" IPI.
 */

void
sync_iss_to_iks(x86_saved_state32_t *saved_state)
{
        struct x86_kernel_state32 *iks;
        vm_offset_t kstack;
        boolean_t record_active_regs = FALSE;

        if ((kstack = current_thread()->kernel_stack) != 0) {
                x86_saved_state32_t *regs;

                regs = saved_state;

                iks = STACK_IKS(kstack);

                /*
                 * Did we take the trap/interrupt in kernel mode?
                 */
                if (regs == USER_REGS32(current_thread()))
                        record_active_regs = TRUE;
                else {
                        iks->k_ebx = regs->ebx;
                        iks->k_esp = (int)regs;
                        iks->k_ebp = regs->ebp;
                        iks->k_edi = regs->edi;
                        iks->k_esi = regs->esi;
                        iks->k_eip = regs->eip;
                }
        }

        if (record_active_regs == TRUE) {
                /*
                 * Show the trap handler path
                 */
                __asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
                __asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
                __asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
                __asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
                __asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
                /*
                 * "Current" instruction pointer
                 */
                __asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
        }
}
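
/*
 * The "movl $1f, %0 ; 1:" idiom above stores the address of local assembler
 * label 1 -- i.e. of the instruction immediately following the store -- so
 * the recorded k_eip is a close-enough "current" PC for debugger backtraces.
 */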

/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state32_t *saved_state) {
        struct x86_kernel_state32 *iks;
        vm_offset_t kstack;
        boolean_t record_active_regs = FALSE;

        if ((kstack = current_thread()->kernel_stack) != 0) {

                iks = STACK_IKS(kstack);
                /*
                 * Show the trap handler path
                 */
                __asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
                __asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
                __asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
                __asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
                __asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
                /*
                 * "Current" instruction pointer
                 */
                __asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));

        }
}