/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Hardware trap/fault handler.
 */
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h> /* panic_io_port_read() */

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#include <sys/kdebug.h>

#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif /* MACH_KGDB */

#if MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif /* MACH_KDB */

#include <i386/io_emulate.h>
#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#include <i386/machine_check.h>

#include <mach/i386/syscall_sw.h>
/*
 * Forward declarations
 */
static void user_page_fault_continue(kern_return_t kret);
static void panic_trap(x86_saved_state32_t *saved_state);
static void set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip);

perfCallback perfTrapHook = NULL;   /* Pointer to CHUD trap hook routine */
perfCallback perfASTHook  = NULL;   /* Pointer to CHUD AST hook routine */
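/*
 * thread_syscall_return is the common completion path for Mach traps
 * and system calls: it deposits the return value in the saved user
 * register state (rax or eax), emits the matching kdebug DBG_FUNC_END
 * event when tracing is enabled, and returns to user mode.
 */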
void
thread_syscall_return(
    kern_return_t ret)
{
    thread_t thr_act = current_thread();

    if (thread_is_64bit(thr_act)) {
        x86_saved_state64_t *regs;

        regs = USER_REGS64(thr_act);

        if (kdebug_enable && ((regs->rax & SYSCALL_CLASS_MASK) == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT))) {
            /* Mach trap */
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_SC, ((int) (regs->rax & SYSCALL_NUMBER_MASK)))
                | DBG_FUNC_END,
                ret, 0, 0, 0, 0);
        }
        regs->rax = ret;
    } else {
        x86_saved_state32_t *regs;

        regs = USER_REGS32(thr_act);

        if (kdebug_enable && ((int) regs->eax < 0)) {
            /* Mach trap */
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_EXCP_SC, -((int) regs->eax))
                | DBG_FUNC_END,
                ret, 0, 0, 0, 0);
        }
        regs->eax = ret;
    }
    thread_exception_return();
    /*NOTREACHED*/
}
#if MACH_KDB
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;
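/*
 * thread_kdb_return: drop back into kdb on the way out to user space,
 * passing whichever saved-state layout (32- or 64-bit) the thread
 * carries; if kdb_trap() handles it, resume the user thread directly.
 */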
void
thread_kdb_return(void)
{
    thread_t thr_act = current_thread();
    x86_saved_state_t *iss = USER_STATE(thr_act);

    if (is_saved_state64(iss)) {
        x86_saved_state64_t *regs;

        regs = saved_state64(iss);

        if (kdb_trap(regs->isf.trapno, (int)regs->isf.err, (void *)regs)) {
            thread_exception_return();
            /*NOTREACHED*/
        }
    } else {
        x86_saved_state32_t *regs;

        regs = saved_state32(iss);

        if (kdb_trap(regs->trapno, regs->err, (void *)regs)) {
            thread_exception_return();
            /*NOTREACHED*/
        }
    }
}
#endif /* MACH_KDB */
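/*
 * user_page_fault_continue: continuation run once vm_fault() resolves a
 * user-mode page fault.  On success (or an aborted fault) it drains any
 * pending ASTs and resumes the thread; otherwise it raises an
 * EXC_BAD_ACCESS exception for the faulting address.
 */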
static void
user_page_fault_continue(
    kern_return_t kr)
{
    thread_t thread = current_thread();
    x86_saved_state_t *regs = USER_STATE(thread);
    ast_t       *myast;
    boolean_t   intr;
    user_addr_t vaddr;
    int         err;
    int         trapno;

    assert((is_saved_state32(regs) && !thread_is_64bit(thread)) ||
           (is_saved_state64(regs) &&  thread_is_64bit(thread)));

    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *uregs;

        uregs = USER_REGS64(thread);

        trapno = uregs->isf.trapno;
        err = uregs->isf.err;
        vaddr = (user_addr_t)uregs->cr2;
    } else {
        x86_saved_state32_t *uregs;

        uregs = USER_REGS32(thread);

        trapno = uregs->trapno;
        err = uregs->err;
        vaddr = (user_addr_t)uregs->cr2;
    }

    if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if MACH_KDB
        if (!db_breakpoints_inserted) {
            db_set_breakpoints();
        }
        if (db_watchpoint_list &&
            db_watchpoints_inserted &&
            (err & T_PF_WRITE) &&
            db_find_watchpoint(thread->map,
                               (vm_offset_t)vaddr,
                               regs))
            kdb_trap(T_WATCHPOINT, 0, regs);
#endif /* MACH_KDB */
        intr = ml_set_interrupts_enabled(FALSE);
        myast = ast_pending();
        while (*myast & AST_ALL) {
            ast_taken(AST_ALL, intr);
            ml_set_interrupts_enabled(FALSE);
            myast = ast_pending();
        }
        ml_set_interrupts_enabled(intr);

        thread_exception_return();
        /*NOTREACHED*/
    }

#if MACH_KDB
    if (debug_all_traps_with_kdb &&
        kdb_trap(trapno, err, regs)) {
        thread_exception_return();
        /*NOTREACHED*/
    }
#endif /* MACH_KDB */

    i386_exception(EXC_BAD_ACCESS, kr, vaddr);
    /*NOTREACHED*/
}
/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
    uint32_t fault_addr;
    uint32_t recover_addr;
};

extern struct recovery recover_table[];
extern struct recovery recover_table_end[];
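/*
 * Each recovery entry pairs the address of an instruction that may
 * legitimately fault while copying user data with the address to resume
 * at if it does.  On a kernel-mode T_PAGE_FAULT or T_GENERAL_PROTECTION,
 * kernel_trap() below scans this table linearly and, on a match, simply
 * points the saved EIP at the recovery address instead of panicking.
 */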
const char * trap_type[] = {TRAP_NAMES};
unsigned TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
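/*
 * reset_dr7() loads DR7 with 0x400: only the always-set reserved bit
 * (bit 10) remains on, so every local and global breakpoint-enable bit
 * is cleared and the hardware debug registers are disarmed.
 */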
static inline void
reset_dr7(void)
{
    uint32_t dr7 = 0x400;   /* magic dr7 reset value */
    __asm__ volatile("movl %0,%%dr7" : : "r" (dr7));
}
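/*
 * Nonzero while the kernel debugger has hardware watchpoints armed;
 * consulted in kernel_trap()'s T_DEBUG path so that a stray debug
 * register match isn't mistaken for debugger activity.
 */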
unsigned kdp_has_active_watchpoints = 0;
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
void
kernel_trap(
    x86_saved_state_t *state)
{
    x86_saved_state32_t *saved_state;
    int             code;
    user_addr_t     vaddr;
    int             type;
    vm_map_t        map;
    kern_return_t   result = KERN_FAILURE;
    thread_t        thread;
    ast_t           *myast;
    boolean_t       intr;
    vm_prot_t       prot;
    struct recovery *rp;
    vm_offset_t     kern_ip;
    int             fault_in_copy_window = -1;
    int             is_user = 0;
#if MACH_KDB
    pt_entry_t      *pte;
#endif /* MACH_KDB */

    thread = current_thread();

    if (is_saved_state64(state))
        panic("kernel_trap(%p) with 64-bit state", state);
    saved_state = saved_state32(state);

    vaddr = (user_addr_t)saved_state->cr2;
    type  = saved_state->trapno;
    code  = saved_state->err & 0xffff;
    intr  = (saved_state->efl & EFL_IF) != 0;   /* state of ints at trap */

    kern_ip = (vm_offset_t)saved_state->eip;

    myast = ast_pending();

    if (perfASTHook) {
        if (*myast & AST_CHUD_ALL)
            perfASTHook(type, NULL, 0, 0);
    } else {
        *myast &= ~AST_CHUD_ALL;
    }

    /*
     * Is there a hook?
     */
    if (perfTrapHook) {
        if (perfTrapHook(type, NULL, 0, 0) == KERN_SUCCESS) {
            /*
             * If it succeeds, we are done...
             */
            return;
        }
    }
    /*
     * we come here with interrupts off as we don't want to recurse
     * on preemption below.  but we do want to re-enable interrupts
     * as soon as we possibly can to hold latency down
     */
    if (T_PREEMPT == type) {
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
                              0, 0, 0, kern_ip, 0);

        ast_taken(AST_PREEMPTION, FALSE);
        return;
    }
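    /*
     * For a kernel-mode page fault, first decide which map the fault
     * really belongs to.  We assume the kernel map unless the address
     * lands in one of this CPU's copyin/copyout windows: NCOPY_WINDOWS
     * windows, each one page-directory entry (NBPDE bytes) wide, start
     * at cpu_copywindow_base, and a window in use aliases a per-thread
     * user base address.  In that case the fault address is rewritten
     * in user terms and charged to the thread's own map.  Preemption is
     * still disabled here, so only this processor's windows need be
     * examined.
     */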
    if (T_PAGE_FAULT == type) {
        /*
         * assume we're faulting in the kernel map
         */
        map = kernel_map;

        if (thread != THREAD_NULL && thread->map != kernel_map) {
            vm_offset_t copy_window_base;
            vm_offset_t kvaddr;
            int         window_index;

            kvaddr = (vm_offset_t)vaddr;
            /*
             * must determine if fault occurred in
             * the copy window while pre-emption is
             * disabled for this processor so that
             * we only need to look at the window
             * associated with this processor
             */
            copy_window_base = current_cpu_datap()->cpu_copywindow_base;

            if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS))) {

                window_index = (kvaddr - copy_window_base) / NBPDE;

                if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) {

                    kvaddr -= (copy_window_base + (NBPDE * window_index));
                    vaddr = thread->machine.copy_window[window_index].user_base + kvaddr;

                    map = thread->map;
                    fault_in_copy_window = window_index;
                }
                is_user = -1;
            }
        }
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
                          (int)(vaddr >> 32), (int)vaddr, is_user, kern_ip, 0);

    (void) ml_set_interrupts_enabled(intr);
    switch (type) {

        case T_NO_FPU:
            fpnoextflt();
            return;

        case T_FPU_FAULT:
            fpextovrflt();
            return;

        case T_FLOATING_POINT_ERROR:
            fpexterrflt();
            return;

        case T_SSE_FLOAT_ERROR:
            fpSSEexterrflt();
            return;

        case T_DEBUG:
            if ((saved_state->efl & EFL_TF) == 0
                && !kdp_has_active_watchpoints) {
                /* We've somehow encountered a debug
                 * register match that does not belong
                 * to the kernel debugger.
                 * This isn't supposed to happen.
                 */
                reset_dr7();
                return;
            }
            goto debugger_entry;
        case T_PAGE_FAULT:
            /*
             * If the current map is a submap of the kernel map,
             * and the address is within that map, fault on that
             * map.  If the same check is done in vm_fault
             * (vm_map_lookup), we may deadlock on the kernel map
             * lock.
             */

            prot = VM_PROT_READ;

            if (code & T_PF_WRITE)
                prot |= VM_PROT_WRITE;
#if PAE
            if (code & T_PF_EXECUTE)
                prot |= VM_PROT_EXECUTE;
#endif

#if MACH_KDB
            /*
             * Check for watchpoint on kernel static data.
             * vm_fault would fail in this case
             */
            if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted &&
                (code & T_PF_WRITE) && vaddr < vm_map_max(map) &&
                ((*(pte = pmap_pte(kernel_pmap, (vm_map_offset_t)vaddr))) & INTEL_PTE_WRITE) == 0) {
                pmap_store_pte(
                    pte,
                    *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE);
                /* XXX need invltlb here? */

                result = KERN_SUCCESS;
                goto look_for_watchpoints;
            }
#endif /* MACH_KDB */

            result = vm_fault(map,
                              vm_map_trunc_page(vaddr),
                              prot,
                              FALSE,
                              THREAD_UNINT, NULL, 0);

#if MACH_KDB
            if (result == KERN_SUCCESS) {
                /*
                 * Look for watchpoints
                 */
look_for_watchpoints:
                if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted && (code & T_PF_WRITE) &&
                    db_find_watchpoint(map, vaddr, saved_state))
                    kdb_trap(T_WATCHPOINT, 0, saved_state);
            }
#endif /* MACH_KDB */

            if (result == KERN_SUCCESS) {

                if (fault_in_copy_window != -1) {
                    pt_entry_t *updp;
                    pt_entry_t *kpdp;

                    /*
                     * in case there was no page table assigned
                     * for the user base address and the pmap
                     * got 'expanded' due to this fault, we'll
                     * copy in the descriptor
                     *
                     * we're either setting the page table descriptor
                     * to the same value or it was 0... no need
                     * for a TLB flush in either case
                     */

                    ml_set_interrupts_enabled(FALSE);
                    updp = pmap_pde(map->pmap, thread->machine.copy_window[fault_in_copy_window].user_base);

                    if (0 == updp) panic("trap: updp 0"); /* XXX DEBUG */
                    kpdp = current_cpu_datap()->cpu_copywindow_pdp;
                    kpdp += fault_in_copy_window;

                    if (*kpdp && (*kpdp & PG_FRAME) != (*updp & PG_FRAME))
                        panic("kernel_fault: user pdp doesn't match - updp = 0x%x, kpdp = 0x%x\n", updp, kpdp);

                    pmap_store_pte(kpdp, *updp);

                    (void) ml_set_interrupts_enabled(intr);
                }
                return;
            }
            /*
             * fall through
             */
        case T_GENERAL_PROTECTION:
            /*
             * If there is a failure recovery address
             * for this fault, go there.
             */
            for (rp = recover_table; rp < recover_table_end; rp++) {
                if (kern_ip == rp->fault_addr) {
                    set_recovery_ip(saved_state, rp->recover_addr);
                    return;
                }
            }

            /*
             * Check thread recovery address also.
             */
            if (thread->recover) {
                set_recovery_ip(saved_state, thread->recover);
                thread->recover = 0;
                return;
            }
            /*
             * Unanticipated page-fault errors in kernel
             * should not return to user space.
             *
             * fall through...
             */

        default:
            /*
             * Exception 15 is reserved but some chips may generate it
             * spuriously. Seen at startup on AMD Athlon-64.
             */
            if (type == 15) {
                kprintf("kernel_trap() ignoring spurious trap 15\n");
                return;
            }
debugger_entry:
            /* Ensure that the i386_kernel_state at the base of the
             * current thread's stack (if any) is synchronized with the
             * context at the moment of the trap, to facilitate
             * access through the debugger.
             */
            sync_iss_to_iks(saved_state);
#if MACH_KDB
restart_debugger:
#endif /* MACH_KDB */
#if MACH_KDP
            if (current_debugger != KDB_CUR_DB) {
                if (kdp_i386_trap(type, saved_state, result, vaddr))
                    return;
            }
#endif /* MACH_KDP */
#if MACH_KDB
            else
                if (kdb_trap(type, code, saved_state)) {
                    if (switch_debugger) {
                        current_debugger = KDP_CUR_DB;

                        goto restart_debugger;
                    }
                    return;
                }
#endif /* MACH_KDB */
    }

    panic_trap(saved_state);
    /*
     * NO RETURN
     */
}
static void
set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip)
{
    saved_state->eip = ip;
}
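/*
 * panic_trap: no handler claimed the trap.  Dump the control registers
 * and the saved register state, then panic.  panic_io_port_read() first
 * issues any requested I/O-port read so external logic analyzers get a
 * trigger event before console output begins.
 */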
static void
panic_trap(x86_saved_state32_t *regs)
{
    const char *trapname = "Unknown";
    uint32_t cr0 = get_cr0();
    uint32_t cr2 = get_cr2();
    uint32_t cr3 = get_cr3();
    uint32_t cr4 = get_cr4();

    panic_io_port_read();

    kprintf("panic trap number 0x%x, eip 0x%x\n", regs->trapno, regs->eip);
    kprintf("cr0 0x%08x cr2 0x%08x cr3 0x%08x cr4 0x%08x\n",
            cr0, cr2, cr3, cr4);

    if (regs->trapno < TRAP_TYPES)
        trapname = trap_type[regs->trapno];

    panic("Unresolved kernel trap (CPU %d, Type %d=%s), registers:\n"
          "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
          "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
          "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
          "EFL: 0x%08x, EIP: 0x%08x, CS:  0x%08x, DS:  0x%08x\n",
          cpu_number(), regs->trapno, trapname, cr0, cr2, cr3, cr4,
          regs->eax, regs->ebx, regs->ecx, regs->edx,
          regs->cr2, regs->ebp, regs->esi, regs->edi,
          regs->efl, regs->eip, regs->cs, regs->ds);
    /*
     * This next statement is not executed,
     * but it's needed to stop the compiler using tail call optimization
     * for the panic call - which confuses the subsequent backtrace.
     */
    cr0 = 0;
}

extern void kprintf_break_lock(void);
/*
 * Called from locore on a special reserved stack after a double-fault
 * is taken in kernel space.
 * Kernel stack overflow is one route here.
 */
void
panic_double_fault(int code)
{
    struct i386_tss *my_ktss = current_ktss();

    /* Set postcode (DEBUG only) */
    postcode(PANIC_DOUBLE_FAULT);

    /* Issue an I/O port read if one has been requested - this is an event logic
     * analyzers can use as a trigger point.
     */
    panic_io_port_read();

    /*
     * Break kprintf lock in case of recursion,
     * and record originally faulted instruction address.
     */
    kprintf_break_lock();

    /*
     * Print backtrace leading to first fault:
     */
    panic_i386_backtrace((void *) my_ktss->ebp, 10);

    panic("Double fault (CPU:%d, thread:%p, code:0x%x),"
          "registers:\n"
          "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
          "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
          "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
          "EFL: 0x%08x, EIP: 0x%08x\n",
          cpu_number(), current_thread(), code,
          get_cr0(), get_cr2(), get_cr3(), get_cr4(),
          my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
          my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
          my_ktss->eflags, my_ktss->eip);
}
/*
 * Called from locore on a special reserved stack after a machine-check
 * is taken in kernel space.
 */
void
panic_machine_check(int code)
{
    struct i386_tss *my_ktss = current_ktss();

    /* Set postcode (DEBUG only) */
    postcode(PANIC_MACHINE_CHECK);

    /*
     * Break kprintf lock in case of recursion,
     * and record originally faulted instruction address.
     */
    kprintf_break_lock();

    /*
     * Dump the contents of the machine check MSRs (if any).
     */
    mca_dump();

    /*
     * And that's all folks, we don't attempt recovery...
     */
    panic("Machine-check (CPU:%d, thread:%p, code:0x%x),"
          "registers:\n"
          "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
          "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
          "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
          "EFL: 0x%08x, EIP: 0x%08x\n",
          cpu_number(), current_thread(), code,
          get_cr0(), get_cr2(), get_cr3(), get_cr4(),
          my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
          my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
          my_ktss->eflags, my_ktss->eip);
}
/*
 * Double fault handler for 64-bit.
 */
void
panic_double_fault64(x86_saved_state_t *esp)
{
    /* Set postcode (DEBUG only) */
    postcode(PANIC_DOUBLE_FAULT);

    /*
     * Break kprintf lock in case of recursion,
     * and record originally faulted instruction address.
     */
    kprintf_break_lock();

    /*
     * Dump the interrupt stack frame at last kernel entry.
     */
    if (is_saved_state64(esp)) {
        x86_saved_state64_t *ss64p = saved_state64(esp);
        panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx),"
              "registers:\n"
              "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
              "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
              "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
              "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
              "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
              "RFL: 0x%016qx, RIP: 0x%016qx, CR2: 0x%016qx\n",
              cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
              get_cr0(), get_cr2(), get_cr3(), get_cr4(),
              ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
              ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
              ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
              ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
              ss64p->isf.rflags, ss64p->isf.rip, ss64p->cr2);
    } else {
        x86_saved_state32_t *ss32p = saved_state32(esp);
        panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%x),"
              "registers:\n"
              "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
              "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
              "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
              "EFL: 0x%08x, EIP: 0x%08x\n",
              cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
              get_cr0(), get_cr2(), get_cr3(), get_cr4(),
              ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
              ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
              ss32p->efl, ss32p->eip);
    }
}
/*
 * Machine check handler for 64-bit.
 */
void
panic_machine_check64(x86_saved_state_t *esp)
{
    /* Set postcode (DEBUG only) */
    postcode(PANIC_MACHINE_CHECK);

    /*
     * Break kprintf lock in case of recursion,
     * and record originally faulted instruction address.
     */
    kprintf_break_lock();

    /*
     * Dump the contents of the machine check MSRs (if any).
     */
    mca_dump();

    /*
     * And that's all folks, we don't attempt recovery...
     */
    if (is_saved_state64(esp)) {
        x86_saved_state64_t *ss64p = saved_state64(esp);
        panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx),"
              "registers:\n"
              "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
              "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
              "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
              "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
              "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
              "RFL: 0x%016qx, RIP: 0x%016qx\n",
              cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
              get_cr0(), get_cr2(), get_cr3(), get_cr4(),
              ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
              ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
              ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
              ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
              ss64p->isf.rflags, ss64p->isf.rip);
    } else {
        x86_saved_state32_t *ss32p = saved_state32(esp);
        panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%x),"
              "registers:\n"
              "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
              "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
              "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
              "EFL: 0x%08x, EIP: 0x%08x\n",
              cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
              get_cr0(), get_cr2(), get_cr3(), get_cr4(),
              ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
              ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
              ss32p->efl, ss32p->eip);
    }
}
/*
 * Trap from user mode.
 */
void
user_trap(
    x86_saved_state_t *saved_state)
{
    int             exc;
    int             err;
    unsigned int    code;
    unsigned int    subcode;
    int             type;
    user_addr_t     vaddr;
    vm_prot_t       prot;
    user_addr_t     rip;
    thread_t        thread = current_thread();
    ast_t           *myast;
    boolean_t       intr;
    kern_return_t   kret;

    assert((is_saved_state32(saved_state) && !thread_is_64bit(thread)) ||
           (is_saved_state64(saved_state) &&  thread_is_64bit(thread)));

    if (is_saved_state64(saved_state)) {
        x86_saved_state64_t *regs;

        regs = saved_state64(saved_state);

        type  = regs->isf.trapno;
        err   = regs->isf.err & 0xffff;
        vaddr = (user_addr_t)regs->cr2;
        rip   = (user_addr_t)regs->isf.rip;
    } else {
        x86_saved_state32_t *regs;

        regs = saved_state32(saved_state);

        type  = regs->trapno;
        err   = regs->err & 0xffff;
        vaddr = (user_addr_t)regs->cr2;
        rip   = (user_addr_t)regs->eip;
    }

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
                          (int)(vaddr>>32), (int)vaddr, (int)(rip>>32), (int)rip, 0);

    code = 0;
    subcode = 0;
    exc = 0;

#if 0
    kprintf("user_trap(0x%08x) type=%d vaddr=0x%016llx\n",
            saved_state, type, vaddr);
#endif
    myast = ast_pending();

    if (perfASTHook) {
        if (*myast & AST_CHUD_ALL) {
            perfASTHook(type, saved_state, 0, 0);
        }
    } else {
        *myast &= ~AST_CHUD_ALL;
    }

    /* Is there a hook? */
    if (perfTrapHook) {
        if (perfTrapHook(type, saved_state, 0, 0) == KERN_SUCCESS)
            return; /* If it succeeds, we are done... */
    }
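    /*
     * Translate the hardware trap number into a Mach exception triple
     * (exc, code, subcode) for exception_triage(); FPU assist traps are
     * serviced in place and return directly.
     */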
    switch (type) {

        case T_DIVIDE_ERROR:
            exc = EXC_ARITHMETIC;
            code = EXC_I386_DIV;
            break;

        case T_DEBUG:
        {
            pcb_t pcb;
            unsigned int clear = 0;

            /*
             * get dr6 and set it in the thread's pcb before
             * returning to userland
             */
            pcb = thread->machine.pcb;
            if (pcb->ids) {
                unsigned int dr6;
                /*
                 * We can get and set the status register
                 * in 32-bit mode even on a 64-bit thread
                 * because the high order bits are not
                 * used on x86_64
                 */
                if (thread_is_64bit(thread)) {
                    x86_debug_state64_t *ids = pcb->ids;
                    dr6 = (uint32_t)ids->dr6;
                    __asm__ volatile ("movl %%db6, %0" : "=r" (dr6));
                    ids->dr6 = dr6;
                } else { /* 32 bit thread */
                    x86_debug_state32_t *ids = pcb->ids;
                    __asm__ volatile ("movl %%db6, %0" : "=r" (ids->dr6));
                }
                __asm__ volatile ("movl %0, %%db6" : : "r" (clear));
            }
            exc = EXC_BREAKPOINT;
            code = EXC_I386_SGL;
            break;
        }
        case T_INT3:
            exc = EXC_BREAKPOINT;
            code = EXC_I386_BPT;
            break;

        case T_OVERFLOW:
            exc = EXC_ARITHMETIC;
            code = EXC_I386_INTO;
            break;

        case T_OUT_OF_BOUNDS:
            exc = EXC_ARITHMETIC;
            code = EXC_I386_BOUND;
            break;

        case T_INVALID_OPCODE:
            exc = EXC_BAD_INSTRUCTION;
            code = EXC_I386_INVOP;
            break;

        case T_NO_FPU:
            fpnoextflt();
            return;

        case T_FPU_FAULT:
            fpextovrflt();
            return;

        case 10:                /* invalid TSS == iret with NT flag set */
            exc = EXC_BAD_INSTRUCTION;
            code = EXC_I386_INVTSSFLT;
            subcode = err;
            break;

        case T_SEGMENT_NOT_PRESENT:
            exc = EXC_BAD_INSTRUCTION;
            code = EXC_I386_SEGNPFLT;
            subcode = err;
            break;

        case T_STACK_FAULT:
            exc = EXC_BAD_INSTRUCTION;
            code = EXC_I386_STKFLT;
            subcode = err;
            break;

        case T_GENERAL_PROTECTION:
            exc = EXC_BAD_INSTRUCTION;
            code = EXC_I386_GPFLT;
            subcode = err;
            break;

        case T_PAGE_FAULT:
            prot = VM_PROT_READ;

            if (err & T_PF_WRITE)
                prot |= VM_PROT_WRITE;
#if PAE
            if (err & T_PF_EXECUTE)
                prot |= VM_PROT_EXECUTE;
#endif
            kret = vm_fault(thread->map, vm_map_trunc_page(vaddr),
                            prot, FALSE,
                            THREAD_ABORTSAFE, NULL, 0);

            user_page_fault_continue(kret);

            /* NOTREACHED */
            break;
        case T_SSE_FLOAT_ERROR:
            fpSSEexterrflt();
            return;

        case T_FLOATING_POINT_ERROR:
            fpexterrflt();
            return;

        default:
#if MACH_KGDB
            Debugger("Unanticipated user trap");
            return;
#endif /* MACH_KGDB */
#if MACH_KDB
            if (kdb_trap(type, err, saved_state))
                return;
#endif /* MACH_KDB */
            panic("user trap");
            return;
    }
    intr = ml_set_interrupts_enabled(FALSE);
    myast = ast_pending();
    while (*myast & AST_ALL) {
        ast_taken(AST_ALL, intr);
        ml_set_interrupts_enabled(FALSE);
        myast = ast_pending();
    }
    ml_set_interrupts_enabled(intr);

    i386_exception(exc, code, subcode);
    /*NOTREACHED*/
}
/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void log_thread_action(thread_t, char *);

void
i386_astintr(int preemption)
{
    ast_t mask = AST_ALL;
    spl_t s;

    if (preemption)
        mask = AST_PREEMPTION;

    s = splsched();

    ast_taken(mask, s);

    splx(s);
}
/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
    int exc,
    int code,
    int subcode)
{
    exception_data_type_t codes[EXCEPTION_CODE_MAX];

    codes[0] = code;    /* new exception interface */
    codes[1] = subcode;
    exception_triage(exc, codes, 2);
    /*NOTREACHED*/
}
void
kernel_preempt_check(void)
{
    ast_t     *myast;
    boolean_t intr;

    /*
     * disable interrupts to both prevent pre-emption
     * and to keep the ast state from changing via
     * an interrupt handler making something runnable
     */
    intr = ml_set_interrupts_enabled(FALSE);

    myast = ast_pending();

    if ((*myast & AST_URGENT) && intr == TRUE && get_interrupt_level() == 0) {
        /*
         * can handle interrupts and preemptions
         * at this point
         */
        ml_set_interrupts_enabled(intr);

        /*
         * now cause the PRE-EMPTION trap
         */
        __asm__ volatile ("int $0xff");
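        /*
         * Vector 0xff is T_PREEMPT; it re-enters kernel_trap() above,
         * which fields it first thing and calls
         * ast_taken(AST_PREEMPTION, FALSE).
         */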
    } else {
        /*
         * if interrupts were already disabled or
         * we're in an interrupt context, we can't
         * preempt... of course if AST_URGENT
         * isn't set we also don't want to
         */
        ml_set_interrupts_enabled(intr);
    }
}
#if MACH_KDB

extern void db_i386_state(x86_saved_state32_t *regs);

#include <ddb/db_output.h>

void
db_i386_state(
    x86_saved_state32_t *regs)
{
    db_printf("eip  %8x\n", regs->eip);
    db_printf("trap %8x\n", regs->trapno);
    db_printf("err  %8x\n", regs->err);
    db_printf("efl  %8x\n", regs->efl);
    db_printf("ebp  %8x\n", regs->ebp);
    db_printf("esp  %8x\n", regs->cr2); /* the low-level handler saves kernel esp in the cr2 slot */
    db_printf("uesp %8x\n", regs->uesp);
    db_printf("cs   %8x\n", regs->cs & 0xff);
    db_printf("ds   %8x\n", regs->ds & 0xff);
    db_printf("es   %8x\n", regs->es & 0xff);
    db_printf("fs   %8x\n", regs->fs & 0xff);
    db_printf("gs   %8x\n", regs->gs & 0xff);
    db_printf("ss   %8x\n", regs->ss & 0xff);
    db_printf("eax  %8x\n", regs->eax);
    db_printf("ebx  %8x\n", regs->ebx);
    db_printf("ecx  %8x\n", regs->ecx);
    db_printf("edx  %8x\n", regs->edx);
    db_printf("esi  %8x\n", regs->esi);
    db_printf("edi  %8x\n", regs->edi);
}

#endif /* MACH_KDB */
/* Synchronize a thread's i386_kernel_state (if any) with the given
 * i386_saved_state_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * an NMI IPI.
 */
void
sync_iss_to_iks(x86_saved_state32_t *saved_state)
{
    struct x86_kernel_state32 *iks;
    vm_offset_t kstack;
    boolean_t record_active_regs = FALSE;

    if ((kstack = current_thread()->kernel_stack) != 0) {
        x86_saved_state32_t *regs;

        regs = saved_state;

        iks = STACK_IKS(kstack);

        /*
         * Did we take the trap/interrupt in kernel mode?
         */
        if (regs == USER_REGS32(current_thread()))
            record_active_regs = TRUE;
        else {
            iks->k_ebx = regs->ebx;
            iks->k_esp = (int)regs;
            iks->k_ebp = regs->ebp;
            iks->k_edi = regs->edi;
            iks->k_esi = regs->esi;
            iks->k_eip = regs->eip;
        }
    }

    if (record_active_regs == TRUE) {
        /*
         * Show the trap handler path
         */
        __asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
        __asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
        __asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
        __asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
        __asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
        /*
         * "Current" instruction pointer
         */
        __asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
    }
}
/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state32_t *saved_state)
{
    struct x86_kernel_state32 *iks;
    vm_offset_t kstack;

    if ((kstack = current_thread()->kernel_stack) != 0) {

        iks = STACK_IKS(kstack);
        /*
         * Show the trap handler path
         */
        __asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
        __asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
        __asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
        __asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
        __asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
        /*
         * "Current" instruction pointer
         */
        __asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
    }
}