/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Hardware trap/fault handler.
 */
#include <mach_kgdb.h>
#include <mach_kdb.h>
#include <mach_ldebug.h>

#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h> /* panic_io_port_read() */

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#include <sys/kdebug.h>

#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif	/* MACH_KGDB */

#if MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif	/* MACH_KDB */

#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#include <i386/machine_check.h>
#include <mach/i386/syscall_sw.h>
/*
 * Forward declarations
 */
static void user_page_fault_continue(kern_return_t kret);
static void panic_trap(x86_saved_state32_t *saved_state);
static void set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip);

perfCallback perfTrapHook = NULL;	/* Pointer to CHUD trap hook routine */
perfCallback perfASTHook  = NULL;	/* Pointer to CHUD AST hook routine */

#if CONFIG_DTRACE
/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL;	/* Pointer to DTrace fbt trap hook routine */

extern boolean_t dtrace_tally_fault(user_addr_t);
#endif
void
thread_syscall_return(
	kern_return_t	ret)
{
	thread_t	thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*regs;

		regs = USER_REGS64(thr_act);

		if (kdebug_enable && ((regs->rax & SYSCALL_CLASS_MASK)
				      == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT))) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT(
			      MACHDBG_CODE(DBG_MACH_EXCP_SC,
					   ((int) (regs->rax & SYSCALL_NUMBER_MASK)))
				      | DBG_FUNC_END,
			      ret, 0, 0, 0, 0);
		}
		regs->rax = ret;
	} else {
		x86_saved_state32_t	*regs;

		regs = USER_REGS32(thr_act);

		if (kdebug_enable && ((int) regs->eax < 0)) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT(
			      MACHDBG_CODE(DBG_MACH_EXCP_SC,
					   -((int) regs->eax)) | DBG_FUNC_END,
			      ret, 0, 0, 0, 0);
		}
		regs->eax = ret;
	}
	thread_exception_return();
	/*NOTREACHED*/
}
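/*
 * Note on the kdebug checks above: a 64-bit task encodes the syscall
 * class in the high bits of %rax (SYSCALL_CLASS_MACH), while a 32-bit
 * task marks Mach traps with a negative number in %eax; in both cases
 * only Mach traps are traced here.
 */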
#if	MACH_KDB

boolean_t	debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

void
thread_kdb_return(void)
{
	thread_t	thr_act = current_thread();
	x86_saved_state_t *iss = USER_STATE(thr_act);

	if (is_saved_state64(iss)) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(iss);

		if (kdb_trap(regs->isf.trapno, (int)regs->isf.err, (void *)regs)) {
			thread_exception_return();
			/*NOTREACHED*/
		}
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(iss);

		if (kdb_trap(regs->trapno, regs->err, (void *)regs)) {
			thread_exception_return();
			/*NOTREACHED*/
		}
	}
}

#endif	/* MACH_KDB */
void
user_page_fault_continue(
	kern_return_t	kr)
{
	thread_t	thread = current_thread();
	ast_t		*myast;
	boolean_t	intr;
	user_addr_t	vaddr;
#if	MACH_KDB
	x86_saved_state_t *regs = USER_STATE(thread);
	int		err;
	int		trapno;

	assert((is_saved_state32(regs) && !thread_is_64bit(thread)) ||
	       (is_saved_state64(regs) &&  thread_is_64bit(thread)));
#endif

	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*uregs;

		uregs = USER_REGS64(thread);

#if	MACH_KDB
		trapno = uregs->isf.trapno;
		err    = uregs->isf.err;
#endif
		vaddr = (user_addr_t)uregs->cr2;
	} else {
		x86_saved_state32_t	*uregs;

		uregs = USER_REGS32(thread);

#if	MACH_KDB
		trapno = uregs->trapno;
		err    = uregs->err;
#endif
		vaddr = uregs->cr2;
	}

	if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if	MACH_KDB
		if (!db_breakpoints_inserted) {
			db_set_breakpoints();
		}
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (err & T_PF_WRITE) &&
		    db_find_watchpoint(thread->map,
				       (vm_offset_t)vaddr,
				       saved_state32(regs)))
			kdb_trap(T_WATCHPOINT, 0, saved_state32(regs));
#endif	/* MACH_KDB */
		intr = ml_set_interrupts_enabled(FALSE);
		myast = ast_pending();
		while (*myast & AST_ALL) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
			myast = ast_pending();
		}
		ml_set_interrupts_enabled(intr);

		thread_exception_return();
		/*NOTREACHED*/
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(trapno, err, saved_state32(regs))) {
		thread_exception_return();
		/*NOTREACHED*/
	}
#endif	/* MACH_KDB */

	i386_exception(EXC_BAD_ACCESS, kr, vaddr);
	/*NOTREACHED*/
}
/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
	uint32_t	fault_addr;
	uint32_t	recover_addr;
};

extern struct recovery	recover_table[];
extern struct recovery	recover_table_end[];
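/*
 * Each recovery entry pairs the address of an instruction that may
 * legitimately fault (fault_addr) with the address to resume at if it
 * does (recover_addr).  kernel_trap() scans recover_table through
 * recover_table_end when a page fault or general protection fault has
 * no other handler; the entries themselves are registered elsewhere,
 * presumably by the copyin/copyout implementations.
 */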
const char *	trap_type[] = {TRAP_NAMES};
unsigned	TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
static inline void
reset_dr7(void)
{
	uint32_t dr7 = 0x400;	/* magic dr7 reset value */
	__asm__ volatile("movl %0,%%dr7" : : "r" (dr7));
}
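/*
 * 0x400 leaves all four hardware breakpoints disabled; bit 10 of DR7
 * is architecturally reserved and reads as 1, hence the "magic" reset
 * value rather than plain zero.
 */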
#if MACH_KDP
unsigned kdp_has_active_watchpoints = 0;
#endif
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
void
kernel_trap(
	x86_saved_state_t	*state)
{
	x86_saved_state32_t	*saved_state;
	int			code;
	user_addr_t		vaddr;
	int			type;
	vm_map_t		map = 0;	/* protected by T_PAGE_FAULT */
	kern_return_t		result = KERN_FAILURE;
	thread_t		thread;
	ast_t			*myast;
	boolean_t		intr;
	vm_prot_t		prot;
	struct recovery		*rp;
	vm_offset_t		kern_ip;
	int			fault_in_copy_window = -1;
	int			is_user = 0;
#if	MACH_KDB
	pt_entry_t		*pte;
#endif	/* MACH_KDB */

	thread = current_thread();

	if (is_saved_state64(state))
		panic("kernel_trap(%p) with 64-bit state", state);
	saved_state = saved_state32(state);

	vaddr = (user_addr_t)saved_state->cr2;
	type  = saved_state->trapno;
	code  = saved_state->err & 0xffff;
	intr  = (saved_state->efl & EFL_IF) != 0;	/* state of ints at trap */

	kern_ip = (vm_offset_t)saved_state->eip;

	myast = ast_pending();

	if (perfASTHook) {
		if (*myast & AST_CHUD_ALL)
			perfASTHook(type, NULL, 0, 0);
	} else
		*myast &= ~AST_CHUD_ALL;

	/*
	 * Is there a hook?
	 */
	if (perfTrapHook) {
		if (perfTrapHook(type, NULL, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			return;
		}
	}

#if CONFIG_DTRACE
	if (tempDTraceTrapHook) {
		if (tempDTraceTrapHook(type, state, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			return;
		}
	}
#endif	/* CONFIG_DTRACE */

	/*
	 * we come here with interrupts off as we don't want to recurse
	 * on preemption below.  but we do want to re-enable interrupts
	 * as soon we possibly can to hold latency down
	 */
	if (T_PREEMPT == type) {
		ast_taken(AST_PREEMPTION, FALSE);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
				      0, 0, 0, kern_ip, 0);
		return;
	}
	if (T_PAGE_FAULT == type) {
		/*
		 * assume we're faulting in the kernel map
		 */
		map = kernel_map;

		if (thread != THREAD_NULL && thread->map != kernel_map) {
			vm_offset_t	copy_window_base;
			vm_offset_t	kvaddr;
			int		window_index;

			kvaddr = (vm_offset_t)vaddr;
			/*
			 * must determine if fault occurred in
			 * the copy window while pre-emption is
			 * disabled for this processor so that
			 * we only need to look at the window
			 * associated with this processor
			 */
			copy_window_base = current_cpu_datap()->cpu_copywindow_base;

			if (kvaddr >= copy_window_base &&
			    kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS)) ) {

				window_index = (kvaddr - copy_window_base) / NBPDE;

				if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) {

					kvaddr -= (copy_window_base + (NBPDE * window_index));
					vaddr = thread->machine.copy_window[window_index].user_base + kvaddr;

					map = thread->map;
					fault_in_copy_window = window_index;
				}
				is_user = -1;
			}
		}
	}
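/*
 * Worked example for the copy-window translation above (all values
 * illustrative): assuming NBPDE is 4MB and copy_window_base is
 * 0xE0000000, a fault at kernel address 0xE0401000 yields
 * window_index = 0x401000 / NBPDE = 1 and a window offset of 0x1000,
 * so the fault is retried at user address user_base + 0x1000 in the
 * faulting thread's own map.
 */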
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
			      (int)(vaddr >> 32), (int)vaddr, is_user, kern_ip, 0);

	(void) ml_set_interrupts_enabled(intr);
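/*
 * The fault state has been captured at this point, so the
 * interrupt-enable state in force when the trap was taken can be
 * restored before dispatching on the trap type.
 */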
	switch (type) {

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();
		return;

	    case T_DEBUG:
#if MACH_KDP
		if ((saved_state->efl & EFL_TF) == 0
		    && !kdp_has_active_watchpoints)
#else
		if ((saved_state->efl & EFL_TF) == 0)
#endif
		{
			/* We've somehow encountered a debug
			 * register match that does not belong
			 * to the kernel debugger.
			 * This isn't supposed to happen.
			 */
			reset_dr7();
			return;
		}
		goto debugger_entry;

	    case T_PAGE_FAULT:
		/*
		 * If the current map is a submap of the kernel map,
		 * and the address is within that map, fault on that
		 * map.  If the same check is done in vm_fault
		 * (vm_map_lookup), we may deadlock on the kernel map
		 * lock.
		 */

		prot = VM_PROT_READ;

		if (code & T_PF_WRITE)
			prot |= VM_PROT_WRITE;

		if (code & T_PF_EXECUTE)
			prot |= VM_PROT_EXECUTE;
#if	MACH_KDB
		/*
		 * Check for watchpoint on kernel static data.
		 * vm_fault would fail in this case
		 */
		if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted &&
		    (code & T_PF_WRITE) && vaddr < vm_map_max(map) &&
		    ((*(pte = pmap_pte(kernel_pmap, (vm_map_offset_t)vaddr))) & INTEL_PTE_WRITE) == 0) {
			pmap_store_pte(
				pte,
				*pte | INTEL_PTE_VALID | INTEL_PTE_WRITE);
			/* XXX need invltlb here? */

			result = KERN_SUCCESS;
			goto look_for_watchpoints;
		}
#endif	/* MACH_KDB */
#if CONFIG_DTRACE
		if (thread->options & TH_OPT_DTRACE) {	/* Executing under dtrace_probe? */
			if (dtrace_tally_fault(vaddr)) { /* Should a fault under dtrace be ignored? */
				/*
				 * DTrace has "anticipated" the possibility of this fault, and has
				 * established the suitable recovery state. Drop down now into the
				 * recovery handling code in "case T_GENERAL_PROTECTION:".
				 */
				goto FALL_THROUGH;
			}
		}
#endif	/* CONFIG_DTRACE */
		result = vm_fault(map,
				  vm_map_trunc_page(vaddr),
				  prot,
				  FALSE,
				  THREAD_UNINT, NULL, 0);
#if	MACH_KDB
		if (result == KERN_SUCCESS) {
			/*
			 * Look for watchpoints
			 */
look_for_watchpoints:
			if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted && (code & T_PF_WRITE) &&
			    db_find_watchpoint(map, vaddr, saved_state))
				kdb_trap(T_WATCHPOINT, 0, saved_state);
		}
#endif	/* MACH_KDB */
		if (result == KERN_SUCCESS) {

			if (fault_in_copy_window != -1) {
				pt_entry_t	*updp;
				pt_entry_t	*kpdp;

				/*
				 * in case there was no page table assigned
				 * for the user base address and the pmap
				 * got 'expanded' due to this fault, we'll
				 * copy in the descriptor
				 *
				 * we're either setting the page table descriptor
				 * to the same value or it was 0... no need
				 * for a TLB flush in either case
				 */

				ml_set_interrupts_enabled(FALSE);
				updp = pmap_pde(map->pmap, thread->machine.copy_window[fault_in_copy_window].user_base);

				if (0 == updp)
					panic("trap: updp 0"); /* XXX DEBUG */
				kpdp = current_cpu_datap()->cpu_copywindow_pdp;
				kpdp += fault_in_copy_window;

				if (*kpdp && (*kpdp & PG_FRAME) != (*updp & PG_FRAME))
					panic("kernel_fault: user pdp doesn't match - updp = 0x%x, kpdp = 0x%x\n", updp, kpdp);

				pmap_store_pte(kpdp, *updp);

				(void) ml_set_interrupts_enabled(intr);
			}
			return;
		}
		/*
		 * fall through
		 */
#if CONFIG_DTRACE
FALL_THROUGH:
#endif	/* CONFIG_DTRACE */
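/*
 * T_GENERAL_PROTECTION also serves as the recovery path for faults
 * taken in copyin/copyout: T_PAGE_FAULT falls through to it, and the
 * DTrace case above jumps here via FALL_THROUGH, so a single
 * recover_table scan redirects execution to the registered recovery
 * address.
 */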
	    case T_GENERAL_PROTECTION:
		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		for (rp = recover_table; rp < recover_table_end; rp++) {
			if (kern_ip == rp->fault_addr) {
				set_recovery_ip(saved_state, rp->recover_addr);
				return;
			}
		}

		/*
		 * Check thread recovery address also.
		 */
		if (thread->recover) {
			set_recovery_ip(saved_state, thread->recover);
			thread->recover = 0;
			return;
		}
		/*
		 * Unanticipated page-fault errors in kernel
		 * should not end up here.
		 */
		/* fall through... */

	    default:
		/*
		 * Exception 15 is reserved but some chips may generate it
		 * spuriously. Seen at startup on AMD Athlon-64.
		 */
		if (type == 15) {
			kprintf("kernel_trap() ignoring spurious trap 15\n");
			return;
		}
debugger_entry:
		/* Ensure that the i386_kernel_state at the base of the
		 * current thread's stack (if any) is synchronized with the
		 * context at the moment of the trap, to facilitate
		 * access through the debugger.
		 */
		sync_iss_to_iks(saved_state);
#if	MACH_KDB
restart_debugger:
#endif	/* MACH_KDB */
#if	MACH_KDP
		if (current_debugger != KDB_CUR_DB) {
			if (kdp_i386_trap(type, saved_state, result, vaddr))
				return;
		} else {
#endif	/* MACH_KDP */
#if	MACH_KDB
			if (kdb_trap(type, code, saved_state)) {
				if (switch_debugger) {
					current_debugger = KDP_CUR_DB;
					goto restart_debugger;
				}
				return;
			}
#endif	/* MACH_KDB */
#if	MACH_KDP
		}
#endif
	}

	panic_trap(saved_state);
	/*
	 * NOTREACHED
	 */
}
static void
set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip)
{
	saved_state->eip = ip;
}
static void
panic_trap(x86_saved_state32_t *regs)
{
	const char *trapname = "Unknown";
	uint32_t	cr0 = get_cr0();
	uint32_t	cr2 = get_cr2();
	uint32_t	cr3 = get_cr3();
	uint32_t	cr4 = get_cr4();

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	kprintf("panic trap number 0x%x, eip 0x%x\n", regs->trapno, regs->eip);
	kprintf("cr0 0x%08x cr2 0x%08x cr3 0x%08x cr4 0x%08x\n",
		cr0, cr2, cr3, cr4);

	if (regs->trapno < TRAP_TYPES)
		trapname = trap_type[regs->trapno];

	panic("Kernel trap at 0x%08x, type %d=%s, registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x, CS:  0x%08x, DS:  0x%08x\n"
	      "Error code: 0x%08x\n",
	      regs->eip, regs->trapno, trapname, cr0, cr2, cr3, cr4,
	      regs->eax, regs->ebx, regs->ecx, regs->edx,
	      regs->cr2, regs->ebp, regs->esi, regs->edi,
	      regs->efl, regs->eip, regs->cs, regs->ds, regs->err);
	/*
	 * This next statement is not executed,
	 * but it's needed to stop the compiler using tail call optimization
	 * for the panic call - which confuses the subsequent backtrace.
	 */
	cr0 = 0;
}

extern void kprintf_break_lock(void);
/*
 * Called from locore on a special reserved stack after a double-fault
 * is taken in kernel space.
 * Kernel stack overflow is one route here.
 */
void
panic_double_fault(
#if CONFIG_NO_PANIC_STRINGS
		__unused int code
#else
		int code
#endif
		)
{
#if MACH_KDP || !CONFIG_NO_PANIC_STRINGS
	struct i386_tss *my_ktss = current_ktss();
#endif

	/* Set postcode (DEBUG only) */
	postcode(PANIC_DOUBLE_FAULT);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

#if MACH_KDP
	/*
	 * Print backtrace leading to first fault:
	 */
	panic_i386_backtrace((void *) my_ktss->ebp, 10);
#endif

	panic("Double fault at 0x%08x, thread:%p, code:0x%x, "
	      "registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x\n",
	      my_ktss->eip, current_thread(), code,
	      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
	      my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
	      my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
	      my_ktss->eflags, my_ktss->eip);
}
/*
 * Called from locore on a special reserved stack after a machine-check
 */
void
panic_machine_check(
#if CONFIG_NO_PANIC_STRINGS
		__unused int code
#else
		int code
#endif
		)
{
#if !CONFIG_NO_PANIC_STRINGS
	struct i386_tss *my_ktss = current_ktss();
#endif

	/* Set postcode (DEBUG only) */
	postcode(PANIC_MACHINE_CHECK);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Dump the contents of the machine check MSRs (if any).
	 */
	mca_dump();

	/*
	 * And that's all folks, we don't attempt recovery...
	 */
	panic("Machine-check at 0x%08x, thread:%p, code:0x%x, "
	      "registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x\n",
	      my_ktss->eip, current_thread(), code,
	      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
	      my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
	      my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
	      my_ktss->eflags, my_ktss->eip);
}
void
panic_double_fault64(x86_saved_state_t *esp)
{
	/* Set postcode (DEBUG only) */
	postcode(PANIC_DOUBLE_FAULT);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Dump the interrupt stack frame at last kernel entry.
	 */
	if (is_saved_state64(esp)) {
#if !CONFIG_NO_PANIC_STRINGS
		x86_saved_state64_t *ss64p = saved_state64(esp);
#endif
		panic("Double fault thread:%p, trapno:0x%x, err:0x%qx, "
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
		      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
		      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
		      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
		      "RFL: 0x%016qx, RIP: 0x%016qx, CR2: 0x%016qx\n",
		      current_thread(), ss64p->isf.trapno, ss64p->isf.err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
		      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
		      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
		      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
		      ss64p->isf.rflags, ss64p->isf.rip, ss64p->cr2);
	} else {
#if !CONFIG_NO_PANIC_STRINGS
		x86_saved_state32_t *ss32p = saved_state32(esp);
#endif
		panic("Double fault at 0x%08x, thread:%p, trapno:0x%x, err:0x%x, "
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
		      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
		      "EFL: 0x%08x, EIP: 0x%08x\n",
		      ss32p->eip, current_thread(), ss32p->trapno, ss32p->err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
		      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
		      ss32p->efl, ss32p->eip);
	}
}
/*
 * Machine check handler for 64-bit.
 */
void
panic_machine_check64(x86_saved_state_t *esp)
{
	/* Set postcode (DEBUG only) */
	postcode(PANIC_MACHINE_CHECK);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Dump the contents of the machine check MSRs (if any).
	 */
	mca_dump();

	/*
	 * And that's all folks, we don't attempt recovery...
	 */
	if (is_saved_state64(esp)) {
#if !CONFIG_NO_PANIC_STRINGS
		x86_saved_state64_t *ss64p = saved_state64(esp);
#endif
		panic("Machine Check thread:%p, trapno:0x%x, err:0x%qx, "
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
		      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
		      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
		      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
		      "RFL: 0x%016qx, RIP: 0x%016qx\n",
		      current_thread(), ss64p->isf.trapno, ss64p->isf.err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
		      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
		      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
		      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
		      ss64p->isf.rflags, ss64p->isf.rip);
	} else {
#if !CONFIG_NO_PANIC_STRINGS
		x86_saved_state32_t *ss32p = saved_state32(esp);
#endif
		panic("Machine Check at 0x%08x, thread:%p, trapno:0x%x, err:0x%x, "
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
		      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
		      "EFL: 0x%08x, EIP: 0x%08x\n",
		      ss32p->eip, current_thread(), ss32p->trapno, ss32p->err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
		      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
		      ss32p->efl, ss32p->eip);
	}
}
extern kern_return_t dtrace_user_probe(x86_saved_state_t *);
/*
 * Trap from user mode.
 */
void
user_trap(
	x86_saved_state_t	*saved_state)
{
	int			exc;
	int			err;
	mach_exception_code_t	code;
	mach_exception_subcode_t subcode;
	int			type;
	user_addr_t		vaddr;
	vm_prot_t		prot;
	user_addr_t		rip;
	thread_t		thread = current_thread();
	ast_t			*myast;
	kern_return_t		kret;
	assert((is_saved_state32(saved_state) && !thread_is_64bit(thread)) ||
	       (is_saved_state64(saved_state) &&  thread_is_64bit(thread)));

	if (is_saved_state64(saved_state)) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(saved_state);

		type  = regs->isf.trapno;
		err   = regs->isf.err & 0xffff;
		vaddr = (user_addr_t)regs->cr2;
		rip   = (user_addr_t)regs->isf.rip;
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(saved_state);

		type  = regs->trapno;
		err   = regs->err & 0xffff;
		vaddr = (user_addr_t)regs->cr2;
		rip   = (user_addr_t)regs->eip;
	}

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
			      (int)(vaddr>>32), (int)vaddr, (int)(rip>>32), (int)rip, 0);

	code = 0;
	subcode = 0;
	exc = 0;

#if 0
	kprintf("user_trap(0x%08x) type=%d vaddr=0x%016llx\n",
		saved_state, type, vaddr);
#endif
	myast = ast_pending();

	if (perfASTHook) {
		if (*myast & AST_CHUD_ALL) {
			perfASTHook(type, saved_state, 0, 0);
		}
	} else {
		*myast &= ~AST_CHUD_ALL;
	}

	/* Is there a hook? */
	if (perfTrapHook) {
		if (perfTrapHook(type, saved_state, 0, 0) == KERN_SUCCESS)
			return;	/* If it succeeds, we are done... */
	}

#if CONFIG_DTRACE
	/*
	 * DTrace does not consume all user traps, only INT_3's for now.
	 * Avoid needlessly calling tempDTraceTrapHook here, and let the
	 * INT_3 case handle them.
	 */
#endif
	switch (type) {

	    case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;
	    case T_DEBUG:
		{
			pcb_t	pcb;
			unsigned int clear = 0;
			uint32_t dr6;

			/*
			 * get dr6 and set it in the thread's pcb before
			 * returning to userland
			 */
			pcb = thread->machine.pcb;
			if (pcb->ids) {
				/*
				 * We can get and set the status register
				 * in 32-bit mode even on a 64-bit thread
				 * because the high order bits are not
				 * used.
				 */
				if (thread_is_64bit(thread)) {
					x86_debug_state64_t *ids = pcb->ids;
					dr6 = (uint32_t)ids->dr6;
					__asm__ volatile ("movl %%db6, %0" : "=r" (dr6));
					ids->dr6 = dr6;
				} else { /* 32 bit thread */
					x86_debug_state32_t *ids = pcb->ids;
					__asm__ volatile ("movl %%db6, %0" : "=r" (ids->dr6));
				}
				__asm__ volatile ("movl %0, %%db6" : : "r" (clear));
			}
			exc = EXC_BREAKPOINT;
			code = EXC_I386_SGL;
			break;
		}
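		/*
		 * Note: %db6 is read with a 32-bit move in both branches;
		 * per the comment above this is safe even for a 64-bit
		 * thread because the meaningful DR6 status bits live in
		 * the low 32 bits.  DR6 is then cleared so stale
		 * breakpoint status does not leak into the next trap.
		 */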
	    case T_INT3:
#if CONFIG_DTRACE
		if (dtrace_user_probe(saved_state) == KERN_SUCCESS)
			return; /* If it succeeds, we are done... */
#endif
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	    case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;
	    case T_OUT_OF_BOUNDS:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_BOUND;
		break;

	    case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;
	    case T_FPU_FAULT:
		fpextovrflt();	/* Propagates exception directly, doesn't return */
		return;

	    case T_INVALID_TSS:	/* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = err;
		break;

	    case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = err;
		break;

	    case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = err;
		break;
	    case T_GENERAL_PROTECTION:
		/*
		 * There's a wide range of circumstances which generate this
		 * class of exception. From user-space, many involve bad
		 * addresses (such as a non-canonical 64-bit address).
		 * So we map this to EXC_BAD_ACCESS (and thereby SIGSEGV).
		 * The trouble is cr2 doesn't contain the faulting address;
		 * we'd need to decode the faulting instruction to really
		 * determine this. We'll leave that to debuggers.
		 * However, attempted execution of privileged instructions
		 * (e.g. cli) also generate GP faults and so we map these
		 * to EXC_BAD_ACCESS (and thence SIGSEGV) also - rather than
		 * EXC_BAD_INSTRUCTION which is more accurate. We just can't
		 * win!
		 */
		exc = EXC_BAD_ACCESS;
		code = EXC_I386_GPFLT;
		break;
	    case T_PAGE_FAULT:
		prot = VM_PROT_READ;

		if (err & T_PF_WRITE)
			prot |= VM_PROT_WRITE;

		if (err & T_PF_EXECUTE)
			prot |= VM_PROT_EXECUTE;

		kret = vm_fault(thread->map, vm_map_trunc_page(vaddr),
				prot, FALSE,
				THREAD_ABORTSAFE, NULL, 0);

		user_page_fault_continue(kret);

		/* NOTREACHED */
		break;
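		/*
		 * user_page_fault_continue() does not return: on success it
		 * resumes the thread via thread_exception_return(), otherwise
		 * it raises EXC_BAD_ACCESS through i386_exception().
		 */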
	    case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();	/* Propagates exception directly, doesn't return */
		return;

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();		/* Propagates exception directly, doesn't return */
		return;
	    case T_DTRACE_RET:
#if CONFIG_DTRACE
		if (dtrace_user_probe(saved_state) == KERN_SUCCESS)
			return; /* If it succeeds, we are done... */
#endif
		/*
		 * If we get an INT 0x7f when we do not expect to,
		 * treat it as an illegal instruction
		 */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	    default:
#if	MACH_KGDB
		Debugger("Unanticipated user trap");
		return;
#endif	/* MACH_KGDB */
#if	MACH_KDB
		if (kdb_trap(type, err, saved_state32(saved_state)))
			return;
#endif	/* MACH_KDB */
		panic("Unexpected user trap, type %d", type);
		return;
	}
	/* Note: Codepaths that directly return from user_trap() have pending
	 * ASTs processed in locore
	 */
	i386_exception(exc, code, subcode);
	/*NOTREACHED*/
}
/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void log_thread_action(thread_t, char *);

void
i386_astintr(int preemption)
{
	ast_t	mask = AST_ALL;
	spl_t	s;

	if (preemption)
		mask = AST_PREEMPTION;

	s = splsched();

	ast_taken(mask, s);

	splx(s);
}
/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
	int	exc,
	mach_exception_code_t code,
	mach_exception_subcode_t subcode)
{
	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];

	codes[0] = code;	/* new exception interface */
	codes[1] = subcode;
	exception_triage(exc, codes, 2);
	/*NOTREACHED*/
}
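/*
 * Note: by Mach convention codes[0] carries the machine-specific
 * exception code and codes[1] the subcode (e.g. the faulting address
 * for the EXC_BAD_ACCESS raised by user_page_fault_continue()).
 */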
void
kernel_preempt_check(void)
{
	ast_t		*myast;
	boolean_t	intr;

	/*
	 * disable interrupts to both prevent pre-emption
	 * and to keep the ast state from changing via
	 * an interrupt handler making something runnable
	 */
	intr = ml_set_interrupts_enabled(FALSE);

	myast = ast_pending();

	if ((*myast & AST_URGENT) && intr == TRUE && get_interrupt_level() == 0) {
		/*
		 * can handle interrupts and preemptions
		 * at this point
		 */
		ml_set_interrupts_enabled(intr);

		/*
		 * now cause the PRE-EMPTION trap
		 */
		__asm__ volatile (" int $0xff");
	} else {
		/*
		 * if interrupts were already disabled or
		 * we're in an interrupt context, we can't
		 * preempt... of course if AST_URGENT
		 * isn't set we also don't want to
		 */
		ml_set_interrupts_enabled(intr);
	}
}
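/*
 * "int $0xff" raises vector 0xff, which the kernel dispatches as
 * T_PREEMPT; kernel_trap() short-circuits that case near its top by
 * calling ast_taken(AST_PREEMPTION, FALSE) before any fault handling.
 */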
#if	MACH_KDB

extern void db_i386_state(x86_saved_state32_t *regs);

#include <ddb/db_output.h>

void
db_i386_state(
	x86_saved_state32_t *regs)
{
	db_printf("eip	%8x\n", regs->eip);
	db_printf("trap	%8x\n", regs->trapno);
	db_printf("err	%8x\n", regs->err);
	db_printf("efl	%8x\n", regs->efl);
	db_printf("ebp	%8x\n", regs->ebp);
	db_printf("esp	%8x\n", regs->cr2);
	db_printf("uesp	%8x\n", regs->uesp);
	db_printf("cs	%8x\n", regs->cs & 0xff);
	db_printf("ds	%8x\n", regs->ds & 0xff);
	db_printf("es	%8x\n", regs->es & 0xff);
	db_printf("fs	%8x\n", regs->fs & 0xff);
	db_printf("gs	%8x\n", regs->gs & 0xff);
	db_printf("ss	%8x\n", regs->ss & 0xff);
	db_printf("eax	%8x\n", regs->eax);
	db_printf("ebx	%8x\n", regs->ebx);
	db_printf("ecx	%8x\n", regs->ecx);
	db_printf("edx	%8x\n", regs->edx);
	db_printf("esi	%8x\n", regs->esi);
	db_printf("edi	%8x\n", regs->edi);
}

#endif	/* MACH_KDB */
/* Synchronize a thread's i386_kernel_state (if any) with the given
 * i386_saved_state_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * a debugger IPI from another processor.
 */
void
sync_iss_to_iks(x86_saved_state32_t *saved_state)
{
	struct x86_kernel_state32 *iks;
	vm_offset_t kstack;
	boolean_t record_active_regs = FALSE;

	if ((kstack = current_thread()->kernel_stack) != 0) {
		x86_saved_state32_t	*regs;

		regs = saved_state;

		iks = STACK_IKS(kstack);

		/*
		 * Did we take the trap/interrupt in kernel mode?
		 */
		if (regs == USER_REGS32(current_thread()))
			record_active_regs = TRUE;
		else {
			iks->k_ebx = regs->ebx;
			iks->k_esp = (int)regs;
			iks->k_ebp = regs->ebp;
			iks->k_edi = regs->edi;
			iks->k_esi = regs->esi;
			iks->k_eip = regs->eip;
		}
	}

	if (record_active_regs == TRUE) {
		/*
		 * Show the trap handler path
		 */
		__asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
		__asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
		__asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
		__asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
		__asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
		/*
		 * "Current" instruction pointer
		 */
		__asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
	}
}
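/*
 * The "movl $1f, %0 / 1:" sequence above stores the address of the
 * immediately following local label - i.e. a program counter inside
 * this function - so a debugger backtrace of the saved state starts
 * at the trap handler itself.
 */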
/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) {
	struct x86_kernel_state32 *iks;
	vm_offset_t kstack;

	if ((kstack = current_thread()->kernel_stack) != 0) {
		iks = STACK_IKS(kstack);
		/*
		 * Display the trap handler path.
		 */
		__asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
		__asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
		__asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
		__asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
		__asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
		/*
		 * "Current" instruction pointer.
		 */
		__asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
	}
}