/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Hardware trap/fault handler.
 */
#include <mach_kgdb.h>
#include <mach_kdb.h>
#include <mach_ldebug.h>

#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <architecture/i386/pio.h>	/* inb() */

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/misc_protos.h>

#include <sys/kdebug.h>

#if	MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif	/* MACH_KGDB */

#if	MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif	/* MACH_KDB */

#include <i386/io_emulate.h>
#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#include <mach/i386/syscall_sw.h>
/*
 * Forward declarations
 */
static void user_page_fault_continue(kern_return_t kret);
static void panic_trap(x86_saved_state32_t *saved_state);
static void set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip);

perfCallback perfTrapHook = NULL;	/* Pointer to CHUD trap hook routine */
perfCallback perfASTHook  = NULL;	/* Pointer to CHUD AST hook routine */
void
thread_syscall_return(
	kern_return_t	ret)
{
	thread_t	thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*regs;

		regs = USER_REGS64(thr_act);

		if (kdebug_enable && ((regs->rax & SYSCALL_CLASS_MASK) == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT))) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_SC, ((int) (regs->rax & SYSCALL_NUMBER_MASK)))
				| DBG_FUNC_END,
				ret, 0, 0, 0, 0);
		}
		regs->rax = ret;
	} else {
		x86_saved_state32_t	*regs;

		regs = USER_REGS32(thr_act);

		if (kdebug_enable && ((int) regs->eax < 0)) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_SC, -((int) regs->eax))
				| DBG_FUNC_END,
				ret, 0, 0, 0, 0);
		}
		regs->eax = ret;
	}
	thread_exception_return();
	/*NOTREACHED*/
}
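/*
 * Illustrative note (not part of the original source): in the 64-bit ABI the
 * syscall number in %rax carries a class field in its high bits (see
 * <mach/i386/syscall_sw.h>), so a Mach trap is recognized by comparing the
 * masked class against SYSCALL_CLASS_MACH, e.g. assuming an arbitrary trap
 * number 28:
 *
 *	rax = (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT) | 28;
 *	(rax & SYSCALL_CLASS_MASK)  -> SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT
 *	(rax & SYSCALL_NUMBER_MASK) -> 28
 *
 * The 32-bit path keeps the historical convention of negative numbers for
 * Mach traps, which is why it only tests for (int)regs->eax < 0.
 */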
#if	MACH_KDB
boolean_t	debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

void
thread_kdb_return(void)
{
	thread_t	thr_act = current_thread();
	x86_saved_state_t *iss = USER_STATE(thr_act);

	if (is_saved_state64(iss)) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(iss);

		if (kdb_trap(regs->isf.trapno, (int)regs->isf.err, (void *)regs)) {
			thread_exception_return();
			/*NOTREACHED*/
		}
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(iss);

		if (kdb_trap(regs->trapno, regs->err, (void *)regs)) {
			thread_exception_return();
			/*NOTREACHED*/
		}
	}
}
#endif	/* MACH_KDB */
void
user_page_fault_continue(
	kern_return_t	kr)
{
	thread_t	thread = current_thread();
	x86_saved_state_t *regs = USER_STATE(thread);
	ast_t		*myast;
	boolean_t	intr;
	user_addr_t	vaddr;
#if	MACH_KDB
	int		err;
	int		trapno;
#endif

	assert((is_saved_state32(regs) && !thread_is_64bit(thread)) ||
	       (is_saved_state64(regs) &&  thread_is_64bit(thread)));

	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*uregs;

		uregs = USER_REGS64(thread);

#if	MACH_KDB
		trapno = uregs->isf.trapno;
		err    = uregs->isf.err;
#endif
		vaddr = (user_addr_t)uregs->cr2;
	} else {
		x86_saved_state32_t	*uregs;

		uregs = USER_REGS32(thread);

#if	MACH_KDB
		trapno = uregs->trapno;
		err    = uregs->err;
#endif
		vaddr = (user_addr_t)uregs->cr2;
	}

	if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if	MACH_KDB
		if (!db_breakpoints_inserted) {
			db_set_breakpoints();
		}
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (err & T_PF_WRITE) &&
		    db_find_watchpoint(thread->map,
				       (vm_offset_t)vaddr,
				       regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
#endif	/* MACH_KDB */
		intr = ml_set_interrupts_enabled(FALSE);
		myast = ast_pending();
		while (*myast & AST_ALL) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
			myast = ast_pending();
		}
		ml_set_interrupts_enabled(intr);

		thread_exception_return();
		/*NOTREACHED*/
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(trapno, err, regs)) {
		thread_exception_return();
		/*NOTREACHED*/
	}
#endif	/* MACH_KDB */

	i386_exception(EXC_BAD_ACCESS, kr, vaddr);
	/*NOTREACHED*/
}
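/*
 * Illustrative note (not part of the original source): the
 * ast_pending()/ast_taken() loop above (repeated at the bottom of
 * user_trap()) drains every pending AST with interrupts disabled before the
 * thread re-enters user mode, re-reading ast_pending() on each pass because
 * handling one AST (a preemption, for instance) may post another.
 */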
/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
	uint32_t	fault_addr;
	uint32_t	recover_addr;
};

extern struct recovery	recover_table[];
extern struct recovery	recover_table_end[];

const char *	trap_type[] = {TRAP_NAMES};
unsigned	TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);

extern unsigned panic_io_port;

static inline void
reset_dr7(void)
{
	uint32_t dr7 = 0x400;	/* magic dr7 reset value */
	__asm__ volatile ("movl %0,%%dr7" : : "r" (dr7));
}

unsigned kdp_has_active_watchpoints = 0;
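/*
 * Illustrative note (not part of the original source): each recovery entry
 * pairs the address of a faulting instruction inside a copyin/copyout
 * primitive with the address of its error path.  kernel_trap() below walks
 * recover_table[] when a page fault or general protection fault hits kernel
 * code; on a match it calls set_recovery_ip() to rewrite the saved EIP to
 * recover_addr, so the copy routine returns an error to its caller instead
 * of panicking the system.
 */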
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
void
kernel_trap(
	x86_saved_state_t	*state)
{
	x86_saved_state32_t	*saved_state;
	int			code;
	user_addr_t		vaddr;
	int			type;
	vm_map_t		map;
	kern_return_t		result = KERN_FAILURE;
	thread_t		thread;
	ast_t			*myast;
	boolean_t		intr;
	vm_prot_t		prot;
	struct recovery		*rp;
	vm_offset_t		kern_ip;
	int			fault_in_copy_window = -1;
	int			is_user = 0;
#if	MACH_KDB
	pt_entry_t		*pte;
#endif	/* MACH_KDB */

	thread = current_thread();

	if (is_saved_state64(state))
		panic("kernel_trap(%p) with 64-bit state", state);
	saved_state = saved_state32(state);

	vaddr = (user_addr_t)saved_state->cr2;
	type  = saved_state->trapno;
	code  = saved_state->err & 0xffff;
	intr  = (saved_state->efl & EFL_IF) != 0;	/* state of ints at trap */

	kern_ip = (vm_offset_t)saved_state->eip;

	myast = ast_pending();
	if (perfASTHook) {
		if (*myast & AST_CHUD_ALL)
			perfASTHook(type, NULL, 0, 0);
	} else
		*myast &= ~AST_CHUD_ALL;

	/*
	 * Is there a hook?
	 */
	if (perfTrapHook) {
		if (perfTrapHook(type, NULL, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			return;
		}
	}
	/*
	 * we come here with interrupts off as we don't want to recurse
	 * on preemption below.  but we do want to re-enable interrupts
	 * as soon as we possibly can to hold latency down
	 */
	if (T_PREEMPT == type) {
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
				      0, 0, 0, kern_ip, 0);

		ast_taken(AST_PREEMPTION, FALSE);
		return;
	}
	if (T_PAGE_FAULT == type) {
		/*
		 * assume we're faulting in the kernel map
		 */
		map = kernel_map;

		if (thread != THREAD_NULL && thread->map != kernel_map) {
			vm_offset_t	copy_window_base;
			vm_offset_t	kvaddr;
			int		window_index;

			kvaddr = (vm_offset_t)vaddr;
			/*
			 * must determine if fault occurred in
			 * the copy window while pre-emption is
			 * disabled for this processor so that
			 * we only need to look at the window
			 * associated with this processor
			 */
			copy_window_base = current_cpu_datap()->cpu_copywindow_base;

			if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS)) ) {

				window_index = (kvaddr - copy_window_base) / NBPDE;

				if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) {

					kvaddr -= (copy_window_base + (NBPDE * window_index));
					vaddr = thread->machine.copy_window[window_index].user_base + kvaddr;

					map = thread->map;
					fault_in_copy_window = window_index;
				}
			}
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
			      (int)(vaddr >> 32), (int)vaddr, is_user, kern_ip, 0);

	(void) ml_set_interrupts_enabled(intr);
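	/*
	 * Illustrative note (not part of the original source): each CPU maps
	 * NCOPY_WINDOWS "copy windows", NBPDE bytes apiece, starting at its
	 * cpu_copywindow_base.  A kernel fault inside that range is translated
	 * back to the user address the window currently mirrors:
	 *
	 *	window_index = (kvaddr - copy_window_base) / NBPDE;
	 *	user vaddr   = copy_window[window_index].user_base
	 *			+ (kvaddr - copy_window_base - window_index * NBPDE);
	 *
	 * so the fault is then taken against the user's map (thread->map)
	 * rather than the kernel map.
	 */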
	switch (type) {

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();
		return;

	    case T_DEBUG:
		if ((saved_state->efl & EFL_TF) == 0
		    && !kdp_has_active_watchpoints) {
			/* We've somehow encountered a debug
			 * register match that does not belong
			 * to the kernel debugger.
			 * This isn't supposed to happen.
			 */
			reset_dr7();
			return;
		}
		goto debugger_entry;
	    case T_PAGE_FAULT:
		/*
		 * If the current map is a submap of the kernel map,
		 * and the address is within that map, fault on that
		 * map.  If the same check is done in vm_fault
		 * (vm_map_lookup), we may deadlock on the kernel map
		 * lock.
		 */

		prot = VM_PROT_READ;

		if (code & T_PF_WRITE)
			prot |= VM_PROT_WRITE;
		if (code & T_PF_EXECUTE)
			prot |= VM_PROT_EXECUTE;

#if	MACH_KDB
		/*
		 * Check for watchpoint on kernel static data.
		 * vm_fault would fail in this case
		 */
		if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted &&
		    (code & T_PF_WRITE) && vaddr < vm_map_max(map) &&
		    ((*(pte = pmap_pte(kernel_pmap, (vm_map_offset_t)vaddr))) & INTEL_PTE_WRITE) == 0) {
			pmap_store_pte(
				pte,
				*pte | INTEL_PTE_VALID | INTEL_PTE_WRITE);
			/* XXX need invltlb here? */

			result = KERN_SUCCESS;
			goto look_for_watchpoints;
		}
#endif	/* MACH_KDB */

		result = vm_fault(map,
				  vm_map_trunc_page(vaddr),
				  prot,
				  FALSE,
				  THREAD_UNINT, NULL, 0);

#if	MACH_KDB
		if (result == KERN_SUCCESS) {
			/*
			 * Look for watchpoints
			 */
look_for_watchpoints:
			if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted && (code & T_PF_WRITE) &&
			    db_find_watchpoint(map, vaddr, saved_state))
				kdb_trap(T_WATCHPOINT, 0, saved_state);
		}
#endif	/* MACH_KDB */
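		/*
		 * Illustrative note (not part of the original source): the
		 * page-fault error code pushed by the hardware drives the
		 * protection request built above, roughly:
		 *
		 *	T_PF_WRITE set    -> fault was a write  -> add VM_PROT_WRITE
		 *	T_PF_EXECUTE set  -> instruction fetch  -> add VM_PROT_EXECUTE
		 *	otherwise         -> plain read         -> VM_PROT_READ only
		 *
		 * vm_fault() is then asked to make the page valid for exactly
		 * that kind of access.
		 */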
		if (result == KERN_SUCCESS) {

			if (fault_in_copy_window != -1) {
				pt_entry_t	*updp;
				pt_entry_t	*kpdp;

				/*
				 * in case there was no page table assigned
				 * for the user base address and the pmap
				 * got 'expanded' due to this fault, we'll
				 * copy in the descriptor
				 *
				 * we're either setting the page table descriptor
				 * to the same value or it was 0... no need
				 * for a TLB flush in either case
				 */
				ml_set_interrupts_enabled(FALSE);
				updp = pmap_pde(map->pmap, thread->machine.copy_window[fault_in_copy_window].user_base);

				if (0 == updp) panic("trap: updp 0"); /* XXX DEBUG */
				kpdp = current_cpu_datap()->cpu_copywindow_pdp;
				kpdp += fault_in_copy_window;

				if (*kpdp && (*kpdp & PG_FRAME) != (*updp & PG_FRAME))
					panic("kernel_fault: user pdp doesn't match - updp = 0x%x, kpdp = 0x%x\n", updp, kpdp);

				pmap_store_pte(kpdp, *updp);

				(void) ml_set_interrupts_enabled(intr);
			}
			return;
		}
		/*
		 * fall through to the recovery-address check below
		 */
	    case T_GENERAL_PROTECTION:
		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		for (rp = recover_table; rp < recover_table_end; rp++) {
			if (kern_ip == rp->fault_addr) {
				set_recovery_ip(saved_state, rp->recover_addr);
				return;
			}
		}

		/*
		 * Check thread recovery address also.
		 */
		if (thread->recover) {
			set_recovery_ip(saved_state, thread->recover);
			thread->recover = 0;
			return;
		}
		/*
		 * Unanticipated page-fault errors in kernel
		 * should not return to user space.
		 */
		/* fall through to the debugger/panic path */

	    default:
		/*
		 * Exception 15 is reserved but some chips may generate it
		 * spuriously. Seen at startup on AMD Athlon-64.
		 */
		if (type == 15) {
			kprintf("kernel_trap() ignoring spurious trap 15\n");
			return;
		}
debugger_entry:
		/* Ensure that the i386_kernel_state at the base of the
		 * current thread's stack (if any) is synchronized with the
		 * context at the moment of the trap, to facilitate
		 * access through the debugger.
		 */
		sync_iss_to_iks(saved_state);
#if	MACH_KDB
restart_debugger:
#endif	/* MACH_KDB */
#if	MACH_KDP
		if (current_debugger != KDB_CUR_DB) {
			if (kdp_i386_trap(type, saved_state, result, vaddr))
				return;
		}
#endif	/* MACH_KDP */
#if	MACH_KDB
		else
			if (kdb_trap(type, code, saved_state)) {
				if (switch_debugger) {
					current_debugger = KDP_CUR_DB;
					goto restart_debugger;
				}
				return;
			}
#endif	/* MACH_KDB */
	}

	panic_trap(saved_state);
	/*
	 * NO RETURN
	 */
}
static void
set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip)
{
	saved_state->eip = ip;
}
static void
panic_trap(x86_saved_state32_t *regs)
{
	const char *trapname = "Unknown";
	uint32_t	cr0 = get_cr0();
	uint32_t	cr2 = get_cr2();
	uint32_t	cr3 = get_cr3();
	uint32_t	cr4 = get_cr4();

	if (panic_io_port)
		(void)inb(panic_io_port);

	kprintf("panic trap number 0x%x, eip 0x%x\n", regs->trapno, regs->eip);
	kprintf("cr0 0x%08x cr2 0x%08x cr3 0x%08x cr4 0x%08x\n",
		cr0, cr2, cr3, cr4);

	if (regs->trapno < TRAP_TYPES)
		trapname = trap_type[regs->trapno];

	panic("Unresolved kernel trap (CPU %d, Type %d=%s), registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x, CS:  0x%08x, DS:  0x%08x\n",
	      cpu_number(), regs->trapno, trapname, cr0, cr2, cr3, cr4,
	      regs->eax, regs->ebx, regs->ecx, regs->edx,
	      regs->cr2, regs->ebp, regs->esi, regs->edi,
	      regs->efl, regs->eip, regs->cs, regs->ds);
	/*
	 * This next statement is not executed,
	 * but it's needed to stop the compiler using tail call optimization
	 * for the panic call - which confuses the subsequent backtrace.
	 */
	cr0 = 0;
}

extern void kprintf_break_lock(void);
/*
 * Called from locore on a special reserved stack after a double-fault
 * is taken in kernel space.
 * Kernel stack overflow is one route here.
 */
void
panic_double_fault(int code)
{
	struct i386_tss *my_ktss = current_ktss();

	/* Set postcode (DEBUG only) */
	postcode(PANIC_DOUBLE_FAULT);

	/* Issue an I/O port read if one has been requested - this is an event logic
	 * analyzers can use as a trigger point.
	 */
	if (panic_io_port)
		(void)inb(panic_io_port);

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Print backtrace leading to first fault:
	 */
	panic_i386_backtrace((void *) my_ktss->ebp, 10);

	panic("Double fault (CPU:%d, thread:%p, code:0x%x),"
	      "registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x\n",
	      cpu_number(), current_thread(), code,
	      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
	      my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
	      my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
	      my_ktss->eflags, my_ktss->eip);
}
/*
 * Called from locore on a special reserved stack after a machine-check
 * exception is taken in kernel space.
 */
void
panic_machine_check(int code)
{
	struct i386_tss *my_ktss = current_ktss();

	/* Set postcode (DEBUG only) */
	postcode(PANIC_MACHINE_CHECK);

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	panic("Machine-check (CPU:%d, thread:%p, code:0x%x),"
	      "registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x\n",
	      cpu_number(), current_thread(), code,
	      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
	      my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
	      my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
	      my_ktss->eflags, my_ktss->eip);
}
void
panic_double_fault64(x86_saved_state_t *esp)
{
	/* Set postcode (DEBUG only) */
	postcode(PANIC_DOUBLE_FAULT);

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Dump the interrupt stack frame at last kernel entry.
	 */
	if (is_saved_state64(esp)) {
		x86_saved_state64_t	*ss64p = saved_state64(esp);
		panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx),"
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
		      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
		      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
		      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
		      "RFL: 0x%016qx, RIP: 0x%016qx\n",
		      cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
		      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
		      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
		      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
		      ss64p->isf.rflags, ss64p->isf.rip);
	} else {
		x86_saved_state32_t	*ss32p = saved_state32(esp);
		panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%x),"
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
		      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
		      "EFL: 0x%08x, EIP: 0x%08x\n",
		      cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
		      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
		      ss32p->efl, ss32p->eip);
	}
}
/*
 * Simplistic machine check handler.
 * We could peruse all those MSRs but we only dump register state as we do for
 * the double fault exception.
 * Note: the machine check registers are non-volatile across warm boot - so
 * they'll be around when we return.
 */
void
panic_machine_check64(x86_saved_state_t *esp)
{
	/* Set postcode (DEBUG only) */
	postcode(PANIC_MACHINE_CHECK);

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Dump the interrupt stack frame at last kernel entry.
	 */
	if (is_saved_state64(esp)) {
		x86_saved_state64_t	*ss64p = saved_state64(esp);
		panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx),"
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
		      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
		      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
		      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
		      "RFL: 0x%016qx, RIP: 0x%016qx\n",
		      cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
		      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
		      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
		      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
		      ss64p->isf.rflags, ss64p->isf.rip);
	} else {
		x86_saved_state32_t	*ss32p = saved_state32(esp);
		panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%x),"
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
		      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
		      "EFL: 0x%08x, EIP: 0x%08x\n",
		      cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
		      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
		      ss32p->efl, ss32p->eip);
	}
}
/*
 *	Trap from user mode.
 */
void
user_trap(
	x86_saved_state_t	*saved_state)
{
	int			exc;
	int			err;
	unsigned int		subcode;
	int			code;
	int			type;
	user_addr_t		vaddr;
	vm_prot_t		prot;
	thread_t		thread = current_thread();
	ast_t			*myast;
	boolean_t		intr;
	kern_return_t		kret;
	user_addr_t		rip;

	assert((is_saved_state32(saved_state) && !thread_is_64bit(thread)) ||
	       (is_saved_state64(saved_state) &&  thread_is_64bit(thread)));

	if (is_saved_state64(saved_state)) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(saved_state);

		type  = regs->isf.trapno;
		err   = regs->isf.err & 0xffff;
		vaddr = (user_addr_t)regs->cr2;
		rip   = (user_addr_t)regs->isf.rip;
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(saved_state);

		type  = regs->trapno;
		err   = regs->err & 0xffff;
		vaddr = (user_addr_t)regs->cr2;
		rip   = (user_addr_t)regs->eip;
	}

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
			      (int)(vaddr >> 32), (int)vaddr, (int)(rip >> 32), (int)rip, 0);

	code = 0;
	subcode = 0;
	exc = 0;

	kprintf("user_trap(0x%08x) type=%d vaddr=0x%016llx\n",
		saved_state, type, vaddr);

	myast = ast_pending();
	if (perfASTHook) {
		if (*myast & AST_CHUD_ALL) {
			perfASTHook(type, saved_state, 0, 0);
		}
	} else {
		*myast &= ~AST_CHUD_ALL;
	}

	/* Is there a hook? */
	if (perfTrapHook) {
		if (perfTrapHook(type, saved_state, 0, 0) == KERN_SUCCESS)
			return;	/* If it succeeds, we are done... */
	}
	switch (type) {

	    case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	    case T_DEBUG:
		{
			pcb_t		pcb;
			unsigned int	clear = 0;

			/*
			 * get dr6 and set it in the thread's pcb before
			 * returning to userland
			 */
			pcb = thread->machine.pcb;
			if (pcb->ids) {
				/*
				 * We can get and set the status register
				 * in 32-bit mode even on a 64-bit thread
				 * because the high order bits are not
				 * used on x86_64.
				 */
				if (thread_is_64bit(thread)) {
					uint32_t dr6;
					x86_debug_state64_t *ids = pcb->ids;
					dr6 = (uint32_t)ids->dr6;
					__asm__ volatile ("movl %%db6, %0" : "=r" (dr6));
					ids->dr6 = dr6;
				} else { /* 32 bit thread */
					x86_debug_state32_t *ids = pcb->ids;
					__asm__ volatile ("movl %%db6, %0" : "=r" (ids->dr6));
				}
				__asm__ volatile ("movl %0, %%db6" : : "r" (clear));
			}
			exc = EXC_BREAKPOINT;
			code = EXC_I386_SGL;
			break;
		}
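		/*
		 * Illustrative note (not part of the original source): DR6 is
		 * the debug status register; copying it into the pcb's debug
		 * state (ids->dr6 above) is what lets a debugger inspecting
		 * the thread's state see which hardware breakpoint or
		 * single-step condition fired, and clearing %db6 afterwards
		 * keeps a stale status from being reported on the next
		 * T_DEBUG trap.
		 */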
	    case T_INT3:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	    case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	    case T_OUT_OF_BOUNDS:
		exc = EXC_SOFTWARE;
		code = EXC_I386_BOUND;
		break;

	    case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	    case 10:		/* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = err;
		break;

	    case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = err;
		break;

	    case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = err;
		break;

	    case T_GENERAL_PROTECTION:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = err;
		break;

	    case T_PAGE_FAULT:
		prot = VM_PROT_READ;

		if (err & T_PF_WRITE)
			prot |= VM_PROT_WRITE;
		if (err & T_PF_EXECUTE)
			prot |= VM_PROT_EXECUTE;

		kret = vm_fault(thread->map, vm_map_trunc_page(vaddr),
				prot, FALSE,
				THREAD_ABORTSAFE, NULL, 0);

		user_page_fault_continue(kret);

		/* NOTREACHED */
		break;
	    case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();
		return;

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    default:
#if	MACH_KGDB
		Debugger("Unanticipated user trap");
		return;
#endif	/* MACH_KGDB */
#if	MACH_KDB
		if (kdb_trap(type, err, saved_state))
			return;
#endif	/* MACH_KDB */
		panic("user trap");
		return;
	}
	intr = ml_set_interrupts_enabled(FALSE);
	myast = ast_pending();
	while (*myast & AST_ALL) {
		ast_taken(AST_ALL, intr);
		ml_set_interrupts_enabled(FALSE);
		myast = ast_pending();
	}
	ml_set_interrupts_enabled(intr);

	i386_exception(exc, code, subcode);
	/*NOTREACHED*/
}
/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void	log_thread_action (thread_t, char *);

void
i386_astintr(int preemption)
{
	ast_t		mask = AST_ALL;
	spl_t		s;

	if (preemption)
		mask = AST_PREEMPTION;

	s = splsched();

	ast_taken(mask, s);

	splx(s);
}
/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
	int	exc,
	int	code,
	int	subcode)
{
	exception_data_type_t	codes[EXCEPTION_CODE_MAX];

	codes[0] = code;		/* new exception interface */
	codes[1] = subcode;
	exception_triage(exc, codes, 2);
	/*NOTREACHED*/
}
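/*
 * Illustrative note (not part of the original source): exception_triage()
 * hands (exc, codes[]) to the Mach exception machinery, which offers it to
 * the thread, task and host exception ports in turn; if nothing claims it,
 * the thread is terminated.  For the EXC_BAD_ACCESS raised from
 * user_page_fault_continue() above, codes[0] is the kern_return_t from
 * vm_fault() and codes[1] is the faulting user address.
 */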
void
kernel_preempt_check(void)
{
	ast_t		*myast;
	boolean_t	intr;

	/*
	 * disable interrupts to both prevent pre-emption
	 * and to keep the ast state from changing via
	 * an interrupt handler making something runnable
	 */
	intr = ml_set_interrupts_enabled(FALSE);

	myast = ast_pending();

	if ((*myast & AST_URGENT) && intr == TRUE && get_interrupt_level() == 0) {
		/*
		 * can handle interrupts and preemptions
		 * at this point
		 */
		ml_set_interrupts_enabled(intr);

		/*
		 * now cause the PRE-EMPTION trap
		 */
		__asm__ volatile ("	int	$0xff");
	} else {
		/*
		 * if interrupts were already disabled or
		 * we're in an interrupt context, we can't
		 * preempt... of course if AST_URGENT
		 * isn't set we also don't want to
		 */
		ml_set_interrupts_enabled(intr);
	}
}
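/*
 * Illustrative note (not part of the original source): "int $0xff" raises a
 * software interrupt on the vector assumed here to be wired to T_PREEMPT, so
 * the preemption request re-enters kernel_trap() above, which handles
 * T_PREEMPT by calling ast_taken(AST_PREEMPTION, FALSE).  A typical caller is
 * a path that has just re-enabled preemption and wants an already-posted
 * urgent AST to take effect immediately rather than waiting for the next
 * natural trap or interrupt.
 */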
#if	MACH_KDB

extern void	db_i386_state(x86_saved_state32_t *regs);

#include <ddb/db_output.h>

void
db_i386_state(
	x86_saved_state32_t *regs)
{
	db_printf("eip	%8x\n", regs->eip);
	db_printf("trap	%8x\n", regs->trapno);
	db_printf("err	%8x\n", regs->err);
	db_printf("efl	%8x\n", regs->efl);
	db_printf("ebp	%8x\n", regs->ebp);
	db_printf("esp	%8x\n", regs->cr2);
	db_printf("uesp	%8x\n", regs->uesp);
	db_printf("cs	%8x\n", regs->cs & 0xff);
	db_printf("ds	%8x\n", regs->ds & 0xff);
	db_printf("es	%8x\n", regs->es & 0xff);
	db_printf("fs	%8x\n", regs->fs & 0xff);
	db_printf("gs	%8x\n", regs->gs & 0xff);
	db_printf("ss	%8x\n", regs->ss & 0xff);
	db_printf("eax	%8x\n", regs->eax);
	db_printf("ebx	%8x\n", regs->ebx);
	db_printf("ecx	%8x\n", regs->ecx);
	db_printf("edx	%8x\n", regs->edx);
	db_printf("esi	%8x\n", regs->esi);
	db_printf("edi	%8x\n", regs->edi);
}

#endif	/* MACH_KDB */
/* Synchronize a thread's i386_kernel_state (if any) with the given
 * i386_saved_state_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * an "MP_KDP" IPI.
 */
void
sync_iss_to_iks(x86_saved_state32_t *saved_state)
{
	struct x86_kernel_state32 *iks;
	vm_offset_t	kstack;
	boolean_t	record_active_regs = FALSE;

	if ((kstack = current_thread()->kernel_stack) != 0) {
		x86_saved_state32_t	*regs;

		regs = saved_state;

		iks = STACK_IKS(kstack);

		/*
		 * Did we take the trap/interrupt in kernel mode?
		 */
		if (regs == USER_REGS32(current_thread()))
			record_active_regs = TRUE;
		else {
			iks->k_ebx = regs->ebx;
			iks->k_esp = (int)regs;
			iks->k_ebp = regs->ebp;
			iks->k_edi = regs->edi;
			iks->k_esi = regs->esi;
			iks->k_eip = regs->eip;
		}
	}

	if (record_active_regs == TRUE) {
		/*
		 * Show the trap handler path
		 */
		__asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
		__asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
		__asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
		__asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
		__asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
		/*
		 * "Current" instruction pointer
		 */
		__asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
	}
}
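/*
 * Illustrative note (not part of the original source): the final asm above
 * stores the address of its own local label "1:" into iks->k_eip, so when the
 * trap was taken in user mode (and there is no kernel-side saved state to
 * copy) the recorded EBX/ESP/EBP/EDI/ESI/EIP describe this spot in the trap
 * handler path, giving the debugger's stack walker a valid starting frame.
 */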
/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state32_t *saved_state) {
	struct x86_kernel_state32 *iks;
	vm_offset_t	kstack;
	boolean_t	record_active_regs = FALSE;

	if ((kstack = current_thread()->kernel_stack) != 0) {

		iks = STACK_IKS(kstack);
		/*
		 * Show the trap handler path
		 */
		__asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
		__asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
		__asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
		__asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
		__asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
		/*
		 * "Current" instruction pointer
		 */
		__asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
	}
}