/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Hardware trap/fault handler.
 */
#include <mach_kgdb.h>
#include <mach_kdb.h>
#include <mach_ldebug.h>

#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>

#include <architecture/i386/pio.h>	/* inb() */

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/misc_protos.h>

#include <sys/kdebug.h>

#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif	/* MACH_KGDB */

#if MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif	/* MACH_KDB */

#include <i386/io_emulate.h>
#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#include <mach/i386/syscall_sw.h>
/*
 * Forward declarations
 */
static void user_page_fault_continue(kern_return_t kret);
static void panic_trap(x86_saved_state32_t *saved_state);
static void set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip);

perfCallback perfTrapHook = NULL;	/* Pointer to CHUD trap hook routine */
perfCallback perfASTHook  = NULL;	/* Pointer to CHUD AST hook routine */
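/*
 * Added commentary (not part of the original file): the CHUD tools
 * install these hooks at run time.  By convention a hook that returns
 * KERN_SUCCESS has fully handled the event, so the trap handlers below
 * return immediately.  A hypothetical sketch of the expected hook shape
 * (the function name and parameter names here are illustrative only):
 *
 *	static kern_return_t
 *	my_trap_hook(int trapno, void *state, int arg2, int arg3)
 *	{
 *		return KERN_FAILURE;	// decline; let the kernel handle it
 *	}
 *	...
 *	perfTrapHook = my_trap_hook;
 */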
void
thread_syscall_return(
	kern_return_t ret)
{
	thread_t	thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*regs;

		regs = USER_REGS64(thr_act);

		if (kdebug_enable && ((regs->rax & SYSCALL_CLASS_MASK) == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT))) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_SC, ((int) (regs->rax & SYSCALL_NUMBER_MASK)))
					| DBG_FUNC_END,
				ret, 0, 0, 0, 0);
		}
		regs->rax = ret;
	} else {
		x86_saved_state32_t	*regs;

		regs = USER_REGS32(thr_act);

		if (kdebug_enable && ((int) regs->eax < 0)) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_SC, -((int) regs->eax))
					| DBG_FUNC_END,
				ret, 0, 0, 0, 0);
		}
		regs->eax = ret;
	}
	thread_exception_return();
}
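/*
 * Added commentary (not part of the original file): on the 64-bit side
 * the value in %rax carries a syscall-class field in its upper bits, so a
 * Mach trap is recognized by masking against SYSCALL_CLASS_MASK and
 * comparing with SYSCALL_CLASS_MACH.  Roughly, following the
 * mach/i386/syscall_sw.h encoding used above:
 *
 *	int class  = (regs->rax & SYSCALL_CLASS_MASK) >> SYSCALL_CLASS_SHIFT;
 *	int number =  regs->rax & SYSCALL_NUMBER_MASK;
 *	// class == SYSCALL_CLASS_MACH  =>  Mach trap "number"
 *
 * The legacy 32-bit ABI instead marks Mach traps with a negative value in
 * %eax, which is why that path simply tests (int) regs->eax < 0.
 */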
#if	MACH_KDB
boolean_t	debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;
void
thread_kdb_return(void)
{
	thread_t	thr_act = current_thread();
	x86_saved_state_t	*iss = USER_STATE(thr_act);

	if (is_saved_state64(iss)) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(iss);

		if (kdb_trap(regs->isf.trapno, (int)regs->isf.err, (void *)regs)) {
			thread_exception_return();
		}
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(iss);

		if (kdb_trap(regs->trapno, regs->err, (void *)regs)) {
			thread_exception_return();
		}
	}
}
#endif	/* MACH_KDB */
static void
user_page_fault_continue(
	kern_return_t	kr)
{
	thread_t	thread = current_thread();
	x86_saved_state_t	*regs = USER_STATE(thread);
	ast_t		*myast;
	boolean_t	intr;
	user_addr_t	vaddr;
	int		err;
	int		trapno;

	assert((is_saved_state32(regs) && !thread_is_64bit(thread)) ||
	       (is_saved_state64(regs) &&  thread_is_64bit(thread)));

	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*uregs;

		uregs = USER_REGS64(thread);

		trapno = uregs->isf.trapno;
		err    = uregs->isf.err;
		vaddr  = (user_addr_t)uregs->cr2;
	} else {
		x86_saved_state32_t	*uregs;

		uregs = USER_REGS32(thread);

		trapno = uregs->trapno;
		err    = uregs->err;
		vaddr  = (user_addr_t)uregs->cr2;
	}

	if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if	MACH_KDB
		if (!db_breakpoints_inserted) {
			db_set_breakpoints();
		}
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (err & T_PF_WRITE) &&
		    db_find_watchpoint(thread->map, vaddr, regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
#endif	/* MACH_KDB */
		intr = ml_set_interrupts_enabled(FALSE);
		myast = ast_pending();
		while (*myast & AST_ALL) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
			myast = ast_pending();
		}
		ml_set_interrupts_enabled(intr);

		thread_exception_return();
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(trapno, err, regs)) {
		thread_exception_return();
	}
#endif	/* MACH_KDB */

	i386_exception(EXC_BAD_ACCESS, kr, vaddr);
}
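/*
 * Added commentary (not part of the original file): before resuming the
 * user thread, any pending ASTs are drained with interrupts disabled;
 * ast_taken() may re-enable interrupts, so the pending mask is re-sampled
 * each time around the loop.  If the fault could not be resolved,
 * i386_exception() turns it into a Mach exception - EXC_BAD_ACCESS with
 * the kern_return_t and the faulting address as the exception codes -
 * which is then delivered through exception_triage().
 */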
/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
	uint32_t	fault_addr;
	uint32_t	recover_addr;
};

extern struct recovery	recover_table[];
extern struct recovery	recover_table_end[];

const char *	trap_type[] = {TRAP_NAMES};
unsigned	TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);

extern unsigned panic_io_port;
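/*
 * Added commentary (not part of the original file): recover_table[] is a
 * table of {fault_addr, recover_addr} pairs associated with the faultable
 * instructions in the copyin/copyout paths.  When a kernel-mode fault
 * cannot be resolved, kernel_trap() below scans the table and, on a match
 * against the trapping EIP, redirects execution to the recovery address
 * instead of panicking.  Conceptually:
 *
 *	for (rp = recover_table; rp < recover_table_end; rp++)
 *		if (kern_ip == rp->fault_addr)
 *			set_recovery_ip(saved_state, rp->recover_addr);
 */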
static inline void
reset_dr7(void)
{
	uint32_t dr7 = 0x400;	/* magic dr7 reset value */
	__asm__ volatile("movl %0,%%dr7" : : "r" (dr7));
}

unsigned kdp_has_active_watchpoints = 0;
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
void
kernel_trap(
	x86_saved_state_t	*state)
{
	x86_saved_state32_t	*saved_state;
	int			code;
	user_addr_t		vaddr;
	int			type;
	vm_map_t		map;
	kern_return_t		result = KERN_FAILURE;
	thread_t		thread;
	ast_t			*myast;
	boolean_t		intr;
	vm_prot_t		prot;
	struct recovery		*rp;
	vm_offset_t		kern_ip;
	int			fault_in_copy_window = -1;
	int			is_user = 0;
#if	MACH_KDB
	pt_entry_t		*pte;
#endif	/* MACH_KDB */
	thread = current_thread();

	if (is_saved_state64(state))
		panic("kernel_trap(%p) with 64-bit state", state);
	saved_state = saved_state32(state);

	vaddr = (user_addr_t)saved_state->cr2;
	type  = saved_state->trapno;
	code  = saved_state->err & 0xffff;
	intr  = (saved_state->efl & EFL_IF) != 0;	/* state of ints at trap */

	kern_ip = (vm_offset_t)saved_state->eip;

	myast = ast_pending();

	if (perfASTHook) {
		if (*myast & AST_CHUD_ALL)
			perfASTHook(type, NULL, 0, 0);
	} else
		*myast &= ~AST_CHUD_ALL;

	if (perfTrapHook) {
		if (perfTrapHook(type, NULL, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			return;
		}
	}
	/*
	 * we come here with interrupts off as we don't want to recurse
	 * on preemption below.  but we do want to re-enable interrupts
	 * as soon we possibly can to hold latency down
	 */
	if (T_PREEMPT == type) {
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
				      0, 0, 0, kern_ip, 0);

		ast_taken(AST_PREEMPTION, FALSE);
		return;
	}
	if (T_PAGE_FAULT == type) {
		/*
		 * assume we're faulting in the kernel map
		 */
		map = kernel_map;

		if (thread != THREAD_NULL && thread->map != kernel_map) {
			vm_offset_t	copy_window_base;
			vm_offset_t	kvaddr;
			int		window_index;

			kvaddr = (vm_offset_t)vaddr;
			/*
			 * must determine if fault occurred in
			 * the copy window while pre-emption is
			 * disabled for this processor so that
			 * we only need to look at the window
			 * associated with this processor
			 */
			copy_window_base = current_cpu_datap()->cpu_copywindow_base;

			if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS)) ) {

				window_index = (kvaddr - copy_window_base) / NBPDE;

				if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) {

					kvaddr -= (copy_window_base + (NBPDE * window_index));
					vaddr = thread->machine.copy_window[window_index].user_base + kvaddr;

					map = thread->map;
					fault_in_copy_window = window_index;
				}
			}
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
			      (int)(vaddr >> 32), (int)vaddr, is_user, kern_ip, 0);

	(void) ml_set_interrupts_enabled(intr);
	switch (type) {

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();
		return;

	    case T_DEBUG:
		if ((saved_state->efl & EFL_TF) == 0
		    && !kdp_has_active_watchpoints) {
			/* We've somehow encountered a debug
			 * register match that does not belong
			 * to the kernel debugger.
			 * This isn't supposed to happen.
			 */
			reset_dr7();
			return;
		}
		goto debugger_entry;
	    case T_PAGE_FAULT:
		/*
		 * If the current map is a submap of the kernel map,
		 * and the address is within that map, fault on that
		 * map.  If the same check is done in vm_fault
		 * (vm_map_lookup), we may deadlock on the kernel map
		 * lock.
		 */
		prot = VM_PROT_READ;

		if (code & T_PF_WRITE)
			prot |= VM_PROT_WRITE;
		if (code & T_PF_EXECUTE)
			prot |= VM_PROT_EXECUTE;

#if	MACH_KDB
		/*
		 * Check for watchpoint on kernel static data.
		 * vm_fault would fail in this case
		 */
		if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted &&
		    (code & T_PF_WRITE) && vaddr < vm_map_max(map) &&
		    ((*(pte = pmap_pte(kernel_pmap, (vm_map_offset_t)vaddr))) & INTEL_PTE_WRITE) == 0) {
			pmap_store_pte(
				pte,
				*pte | INTEL_PTE_VALID | INTEL_PTE_WRITE);
			/* XXX need invltlb here? */

			result = KERN_SUCCESS;
			goto look_for_watchpoints;
		}
#endif	/* MACH_KDB */

		result = vm_fault(map,
				  vm_map_trunc_page(vaddr),
				  prot,
				  FALSE,
				  THREAD_UNINT, NULL, 0);
#if	MACH_KDB
		if (result == KERN_SUCCESS) {
			/*
			 * Look for watchpoints
			 */
look_for_watchpoints:
			if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted && (code & T_PF_WRITE) &&
			    db_find_watchpoint(map, vaddr, saved_state))
				kdb_trap(T_WATCHPOINT, 0, saved_state);
		}
#endif	/* MACH_KDB */

		if (result == KERN_SUCCESS) {

			if (fault_in_copy_window != -1) {
				pd_entry_t	*updp;
				pd_entry_t	*kpdp;

				/*
				 * in case there was no page table assigned
				 * for the user base address and the pmap
				 * got 'expanded' due to this fault, we'll
				 * copy in the descriptor
				 *
				 * we're either setting the page table descriptor
				 * to the same value or it was 0... no need
				 * for a TLB flush in either case
				 */
				ml_set_interrupts_enabled(FALSE);
				updp = pmap_pde(map->pmap, thread->machine.copy_window[fault_in_copy_window].user_base);

				if (0 == updp) panic("trap: updp 0"); /* XXX DEBUG */
				kpdp = current_cpu_datap()->cpu_copywindow_pdp;
				kpdp += fault_in_copy_window;

				if (*kpdp && (*kpdp & PG_FRAME) != (*updp & PG_FRAME))
					panic("kernel_fault: user pdp doesn't match - updp = 0x%x, kpdp = 0x%x\n", updp, kpdp);

				pmap_store_pte(kpdp, *updp);

				(void) ml_set_interrupts_enabled(intr);
			}
			return;
		}
		/*
		 * fall through
		 */
	    case T_GENERAL_PROTECTION:
		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		for (rp = recover_table; rp < recover_table_end; rp++) {
			if (kern_ip == rp->fault_addr) {
				set_recovery_ip(saved_state, rp->recover_addr);
				return;
			}
		}

		/*
		 * Check thread recovery address also.
		 */
		if (thread->recover) {
			set_recovery_ip(saved_state, thread->recover);
			thread->recover = 0;
			return;
		}
		/*
		 * Unanticipated page-fault errors in kernel
		 * should not happen.
		 *
		 * fall through...
		 */

	    default:
		/*
		 * Exception 15 is reserved but some chips may generate it
		 * spuriously. Seen at startup on AMD Athlon-64.
		 */
		if (type == 15) {
			kprintf("kernel_trap() ignoring spurious trap 15\n");
			return;
		}
debugger_entry:
		/* Ensure that the i386_kernel_state at the base of the
		 * current thread's stack (if any) is synchronized with the
		 * context at the moment of the trap, to facilitate
		 * access through the debugger.
		 */
		sync_iss_to_iks(saved_state);
#if	MACH_KDB
restart_debugger:
#endif	/* MACH_KDB */
#if	MACH_KDP
		if (current_debugger != KDB_CUR_DB) {
			if (kdp_i386_trap(type, saved_state, result, vaddr))
				return;
		}
#endif	/* MACH_KDP */
#if	MACH_KDB
		if (kdb_trap(type, code, saved_state)) {
			if (switch_debugger) {
				current_debugger = KDP_CUR_DB;
				goto restart_debugger;
			}
			return;
		}
#endif	/* MACH_KDB */
	}

	panic_trap(saved_state);
}
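/*
 * Added commentary (not part of the original file): the copy-window logic
 * in kernel_trap() translates a kernel-visible fault address back into the
 * user address the copyio code was targeting.  Each CPU owns NCOPY_WINDOWS
 * windows of NBPDE bytes starting at cpu_copywindow_base, so for a faulting
 * kernel address kvaddr inside that range:
 *
 *	window_index = (kvaddr - copy_window_base) / NBPDE;
 *	user_vaddr   = copy_window[window_index].user_base
 *			+ (kvaddr - (copy_window_base + NBPDE * window_index));
 *
 * and the fault is then taken against the thread's map rather than
 * kernel_map.
 */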
static void
set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip)
{
	saved_state->eip = ip;
}
static void
panic_trap(x86_saved_state32_t *regs)
{
	const char	*trapname = "Unknown";
	uint32_t	cr0 = get_cr0();
	uint32_t	cr2 = get_cr2();
	uint32_t	cr3 = get_cr3();
	uint32_t	cr4 = get_cr4();

	if (panic_io_port)
		(void)inb(panic_io_port);

	kprintf("panic trap number 0x%x, eip 0x%x\n", regs->trapno, regs->eip);
	kprintf("cr0 0x%08x cr2 0x%08x cr3 0x%08x cr4 0x%08x\n",
		cr0, cr2, cr3, cr4);

	if (regs->trapno < TRAP_TYPES)
		trapname = trap_type[regs->trapno];

	panic("Unresolved kernel trap (CPU %d, Type %d=%s), registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x, CS:  0x%08x, DS:  0x%08x\n",
	      cpu_number(), regs->trapno, trapname, cr0, cr2, cr3, cr4,
	      regs->eax, regs->ebx, regs->ecx, regs->edx,
	      regs->cr2, regs->ebp, regs->esi, regs->edi,
	      regs->efl, regs->eip, regs->cs, regs->ds);
	/*
	 * This next statement is not executed,
	 * but it's needed to stop the compiler using tail call optimization
	 * for the panic call - which confuses the subsequent backtrace.
	 */
	cr0 = 0;
}

extern void kprintf_break_lock(void);
/*
 * Called from locore on a special reserved stack after a double-fault
 * is taken in kernel space.
 * Kernel stack overflow is one route here.
 */
void
panic_double_fault(int code)
{
	struct i386_tss	*my_ktss = current_ktss();

	/* Set postcode (DEBUG only) */
	postcode(PANIC_DOUBLE_FAULT);

	/* Issue an I/O port read if one has been requested - this is an event logic
	 * analyzers can use as a trigger point.
	 */
	if (panic_io_port)
		(void)inb(panic_io_port);

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Print backtrace leading to first fault:
	 */
	panic_i386_backtrace((void *) my_ktss->ebp, 10);

	panic("Double fault (CPU:%d, thread:%p, code:0x%x),"
	      "registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x\n",
	      cpu_number(), current_thread(), code,
	      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
	      my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
	      my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
	      my_ktss->eflags, my_ktss->eip);
}
/*
 * Called from locore on a special reserved stack after a machine-check
 */
void
panic_machine_check(int code)
{
	struct i386_tss	*my_ktss = current_ktss();

	/* Set postcode (DEBUG only) */
	postcode(PANIC_MACHINE_CHECK);

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();
	panic("Machine-check (CPU:%d, thread:%p, code:0x%x),"
	      "registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x\n",
	      cpu_number(), current_thread(), code,
	      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
	      my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
	      my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
	      my_ktss->eflags, my_ktss->eip);
}
void
panic_double_fault64(x86_saved_state_t *esp)
{
	/* Set postcode (DEBUG only) */
	postcode(PANIC_DOUBLE_FAULT);

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Dump the interrupt stack frame at last kernel entry.
	 */
	if (is_saved_state64(esp)) {
		x86_saved_state64_t *ss64p = saved_state64(esp);
		panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx),"
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
		      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
		      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
		      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
		      "RFL: 0x%016qx, RIP: 0x%016qx\n",
		      cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
		      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
		      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
		      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
		      ss64p->isf.rflags, ss64p->isf.rip);
	} else {
		x86_saved_state32_t *ss32p = saved_state32(esp);
		panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%x),"
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
		      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
		      "EFL: 0x%08x, EIP: 0x%08x\n",
		      cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
		      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
		      ss32p->efl, ss32p->eip);
	}
}
/*
 * Simplistic machine check handler.
 * We could peruse all those MSRs but we only dump register state as we do for
 * the double fault exception.
 * Note: the machine check registers are non-volatile across warm boot - so
 * they'll be around when we return.
 */
void
panic_machine_check64(x86_saved_state_t *esp)
{
	/* Set postcode (DEBUG only) */
	postcode(PANIC_MACHINE_CHECK);

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Dump the interrupt stack frame at last kernel entry.
	 */
	if (is_saved_state64(esp)) {
		x86_saved_state64_t *ss64p = saved_state64(esp);
		panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx),"
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
		      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
		      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
		      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
		      "RFL: 0x%016qx, RIP: 0x%016qx\n",
		      cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
		      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
		      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
		      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
		      ss64p->isf.rflags, ss64p->isf.rip);
	} else {
		x86_saved_state32_t *ss32p = saved_state32(esp);
		panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%x),"
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
		      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
		      "EFL: 0x%08x, EIP: 0x%08x\n",
		      cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
		      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
		      ss32p->efl, ss32p->eip);
	}
}
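/*
 * Added commentary (not part of the original file): as the handler's own
 * comment notes, a fuller implementation could also report the machine-check
 * MSRs.  A hypothetical sketch, assuming the rdmsr64() accessor from
 * <i386/proc_reg.h> and the architectural MSR numbers:
 *
 *	uint64_t mcg_status = rdmsr64(0x17a);	// IA32_MCG_STATUS
 *	uint64_t mc0_status = rdmsr64(0x401);	// IA32_MC0_STATUS
 *	kprintf("MCG_STATUS: 0x%016llx MC0_STATUS: 0x%016llx\n",
 *		mcg_status, mc0_status);
 */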
/*
 * Trap from user mode.
 */
void
user_trap(
	x86_saved_state_t	*saved_state)
{
	int		exc;
	int		err;
	int		code;
	unsigned int	subcode;
	user_addr_t	vaddr;
	int		type;
	user_addr_t	rip;
	vm_prot_t	prot;
	kern_return_t	kret;
	ast_t		*myast;
	boolean_t	intr;
	thread_t	thread = current_thread();

	assert((is_saved_state32(saved_state) && !thread_is_64bit(thread)) ||
	       (is_saved_state64(saved_state) &&  thread_is_64bit(thread)));

	if (is_saved_state64(saved_state)) {
		x86_saved_state64_t	*regs;

		regs = saved_state64(saved_state);

		type  = regs->isf.trapno;
		err   = regs->isf.err & 0xffff;
		vaddr = (user_addr_t)regs->cr2;
		rip   = (user_addr_t)regs->isf.rip;
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(saved_state);

		type  = regs->trapno;
		err   = regs->err & 0xffff;
		vaddr = (user_addr_t)regs->cr2;
		rip   = (user_addr_t)regs->eip;
	}

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
			      (int)(vaddr>>32), (int)vaddr, (int)(rip>>32), (int)rip, 0);

	exc = 0;
	code = 0;
	subcode = 0;

#if DEBUG
	kprintf("user_trap(0x%08x) type=%d vaddr=0x%016llx\n",
		saved_state, type, vaddr);
#endif
	myast = ast_pending();

	if (perfASTHook) {
		if (*myast & AST_CHUD_ALL) {
			perfASTHook(type, saved_state, 0, 0);
		}
	} else {
		*myast &= ~AST_CHUD_ALL;
	}

	/* Is there a hook? */
	if (perfTrapHook) {
		if (perfTrapHook(type, saved_state, 0, 0) == KERN_SUCCESS)
			return;	/* If it succeeds, we are done... */
	}
	switch (type) {

	    case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	    case T_DEBUG:
		{
			pcb_t		pcb;
			unsigned int	clear = 0;
			unsigned int	dr6;

			/*
			 * get dr6 and set it in the thread's pcb before
			 * returning to userland
			 */
			pcb = thread->machine.pcb;
			/*
			 * We can get and set the status register
			 * in 32-bit mode even on a 64-bit thread
			 * because the high order bits are not
			 * used.
			 */
			if (thread_is_64bit(thread)) {
				x86_debug_state64_t *ids = pcb->ids;
				dr6 = (uint32_t)ids->dr6;
				__asm__ volatile ("movl %%db6, %0" : "=r" (dr6));
				ids->dr6 = dr6;
			} else { /* 32 bit thread */
				x86_debug_state32_t *ids = pcb->ids;
				__asm__ volatile ("movl %%db6, %0" : "=r" (ids->dr6));
			}
			__asm__ volatile ("movl %0, %%db6" : : "r" (clear));

			exc = EXC_BREAKPOINT;
			code = EXC_I386_SGL;
			break;
		}
	    case T_INT3:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	    case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	    case T_OUT_OF_BOUNDS:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_BOUND;
		break;

	    case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	    case 10:		/* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = err;
		break;

	    case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = err;
		break;

	    case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = err;
		break;

	    case T_GENERAL_PROTECTION:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = err;
		break;
	    case T_PAGE_FAULT:
		prot = VM_PROT_READ;

		if (err & T_PF_WRITE)
			prot |= VM_PROT_WRITE;
		if (err & T_PF_EXECUTE)
			prot |= VM_PROT_EXECUTE;

		kret = vm_fault(thread->map, vm_map_trunc_page(vaddr),
				prot, FALSE,
				THREAD_ABORTSAFE, NULL, 0);

		user_page_fault_continue(kret);

	    case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();
		return;

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    default:
#if	MACH_KGDB
		Debugger("Unanticipated user trap");
		return;
#endif	/* MACH_KGDB */
#if	MACH_KDB
		if (kdb_trap(type, err, saved_state))
			return;
#endif	/* MACH_KDB */
		break;
	}
	intr = ml_set_interrupts_enabled(FALSE);
	myast = ast_pending();
	while (*myast & AST_ALL) {
		ast_taken(AST_ALL, intr);
		ml_set_interrupts_enabled(FALSE);
		myast = ast_pending();
	}
	ml_set_interrupts_enabled(intr);

	i386_exception(exc, code, subcode);
}
/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void	log_thread_action(thread_t, char *);

void
i386_astintr(int preemption)
{
	ast_t	mask = AST_ALL;
	spl_t	s;

	if (preemption)
		mask = AST_PREEMPTION;

	s = splsched();

	ast_taken(mask, s);

	splx(s);
}
/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
	int	exc,
	int	code,
	int	subcode)
{
	exception_data_type_t	codes[EXCEPTION_CODE_MAX];

	codes[0] = code;		/* new exception interface */
	codes[1] = subcode;
	exception_triage(exc, codes, 2);
}
void
kernel_preempt_check(void)
{
	ast_t		*myast;
	boolean_t	intr;

	/*
	 * disable interrupts to both prevent pre-emption
	 * and to keep the ast state from changing via
	 * an interrupt handler making something runnable
	 */
	intr = ml_set_interrupts_enabled(FALSE);

	myast = ast_pending();

	if ((*myast & AST_URGENT) && intr == TRUE && get_interrupt_level() == 0) {
		/*
		 * can handle interrupts and preemptions
		 * at this point
		 */
		ml_set_interrupts_enabled(intr);

		/*
		 * now cause the PRE-EMPTION trap
		 */
		__asm__ volatile ("int $0xff");
	} else {
		/*
		 * if interrupts were already disabled or
		 * we're in an interrupt context, we can't
		 * preempt...   of course if AST_URGENT
		 * isn't set we also don't want to
		 */
		ml_set_interrupts_enabled(intr);
	}
}
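/*
 * Added commentary (not part of the original file): "int $0xff" raises the
 * software interrupt vectored to T_PREEMPT, so the preemption request is
 * serviced through the normal trap path; kernel_trap() above recognizes
 * T_PREEMPT and calls ast_taken(AST_PREEMPTION, FALSE) on the trap stack.
 * This is only done when interrupts were enabled on entry and we are not
 * inside an interrupt context (get_interrupt_level() == 0).
 */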
#if	MACH_KDB

extern void	db_i386_state(x86_saved_state32_t *regs);

#include <ddb/db_output.h>

void
db_i386_state(
	x86_saved_state32_t *regs)
{
	db_printf("eip	%8x\n", regs->eip);
	db_printf("trap	%8x\n", regs->trapno);
	db_printf("err	%8x\n", regs->err);
	db_printf("efl	%8x\n", regs->efl);
	db_printf("ebp	%8x\n", regs->ebp);
	db_printf("esp	%8x\n", regs->cr2);
	db_printf("uesp	%8x\n", regs->uesp);
	db_printf("cs	%8x\n", regs->cs & 0xff);
	db_printf("ds	%8x\n", regs->ds & 0xff);
	db_printf("es	%8x\n", regs->es & 0xff);
	db_printf("fs	%8x\n", regs->fs & 0xff);
	db_printf("gs	%8x\n", regs->gs & 0xff);
	db_printf("ss	%8x\n", regs->ss & 0xff);
	db_printf("eax	%8x\n", regs->eax);
	db_printf("ebx	%8x\n", regs->ebx);
	db_printf("ecx	%8x\n", regs->ecx);
	db_printf("edx	%8x\n", regs->edx);
	db_printf("esi	%8x\n", regs->esi);
	db_printf("edi	%8x\n", regs->edi);
}

#endif	/* MACH_KDB */
/* Synchronize a thread's i386_kernel_state (if any) with the given
 * i386_saved_state_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * a debugger IPI from another processor.
 */
void
sync_iss_to_iks(x86_saved_state32_t *saved_state)
{
	struct x86_kernel_state32 *iks;
	vm_offset_t	kstack;
	boolean_t	record_active_regs = FALSE;

	if ((kstack = current_thread()->kernel_stack) != 0) {
		x86_saved_state32_t	*regs;

		regs = saved_state;

		iks = STACK_IKS(kstack);

		/*
		 * Did we take the trap/interrupt in kernel mode?
		 */
		if (regs == USER_REGS32(current_thread()))
			record_active_regs = TRUE;
		else {
			iks->k_ebx = regs->ebx;
			iks->k_esp = (int)regs;
			iks->k_ebp = regs->ebp;
			iks->k_edi = regs->edi;
			iks->k_esi = regs->esi;
			iks->k_eip = regs->eip;
		}
	}

	if (record_active_regs == TRUE) {
		/*
		 * Show the trap handler path
		 */
		__asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
		__asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
		__asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
		__asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
		__asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
		/*
		 * "Current" instruction pointer
		 */
		__asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
	}
}
/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state32_t *saved_state) {
	struct x86_kernel_state32 *iks;
	vm_offset_t	kstack;
	boolean_t	record_active_regs = FALSE;

	if ((kstack = current_thread()->kernel_stack) != 0) {

		iks = STACK_IKS(kstack);
		/*
		 * Show the trap handler path
		 */
		__asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
		__asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
		__asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
		__asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
		__asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
		/*
		 * "Current" instruction pointer
		 */
		__asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
	}
}