/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Hardware trap/fault handler.
 */
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#if	MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif	/* MACH_KGDB */

#include <i386/intel_read_fault.h>

#if	MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif	/* MACH_KGDB */

#if	MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif	/* MACH_KDB */

#include <i386/io_emulate.h>
/*
 * Forward declarations
 */
extern void		user_page_fault_continue(
				kern_return_t		kr);

extern boolean_t	v86_assist(
				thread_t		thread,
				struct i386_saved_state	*regs);

extern boolean_t	check_io_fault(
				struct i386_saved_state	*regs);

extern int		inst_fetch(
				int			eip,
				int			cs);
void
thread_syscall_return(
	kern_return_t ret)
{
	register thread_t	thr_act = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thr_act);

	regs->eax = ret;
	thread_exception_return();
	/*NOTREACHED*/
}
#if	MACH_KDB
boolean_t	debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;
void
thread_kdb_return(void)
{
	register thread_t	thread = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thread);

	if (kdb_trap(regs->trapno, regs->err, regs)) {
#if	MACH_LDEBUG
		assert(thread->mutex_count == 0);
#endif	/* MACH_LDEBUG */
		thread_exception_return();
		/*NOTREACHED*/
	}
}
boolean_t let_ddb_vm_fault = FALSE;

#endif	/* MACH_KDB */
void
user_page_fault_continue(
	kern_return_t	kr)
{
	register thread_t	thread = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thread);

	if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if	MACH_KDB
		if (!db_breakpoints_inserted) {
			db_set_breakpoints();
		}
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (regs->err & T_PF_WRITE) &&
		    db_find_watchpoint(thread->map,
				       (vm_offset_t)regs->cr2,
				       regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
#endif	/* MACH_KDB */
		thread_exception_return();
		/*NOTREACHED*/
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(regs->trapno, regs->err, regs)) {
#if	MACH_LDEBUG
		assert(thread->mutex_count == 0);
#endif	/* MACH_LDEBUG */
		thread_exception_return();
		/*NOTREACHED*/
	}
#endif	/* MACH_KDB */

	i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
	/*NOTREACHED*/
}
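/*
 * Note: user_page_fault_continue() acts as a continuation for vm_fault():
 * both of its exit paths end in thread_exception_return() or
 * i386_exception(), and neither returns to the caller.  That is why
 * user_trap() below can hand it the vm_fault() result in tail position,
 * e.g.
 *
 *	user_page_fault_continue(vm_fault(thread->map,
 *		trunc_page((vm_offset_t)subcode),
 *		prot, FALSE, THREAD_ABORTSAFE, NULL, 0));
 *	(NOTREACHED)
 */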
/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
	uint32_t	fault_addr;
	uint32_t	recover_addr;
};

extern struct recovery	recover_table[];
extern struct recovery	recover_table_end[];

/*
 * Recovery from successful fault in copyout does not
 * return directly - it retries the pte check, since
 * the 386 ignores write protection in kernel mode.
 */
extern struct recovery	retry_table[];
extern struct recovery	retry_table_end[];

const char *	trap_type[] = {TRAP_NAMES};
int		TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
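/*
 * Sketch of how the tables above are consulted (the entries themselves
 * are presumably emitted alongside the assembly copy routines): each
 * entry pairs the address of a potentially-faulting instruction with a
 * handler address to resume at.  On a fault, the trap handler scans:
 *
 *	register struct recovery *rp;
 *
 *	for (rp = recover_table; rp < recover_table_end; rp++)
 *		if (regs->eip == rp->fault_addr) {
 *			regs->eip = rp->recover_addr;
 *			break;
 *		}
 *
 * so a fault inside copyin/copyout is turned into a jump to the routine's
 * registered recovery stub instead of a kernel panic.
 */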
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
boolean_t
kernel_trap(
	register struct i386_saved_state	*regs)
{
	int			code;
	unsigned int		subcode;
	int			interruptible = THREAD_UNINT;
	register int		type;
	vm_map_t		map;
	kern_return_t		result = KERN_FAILURE;
	register thread_t	thread;
#if	MACH_KDB
	pt_entry_t		*pte;
#endif	/* MACH_KDB */

	type = regs->trapno;
	code = regs->err;

	thread = current_thread();

	switch (type) {
	    case T_PREEMPT:
		ast_taken(AST_PREEMPTION, FALSE);
		return (TRUE);

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return (TRUE);
	    case T_PAGE_FAULT:
		/*
		 * If the current map is a submap of the kernel map,
		 * and the address is within that map, fault on that
		 * map.  If the same check is done in vm_fault
		 * (vm_map_lookup), we may deadlock on the kernel map
		 * lock.
		 */
#if	MACH_KDB
		mp_disable_preemption();
		if (db_active
		    && kdb_active[cpu_number()]
		    && !let_ddb_vm_fault) {
			/*
			 * Force kdb to handle this one.
			 */
			mp_enable_preemption();
			return (FALSE);
		}
		mp_enable_preemption();
#endif	/* MACH_KDB */
		subcode = regs->cr2;	/* get faulting address */

		if (subcode > LINEAR_KERNEL_ADDRESS) {
			map = kernel_map;
		} else if (thread == THREAD_NULL)
			map = kernel_map;
		else
			map = thread->map;
#if	MACH_KDB
		/*
		 * Check for watchpoint on kernel static data.
		 * vm_fault would fail in this case.
		 */
		if (map == kernel_map &&
		    db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (code & T_PF_WRITE) &&
		    (vm_offset_t)subcode < vm_last_phys &&
		    ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
		      INTEL_PTE_WRITE) == 0) {
			*pte = *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE;
						/* XXX need invltlb here? */
			result = KERN_SUCCESS;
			goto look_for_watchpoints;
		}
#endif	/* MACH_KDB */
		/*
		 * Since the 386 ignores write protection in
		 * kernel mode, always try for write permission
		 * first.  If that fails and the fault was a
		 * read fault, retry with read permission.
		 */
		if (map == kernel_map) {
			register struct recovery *rp;

			interruptible = THREAD_UNINT;
			for (rp = recover_table; rp < recover_table_end; rp++) {
				if (regs->eip == rp->fault_addr) {
					interruptible = THREAD_ABORTSAFE;
					break;
				}
			}
		}

		result = vm_fault(map,
				  trunc_page((vm_offset_t)subcode),
				  VM_PROT_READ|VM_PROT_WRITE,
				  FALSE,
				  (map == kernel_map) ? interruptible : THREAD_ABORTSAFE, NULL, 0);
		if (result == KERN_SUCCESS) {
#if	MACH_KDB
		    /* Look for watchpoints */
look_for_watchpoints:
		    if (db_watchpoint_list &&
			db_watchpoints_inserted &&
			(code & T_PF_WRITE) &&
			db_find_watchpoint(map,
					   (vm_offset_t)subcode, regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
#endif	/* MACH_KDB */
		    return (TRUE);
		}
		if ((code & T_PF_WRITE) == 0 &&
		    result == KERN_PROTECTION_FAILURE) {
		    /*
		     * Must expand vm_fault by hand,
		     * so that we can ask for read-only access
		     * but enter a (kernel) writable mapping.
		     */
		    result = intel_read_fault(map,
					      trunc_page((vm_offset_t)subcode));
		}
		if (result == KERN_SUCCESS) {
		    /*
		     * Certain faults require that we back up
		     * the EIP.
		     */
		    register struct recovery *rp;

		    for (rp = retry_table; rp < retry_table_end; rp++) {
			if (regs->eip == rp->fault_addr) {
			    regs->eip = rp->recover_addr;
			    break;
			}
		    }
		    return (TRUE);
		}

		break;
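		/*
		 * The distinction between the two tables: recover_table
		 * entries redirect EIP forward to an error handler (the
		 * copy routine reports the fault), while retry_table
		 * entries back EIP up so the faulting instruction
		 * re-executes now that the page is in -- the "retries
		 * the pte check" behavior described with the table
		 * declarations near the top of this file.
		 */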
	    case T_GENERAL_PROTECTION:

		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		{
		    register struct recovery *rp;

		    for (rp = recover_table;
			 rp < recover_table_end;
			 rp++) {
			if (regs->eip == rp->fault_addr) {
			    regs->eip = rp->recover_addr;
			    return (TRUE);
			}
		    }
		}

		/*
		 * Check thread recovery address also -
		 * v86 assist uses it.
		 */
		if (thread->recover) {
		    regs->eip = thread->recover;
		    thread->recover = 0;
		    return (TRUE);
		}

		/*
		 * Unanticipated page-fault errors in kernel
		 * should not return error; instead, fall through
		 * to the debugger-trap handling below.
		 */
		/* fall through... */
	    default:
		/*
		 * Exception 15 is reserved but some chips may generate it
		 * spuriously. Seen at startup on AMD Athlon-64.
		 */
		if (type == 15) {
			kprintf("kernel_trap() ignoring spurious trap 15\n");
			return (TRUE);
		}
	}

	/*
	 * ...and return failure, so that locore can call into
	 * debugger.
	 */
#if	MACH_KDP
	kdp_i386_trap(type, regs, result, regs->cr2);
#endif
	return (FALSE);
}
/*
 * Called if both kernel_trap() and kdb_trap() fail.
 */
void
panic_trap(
	register struct i386_saved_state	*regs)
{
	int		code = regs->err;
	register int	type = regs->trapno;

	printf("trap type %d, code = %x, pc = %x\n",
	       type, code, regs->eip);
	panic("trap");
}
/*
 * Trap from user mode.
 */
void
user_trap(
	register struct i386_saved_state	*regs)
{
	int		exc;
	int		code;
	unsigned int	subcode;
	int		type;
	vm_map_t	map;
	vm_prot_t	prot;
	kern_return_t	result;
	thread_t	thread = current_thread();
	boolean_t	kernel_act = FALSE;
	if (regs->efl & EFL_VM) {
	    /*
	     * If hardware assist can handle exception,
	     * continue execution.
	     */
	    if (v86_assist(thread, regs))
		return;
	}

	type = regs->trapno;
	code = 0;
	subcode = 0;

	switch (type) {

	    case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	    case T_DEBUG:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_SGL;
		break;

	    case T_INT3:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	    case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;
	    case T_OUT_OF_BOUNDS:
		exc = EXC_SOFTWARE;
		code = EXC_I386_BOUND;
		break;

	    case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;
	    case 10:		/* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = regs->err & 0xffff;
		break;
	    case T_GENERAL_PROTECTION:
		if (!(regs->efl & EFL_VM)) {
			if (check_io_fault(regs))
				return;
		}
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = regs->err & 0xffff;
		break;
	    case T_PAGE_FAULT:
		subcode = regs->cr2;
		prot = VM_PROT_READ|VM_PROT_WRITE;
		if (kernel_act == FALSE) {
			if (!(regs->err & T_PF_WRITE))
				prot = VM_PROT_READ;
			(void) user_page_fault_continue(vm_fault(thread->map,
				trunc_page((vm_offset_t)subcode),
				prot,
				FALSE,
				THREAD_ABORTSAFE, NULL, 0));
			/* NOTREACHED */
		}
		else {
			if (subcode > LINEAR_KERNEL_ADDRESS)
				map = kernel_map;
			else
				map = thread->map;
			result = vm_fault(thread->map,
				trunc_page((vm_offset_t)subcode),
				prot,
				FALSE,
				(map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);
			if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) {
				/*
				 * Must expand vm_fault by hand,
				 * so that we can ask for read-only access
				 * but enter a (kernel) writable mapping.
				 */
				result = intel_read_fault(thread->map,
					trunc_page((vm_offset_t)subcode));
			}
			user_page_fault_continue(result);
			/*NOTREACHED*/
		}
		break;
	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    default:
#if	MACH_KGDB
		Debugger("Unanticipated user trap");
		return;
#endif	/* MACH_KGDB */
#if	MACH_KDB
		if (kdb_trap(type, regs->err, regs))
			return;
#endif	/* MACH_KDB */
		printf("user trap type %d, code = %x, pc = %x\n",
		       type, regs->err, regs->eip);
		panic("user trap");
		return;
	}
#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(type, regs->err, regs))
		return;
#endif	/* MACH_KDB */

	i386_exception(exc, code, subcode);
	/*NOTREACHED*/
}
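/*
 * Example of the resulting exception delivery (a sketch; the exact
 * behavior lives in exception_triage()): a user INT3 arrives here with
 * type == T_INT3, so the switch above selects
 *
 *	exc     = EXC_BREAKPOINT;
 *	code    = EXC_I386_BPT;
 *	subcode = 0;
 *
 * and i386_exception() packs (code, subcode) into a codes[] array and
 * raises the Mach exception, typically surfacing as a debugger stop or
 * a SIGTRAP in the task.
 */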
/*
 * V86 mode assist for interrupt handling.
 */
boolean_t v86_assist_on = TRUE;
boolean_t v86_unsafe_ok = FALSE;
boolean_t v86_do_sti_cli = TRUE;
boolean_t v86_do_sti_immediate = FALSE;

#define	V86_IRET_PENDING 0x4000
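/*
 * V86_IRET_PENDING is kept in v86->flags next to the simulated EFL_IF
 * and EFL_TF bits; 0x4000 does not collide with either of those
 * (EFL_TF is 0x100, EFL_IF is 0x200 in the EFLAGS layout).
 */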
boolean_t
v86_assist(
	thread_t				thread,
	register struct i386_saved_state	*regs)
{
	register struct v86_assist_state *v86 = &thread->machine.pcb->ims.v86s;
/*
 * Build an 8086 address.  Use only when off is known to be 16 bits.
 */
#define	Addr8086(seg,off)	((((seg) & 0xffff) << 4) + (off))
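/*
 * Worked example: the real-mode linear address is segment * 16 + offset,
 * so Addr8086(0xB800, 0x0010) == (0xB800 << 4) + 0x10 == 0xB8010.
 */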
#define	EFL_V86_SAFE		(  EFL_OF | EFL_DF | EFL_TF \
				 | EFL_SF | EFL_ZF | EFL_AF \
				 | EFL_PF | EFL_CF )

	struct iret_32 {
		int		eip;
		int		cs;
		int		eflags;
	};
	struct iret_16 {
		unsigned short	ip;
		unsigned short	cs;
		unsigned short	flags;
	};
	union iret_struct {
		struct iret_32	iret_32;
		struct iret_16	iret_16;
	};
	struct int_vec {
		unsigned short	ip;
		unsigned short	cs;
	};
	/*
	 * If delayed STI pending, enable interrupts.
	 * Turn off tracing if on only to delay STI.
	 */
	if (v86->flags & V86_IF_PENDING) {
	    v86->flags &= ~V86_IF_PENDING;
	    v86->flags |=  EFL_IF;
	    if ((v86->flags & EFL_TF) == 0)
		regs->efl &= ~EFL_TF;
	}
	if (regs->trapno == T_DEBUG) {

	    if (v86->flags & EFL_TF) {
		/*
		 * Trace flag was also set - it has priority
		 */
		return FALSE;			/* handle as single-step */
	    }
	    /*
	     * Fall through to check for interrupts.
	     */
	}
	else if (regs->trapno == T_GENERAL_PROTECTION) {
	    /*
	     * General protection error - must be an 8086 instruction
	     * to emulate.
	     */
	    register int	eip;
	    boolean_t		addr_32 = FALSE;
	    boolean_t		data_32 = FALSE;
	    int			io_port;

	    /*
	     * Set up error handler for bad instruction/data
	     * fetches.
	     */
	    __asm__("movl $(addr_error), %0" : : "m" (thread->recover));

	    eip = regs->eip;
	    while (TRUE) {
		unsigned char	opcode;

		if (eip > 0xFFFF) {
		    thread->recover = 0;
		    return FALSE;	/* GP fault: IP out of range */
		}
		opcode = *(unsigned char *)Addr8086(regs->cs,eip);
		eip++;
		switch (opcode) {
		    case 0xf0:		/* lock */
		    case 0xf2:		/* repne */
		    case 0xf3:		/* repe */
			continue;	/* ignore prefix, scan next byte */

		    case 0x66:		/* data size */
			data_32 = TRUE;
			continue;

		    case 0x67:		/* address size */
			addr_32 = TRUE;
			continue;
		    case 0xe4:		/* inb imm */
		    case 0xe5:		/* inw imm */
		    case 0xe6:		/* outb imm */
		    case 0xe7:		/* outw imm */
			io_port = *(unsigned char *)Addr8086(regs->cs, eip);
			eip++;
			goto do_in_out;
		    case 0xec:		/* inb dx */
		    case 0xed:		/* inw dx */
		    case 0xee:		/* outb dx */
		    case 0xef:		/* outw dx */
		    case 0x6c:		/* insb */
		    case 0x6d:		/* insw */
		    case 0x6e:		/* outsb */
		    case 0x6f:		/* outsw */
			io_port = regs->edx & 0xffff;

		    do_in_out:
			if (!data_32)
			    opcode |= 0x6600;	/* word IO */

			switch (emulate_io(regs, opcode, io_port)) {
			    case EM_IO_DONE:
				/* instruction executed */
				break;
			    case EM_IO_RETRY:
				/* port mapped, retry instruction */
				thread->recover = 0;
				return TRUE;
			    case EM_IO_ERROR:
				/* port not mapped */
				break;
			}
			break;
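			/*
			 * Note on the 0x6600 above: 0x66 is the x86
			 * operand-size prefix byte, so OR-ing it into the
			 * high bits is how the (assumed) emulate_io()
			 * interface is told the transfer is word-sized
			 * rather than byte-sized; check_io_fault() below
			 * uses the identical convention.
			 */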
		    case 0xfa:		/* cli */
			if (!v86_do_sti_cli) {
			    thread->recover = 0;
			    return (FALSE);
			}

			v86->flags &= ~EFL_IF;
					/* disable simulated interrupts */
			break;

		    case 0xfb:		/* sti */
			if (!v86_do_sti_cli) {
			    thread->recover = 0;
			    return (FALSE);
			}

			if ((v86->flags & EFL_IF) == 0) {
			    if (v86_do_sti_immediate) {
				v86->flags |= EFL_IF;
			    } else {
				v86->flags |= V86_IF_PENDING;
				regs->efl  |= EFL_TF;
					/* single step to set IF next inst. */
			    }
			}
			break;
		    case 0x9c:		/* pushf */
		    {
			int		flags;
			vm_offset_t	sp;
			int		size;

			flags = regs->efl;
			if ((v86->flags & EFL_IF) == 0)
			    flags &= ~EFL_IF;

			if ((v86->flags & EFL_TF) == 0)
			    flags &= ~EFL_TF;
			else
			    flags |= EFL_TF;

			sp = regs->uesp;
			if (!addr_32)
			    sp &= 0xffff;
			else if (sp > 0xffff)
			    goto stack_error;
			size = (data_32) ? 4 : 2;
			if (sp < size)
			    goto stack_error;
			sp -= size;
			if (copyout((char *)&flags,
				    (user_addr_t)Addr8086(regs->ss,sp),
				    size))
			    goto addr_error;
			regs->uesp = (regs->uesp & 0xffff0000) | sp;
			break;
		    }
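		    /*
		     * The flags image pushed above is deliberately the
		     * *virtual* one: the guest's IF and TF are taken from
		     * v86->flags rather than the real EFLAGS, so a v86
		     * program reading its own stack sees the interrupt
		     * state it believes it established with cli/sti.
		     */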
		    case 0x9d:		/* popf */
		    {
			vm_offset_t	sp;
			int		nflags;

			sp = regs->uesp;
			if (!addr_32)
			    sp &= 0xffff;
			else if (sp > 0xffff)
			    goto stack_error;

			if (data_32) {
			    if (sp > 0xffff - sizeof(int))
				goto stack_error;
			    nflags = *(int *)Addr8086(regs->ss,sp);
			    sp += sizeof(int);
			}
			else {
			    if (sp > 0xffff - sizeof(short))
				goto stack_error;
			    nflags = *(unsigned short *)
					Addr8086(regs->ss,sp);
			    sp += sizeof(short);
			}
			regs->uesp = (regs->uesp & 0xffff0000) | sp;

			if (v86->flags & V86_IRET_PENDING) {
				v86->flags = nflags & (EFL_TF | EFL_IF);
				v86->flags |= V86_IRET_PENDING;
			} else {
				v86->flags = nflags & (EFL_TF | EFL_IF);
			}
			regs->efl = (regs->efl & ~EFL_V86_SAFE)
				    | (nflags & EFL_V86_SAFE);
			break;
		    }
		    case 0xcf:		/* iret */
		    {
			vm_offset_t	sp;
			int		nflags;
			union iret_struct iret_struct;

			v86->flags &= ~V86_IRET_PENDING;
			sp = regs->uesp;
			if (!addr_32)
			    sp &= 0xffff;
			else if (sp > 0xffff)
			    goto stack_error;

			if (data_32) {
			    if (sp > 0xffff - sizeof(struct iret_32))
				goto stack_error;
			    iret_struct.iret_32 =
				*(struct iret_32 *) Addr8086(regs->ss,sp);
			    sp += sizeof(struct iret_32);
			}
			else {
			    if (sp > 0xffff - sizeof(struct iret_16))
				goto stack_error;
			    iret_struct.iret_16 =
				*(struct iret_16 *) Addr8086(regs->ss,sp);
			    sp += sizeof(struct iret_16);
			}
			regs->uesp = (regs->uesp & 0xffff0000) | sp;
			if (data_32) {
			    eip       = iret_struct.iret_32.eip;
			    regs->cs  = iret_struct.iret_32.cs & 0xffff;
			    nflags    = iret_struct.iret_32.eflags;
			}
			else {
			    eip       = iret_struct.iret_16.ip;
			    regs->cs  = iret_struct.iret_16.cs;
			    nflags    = iret_struct.iret_16.flags;
			}

			v86->flags = nflags & (EFL_TF | EFL_IF);
			regs->efl = (regs->efl & ~EFL_V86_SAFE)
				    | (nflags & EFL_V86_SAFE);
			break;
		    }
		    default:
			/*
			 * Instruction not emulated here.
			 */
			thread->recover = 0;
			return FALSE;
		}
		break;	/* exit from 'while TRUE' */
	    }
	    regs->eip = (regs->eip & 0xffff0000) | eip;
	}
	else {
	    /*
	     * Not a trap we handle.
	     */
	    return FALSE;
	}

	if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING)==0)) {
	    struct v86_interrupt_table *int_table;
	    int		int_count;
	    int		vec;
	    int		i;

	    int_table = (struct v86_interrupt_table *) v86->int_table;
	    int_count = v86->int_count;

	    vec = 0;
	    for (i = 0; i < int_count; int_table++, i++) {
		if (!int_table->mask && int_table->count > 0) {
		    int_table->count--;
		    vec = int_table->vec;
		    break;
		}
	    }
	    if (vec != 0) {
		/*
		 * Take this interrupt
		 */
		vm_offset_t	sp;
		struct iret_16	iret_16;
		struct int_vec	int_vec;

		sp = regs->uesp & 0xffff;
		if (sp < sizeof(struct iret_16))
		    goto stack_error;
		sp -= sizeof(struct iret_16);
		iret_16.ip = regs->eip;
		iret_16.cs = regs->cs;
		iret_16.flags = regs->efl & 0xFFFF;
		if ((v86->flags & EFL_TF) == 0)
		    iret_16.flags &= ~EFL_TF;
		else
		    iret_16.flags |= EFL_TF;

		(void) memcpy((char *) &int_vec,
			      (char *) (sizeof(struct int_vec) * vec),
			      sizeof (struct int_vec));
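		/*
		 * The memcpy source above is the real-mode interrupt
		 * vector table: vector n lives at linear address n * 4,
		 * and struct int_vec matches the 4-byte IVT entry layout
		 * (16-bit ip, then 16-bit cs).  This assumes the v86
		 * task's low memory is addressable here; the addr_error
		 * recovery handler installed earlier covers a bad fetch.
		 */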
		if (copyout((char *)&iret_16,
			    (user_addr_t)Addr8086(regs->ss,sp),
			    sizeof(struct iret_16)))
		    goto addr_error;
		regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
		regs->eip = int_vec.ip;
		regs->cs  = int_vec.cs;
		regs->efl &= ~EFL_TF;
		v86->flags &= ~(EFL_IF | EFL_TF);
		v86->flags |= V86_IRET_PENDING;
	    }
	}
	thread->recover = 0;
	return TRUE;

	/*
	 * On address error, report a page fault.
	 * XXX report GP fault - we don't save
	 * the faulting address.
	 */
    addr_error:
	__asm__("addr_error:;");
	thread->recover = 0;
	return FALSE;

	/*
	 * On stack address error, return stack fault (12).
	 */
    stack_error:
	thread->recover = 0;
	regs->trapno = T_STACK_FAULT;
	return FALSE;
}
/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * Weitek floating-point chip.
 */
extern void log_thread_action (thread_t, char *);
void
i386_astintr(int preemption)
{
	ast_t	*my_ast, mask = AST_ALL;
	spl_t	s;

	s = splsched();		/* block interrupts to check reasons */
	mp_disable_preemption();
	my_ast = ast_pending();
	if (*my_ast & AST_I386_FP) {
	    /*
	     * AST was for delayed floating-point exception -
	     * FP interrupt occurred while in kernel.
	     * Turn off this AST reason and handle the FPU error.
	     */
	    ast_off(AST_I386_FP);
	    mp_enable_preemption();
	    splx(s);

	    fpexterrflt();
	}
	else {
	    /*
	     * Not an FPU trap.  Handle the AST.
	     * Interrupts are still blocked.
	     */
#if 1
	    if (preemption) {
		mask = AST_PREEMPTION;
		mp_enable_preemption();
	    } else {
		mp_enable_preemption();
	    }
#else
	    mp_enable_preemption();
#endif

	    ast_taken(mask, s);
	}
}
/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
	int	exc,
	int	code,
	int	subcode)
{
	exception_data_type_t	codes[EXCEPTION_CODE_MAX];

	/*
	 * Turn off delayed FPU error handling.
	 */
	mp_disable_preemption();
	ast_off(AST_I386_FP);
	mp_enable_preemption();

	codes[0] = code;	/* new exception interface */
	codes[1] = subcode;
	exception_triage(exc, codes, 2);
	/*NOTREACHED*/
}
boolean_t
check_io_fault(
	struct i386_saved_state *regs)
{
	int		eip, opcode, io_port;
	boolean_t	data_16 = FALSE;

	/*
	 * Get the instruction.
	 */
	eip = regs->eip;

	while (TRUE) {
	    opcode = inst_fetch(eip, regs->cs);
	    eip++;
	    switch (opcode) {
		case 0x66:	/* data-size prefix */
		    data_16 = TRUE;
		    continue;

		case 0xf3:	/* rep prefix */
		    continue;

		case 0xE4:	/* inb imm */
		case 0xE5:	/* inl imm */
		case 0xE6:	/* outb imm */
		case 0xE7:	/* outl imm */
		    /* port is immediate byte */
		    io_port = inst_fetch(eip, regs->cs);
		    eip++;
		    break;

		case 0xEC:	/* inb dx */
		case 0xED:	/* inl dx */
		case 0xEE:	/* outb dx */
		case 0xEF:	/* outl dx */
		case 0x6C:	/* insb */
		case 0x6D:	/* insl */
		case 0x6E:	/* outsb */
		case 0x6F:	/* outsl */
		    /* port is in DX register */
		    io_port = regs->edx & 0xFFFF;
		    break;

		default:
		    return FALSE;
	    }
	    break;
	}

	if (data_16)
	    opcode |= 0x6600;		/* word IO */

	switch (emulate_io(regs, opcode, io_port)) {
	    case EM_IO_DONE:
		/* instruction executed */
		regs->eip = eip;
		return TRUE;

	    case EM_IO_RETRY:
		/* port mapped, retry instruction */
		return TRUE;

	    case EM_IO_ERROR:
		/* port not mapped */
		break;
	}
	return FALSE;
}
void
kernel_preempt_check (void)
{
	ast_t	*myast;

	mp_disable_preemption();
	myast = ast_pending();
	if ((*myast & AST_URGENT) &&
	    get_interrupt_level() == 1
	    ) {
		mp_enable_preemption_no_check();
		__asm__ volatile ("	int	$0xff");
	} else {
		mp_enable_preemption_no_check();
	}
}
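/*
 * The "int $0xff" above re-enters the trap path in order to take the
 * urgent AST as a preemption: vector 0xff is assumed here to be wired
 * to T_PREEMPT, which kernel_trap() answers with
 * ast_taken(AST_PREEMPTION, FALSE) near the top of this file.
 */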
#if	MACH_KDB

extern void db_i386_state(struct i386_saved_state *regs);

#include <ddb/db_output.h>

void
db_i386_state(
	struct i386_saved_state *regs)
{
	db_printf("eip	%8x\n", regs->eip);
	db_printf("trap	%8x\n", regs->trapno);
	db_printf("err	%8x\n", regs->err);
	db_printf("efl	%8x\n", regs->efl);
	db_printf("ebp	%8x\n", regs->ebp);
	db_printf("esp	%8x\n", regs->esp);
	db_printf("uesp	%8x\n", regs->uesp);
	db_printf("cs	%8x\n", regs->cs & 0xff);
	db_printf("ds	%8x\n", regs->ds & 0xff);
	db_printf("es	%8x\n", regs->es & 0xff);
	db_printf("fs	%8x\n", regs->fs & 0xff);
	db_printf("gs	%8x\n", regs->gs & 0xff);
	db_printf("ss	%8x\n", regs->ss & 0xff);
	db_printf("eax	%8x\n", regs->eax);
	db_printf("ebx	%8x\n", regs->ebx);
	db_printf("ecx	%8x\n", regs->ecx);
	db_printf("edx	%8x\n", regs->edx);
	db_printf("esi	%8x\n", regs->esi);
	db_printf("edi	%8x\n", regs->edi);
}

#endif	/* MACH_KDB */