2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
27 * Mach Operating System
28 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
29 * All Rights Reserved.
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
41 * Carnegie Mellon requests users of this software to return to
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
54 * Hardware trap/fault handler.
58 #include <mach_kgdb.h>
60 #include <mach_ldebug.h>
63 #include <i386/eflags.h>
64 #include <i386/trap.h>
65 #include <i386/pmap.h>
68 #include <mach/exception.h>
69 #include <mach/kern_return.h>
70 #include <mach/vm_param.h>
71 #include <mach/i386/thread_status.h>
73 #include <vm/vm_kern.h>
74 #include <vm/vm_fault.h>
76 #include <kern/kern_types.h>
77 #include <kern/processor.h>
78 #include <kern/thread.h>
79 #include <kern/task.h>
80 #include <kern/sched.h>
81 #include <kern/sched_prim.h>
82 #include <kern/exception.h>
84 #include <kern/misc_protos.h>
87 #include <kgdb/kgdb_defs.h>
88 #endif /* MACH_KGDB */
90 #include <i386/intel_read_fault.h>
93 #include <kgdb/kgdb_defs.h>
94 #endif /* MACH_KGDB */
97 #include <ddb/db_watch.h>
98 #include <ddb/db_run.h>
99 #include <ddb/db_break.h>
100 #include <ddb/db_trap.h>
101 #endif /* MACH_KDB */
105 #include <i386/io_emulate.h>
108 * Forward declarations
110 extern void user_page_fault_continue(
113 extern boolean_t
v86_assist(
115 struct i386_saved_state
*regs
);
117 extern boolean_t
check_io_fault(
118 struct i386_saved_state
*regs
);
120 extern int inst_fetch(
125 thread_syscall_return(
128 register thread_t thr_act
= current_thread();
129 register struct i386_saved_state
*regs
= USER_REGS(thr_act
);
131 thread_exception_return();
137 boolean_t debug_all_traps_with_kdb
= FALSE
;
138 extern struct db_watchpoint
*db_watchpoint_list
;
139 extern boolean_t db_watchpoints_inserted
;
140 extern boolean_t db_breakpoints_inserted
;
143 thread_kdb_return(void)
145 register thread_t thread
= current_thread();
146 register struct i386_saved_state
*regs
= USER_REGS(thread
);
148 if (kdb_trap(regs
->trapno
, regs
->err
, regs
)) {
150 assert(thread
->mutex_count
== 0);
151 #endif /* MACH_LDEBUG */
152 thread_exception_return();
156 boolean_t let_ddb_vm_fault
= FALSE
;
158 #endif /* MACH_KDB */
161 user_page_fault_continue(
164 register thread_t thread
= current_thread();
165 register struct i386_saved_state
*regs
= USER_REGS(thread
);
167 if ((kr
== KERN_SUCCESS
) || (kr
== KERN_ABORTED
)) {
169 if (!db_breakpoints_inserted
) {
170 db_set_breakpoints();
172 if (db_watchpoint_list
&&
173 db_watchpoints_inserted
&&
174 (regs
->err
& T_PF_WRITE
) &&
175 db_find_watchpoint(thread
->map
,
176 (vm_offset_t
)regs
->cr2
,
178 kdb_trap(T_WATCHPOINT
, 0, regs
);
179 #endif /* MACH_KDB */
180 thread_exception_return();
185 if (debug_all_traps_with_kdb
&&
186 kdb_trap(regs
->trapno
, regs
->err
, regs
)) {
188 assert(thread
->mutex_count
== 0);
189 #endif /* MACH_LDEBUG */
190 thread_exception_return();
193 #endif /* MACH_KDB */
195 i386_exception(EXC_BAD_ACCESS
, kr
, regs
->cr2
);
200 * Fault recovery in copyin/copyout routines.
204 uint32_t recover_addr
;
207 extern struct recovery recover_table
[];
208 extern struct recovery recover_table_end
[];
211 * Recovery from Successful fault in copyout does not
212 * return directly - it retries the pte check, since
213 * the 386 ignores write protection in kernel mode.
215 extern struct recovery retry_table
[];
216 extern struct recovery retry_table_end
[];
218 const char * trap_type
[] = {TRAP_NAMES
};
219 int TRAP_TYPES
= sizeof(trap_type
)/sizeof(trap_type
[0]);
223 * Trap from kernel mode. Only page-fault errors are recoverable,
224 * and then only in special circumstances. All other errors are
225 * fatal. Return value indicates if trap was handled.
229 register struct i386_saved_state
*regs
)
232 unsigned int subcode
;
233 int interruptible
= THREAD_UNINT
;
236 kern_return_t result
= KERN_FAILURE
;
237 register thread_t thread
;
241 thread
= current_thread();
245 ast_taken(AST_PREEMPTION
, FALSE
);
256 case T_FLOATING_POINT_ERROR
:
262 * If the current map is a submap of the kernel map,
263 * and the address is within that map, fault on that
264 * map. If the same check is done in vm_fault
265 * (vm_map_lookup), we may deadlock on the kernel map
269 mp_disable_preemption();
271 && kdb_active
[cpu_number()]
272 && !let_ddb_vm_fault
) {
274 * Force kdb to handle this one.
276 mp_enable_preemption();
279 mp_enable_preemption();
280 #endif /* MACH_KDB */
281 subcode
= regs
->cr2
; /* get faulting address */
283 if (subcode
> LINEAR_KERNEL_ADDRESS
) {
285 } else if (thread
== THREAD_NULL
)
292 * Check for watchpoint on kernel static data.
293 * vm_fault would fail in this case
295 if (map
== kernel_map
&&
296 db_watchpoint_list
&&
297 db_watchpoints_inserted
&&
298 (code
& T_PF_WRITE
) &&
299 (vm_offset_t
)subcode
< vm_last_phys
&&
300 ((*(pte
= pmap_pte(kernel_pmap
, (vm_offset_t
)subcode
))) &
301 INTEL_PTE_WRITE
) == 0) {
302 *pte
= *pte
| INTEL_PTE_VALID
| INTEL_PTE_WRITE
; /* XXX need invltlb here? */
303 result
= KERN_SUCCESS
;
305 #endif /* MACH_KDB */
308 * Since the 386 ignores write protection in
309 * kernel mode, always try for write permission
310 * first. If that fails and the fault was a
311 * read fault, retry with read permission.
313 if (map
== kernel_map
) {
314 register struct recovery
*rp
;
316 interruptible
= THREAD_UNINT
;
317 for (rp
= recover_table
; rp
< recover_table_end
; rp
++) {
318 if (regs
->eip
== rp
->fault_addr
) {
319 interruptible
= THREAD_ABORTSAFE
;
324 result
= vm_fault(map
,
325 trunc_page((vm_offset_t
)subcode
),
326 VM_PROT_READ
|VM_PROT_WRITE
,
328 (map
== kernel_map
) ? interruptible
: THREAD_ABORTSAFE
, NULL
, 0);
331 if (result
== KERN_SUCCESS
) {
332 /* Look for watchpoints */
333 if (db_watchpoint_list
&&
334 db_watchpoints_inserted
&&
335 (code
& T_PF_WRITE
) &&
336 db_find_watchpoint(map
,
337 (vm_offset_t
)subcode
, regs
))
338 kdb_trap(T_WATCHPOINT
, 0, regs
);
341 #endif /* MACH_KDB */
342 if ((code
& T_PF_WRITE
) == 0 &&
343 result
== KERN_PROTECTION_FAILURE
)
346 * Must expand vm_fault by hand,
347 * so that we can ask for read-only access
348 * but enter a (kernel)writable mapping.
350 result
= intel_read_fault(map
,
351 trunc_page((vm_offset_t
)subcode
));
354 if (result
== KERN_SUCCESS
) {
356 * Certain faults require that we back up
359 register struct recovery
*rp
;
361 for (rp
= retry_table
; rp
< retry_table_end
; rp
++) {
362 if (regs
->eip
== rp
->fault_addr
) {
363 regs
->eip
= rp
->recover_addr
;
372 case T_GENERAL_PROTECTION
:
375 * If there is a failure recovery address
376 * for this fault, go there.
379 register struct recovery
*rp
;
381 for (rp
= recover_table
;
382 rp
< recover_table_end
;
384 if (regs
->eip
== rp
->fault_addr
) {
385 regs
->eip
= rp
->recover_addr
;
392 * Check thread recovery address also -
393 * v86 assist uses it.
395 if (thread
->recover
) {
396 regs
->eip
= thread
->recover
;
402 * Unanticipated page-fault errors in kernel
405 /* fall through... */
409 * Exception 15 is reserved but some chips may generate it
410 * spuriously. Seen at startup on AMD Athlon-64.
413 kprintf("kernel_trap() ignoring spurious trap 15\n");
418 * ...and return failure, so that locore can call into
422 kdp_i386_trap(type
, regs
, result
, regs
->cr2
);
430 * Called if both kernel_trap() and kdb_trap() fail.
434 register struct i386_saved_state
*regs
)
442 printf("trap type %d, code = %x, pc = %x\n",
443 type
, code
, regs
->eip
);
449 * Trap from user mode.
453 register struct i386_saved_state
*regs
)
457 unsigned int subcode
;
461 kern_return_t result
;
462 thread_t thread
= current_thread();
463 boolean_t kernel_act
= FALSE
;
465 if (regs
->efl
& EFL_VM
) {
467 * If hardware assist can handle exception,
468 * continue execution.
470 if (v86_assist(thread
, regs
))
482 exc
= EXC_ARITHMETIC
;
487 exc
= EXC_BREAKPOINT
;
492 exc
= EXC_BREAKPOINT
;
497 exc
= EXC_ARITHMETIC
;
498 code
= EXC_I386_INTO
;
501 case T_OUT_OF_BOUNDS
:
503 code
= EXC_I386_BOUND
;
506 case T_INVALID_OPCODE
:
507 exc
= EXC_BAD_INSTRUCTION
;
508 code
= EXC_I386_INVOP
;
520 case 10: /* invalid TSS == iret with NT flag set */
521 exc
= EXC_BAD_INSTRUCTION
;
522 code
= EXC_I386_INVTSSFLT
;
523 subcode
= regs
->err
& 0xffff;
526 case T_SEGMENT_NOT_PRESENT
:
527 exc
= EXC_BAD_INSTRUCTION
;
528 code
= EXC_I386_SEGNPFLT
;
529 subcode
= regs
->err
& 0xffff;
533 exc
= EXC_BAD_INSTRUCTION
;
534 code
= EXC_I386_STKFLT
;
535 subcode
= regs
->err
& 0xffff;
538 case T_GENERAL_PROTECTION
:
539 if (!(regs
->efl
& EFL_VM
)) {
540 if (check_io_fault(regs
))
543 exc
= EXC_BAD_INSTRUCTION
;
544 code
= EXC_I386_GPFLT
;
545 subcode
= regs
->err
& 0xffff;
550 prot
= VM_PROT_READ
|VM_PROT_WRITE
;
551 if (kernel_act
== FALSE
) {
552 if (!(regs
->err
& T_PF_WRITE
))
554 (void) user_page_fault_continue(vm_fault(thread
->map
,
555 trunc_page((vm_offset_t
)subcode
),
558 THREAD_ABORTSAFE
, NULL
, 0));
562 if (subcode
> LINEAR_KERNEL_ADDRESS
) {
565 result
= vm_fault(thread
->map
,
566 trunc_page((vm_offset_t
)subcode
),
569 (map
== kernel_map
) ? THREAD_UNINT
: THREAD_ABORTSAFE
, NULL
, 0);
570 if ((result
!= KERN_SUCCESS
) && (result
!= KERN_ABORTED
)) {
572 * Must expand vm_fault by hand,
573 * so that we can ask for read-only access
574 * but enter a (kernel) writable mapping.
576 result
= intel_read_fault(thread
->map
,
577 trunc_page((vm_offset_t
)subcode
));
579 user_page_fault_continue(result
);
584 case T_FLOATING_POINT_ERROR
:
590 Debugger("Unanticipated user trap");
592 #endif /* MACH_KGDB */
594 if (kdb_trap(type
, regs
->err
, regs
))
596 #endif /* MACH_KDB */
597 printf("user trap type %d, code = %x, pc = %x\n",
598 type
, regs
->err
, regs
->eip
);
604 if (debug_all_traps_with_kdb
&&
605 kdb_trap(type
, regs
->err
, regs
))
607 #endif /* MACH_KDB */
609 i386_exception(exc
, code
, subcode
);
614 * V86 mode assist for interrupt handling.
616 boolean_t v86_assist_on
= TRUE
;
617 boolean_t v86_unsafe_ok
= FALSE
;
618 boolean_t v86_do_sti_cli
= TRUE
;
619 boolean_t v86_do_sti_immediate
= FALSE
;
621 #define V86_IRET_PENDING 0x4000
629 register struct i386_saved_state
*regs
)
631 register struct v86_assist_state
*v86
= &thread
->machine
.pcb
->ims
.v86s
;
634 * Build an 8086 address. Use only when off is known to be 16 bits.
636 #define Addr8086(seg,off) ((((seg) & 0xffff) << 4) + (off))
638 #define EFL_V86_SAFE ( EFL_OF | EFL_DF | EFL_TF \
639 | EFL_SF | EFL_ZF | EFL_AF \
649 unsigned short flags
;
652 struct iret_32 iret_32
;
653 struct iret_16 iret_16
;
665 * If delayed STI pending, enable interrupts.
666 * Turn off tracing if on only to delay STI.
668 if (v86
->flags
& V86_IF_PENDING
) {
669 v86
->flags
&= ~V86_IF_PENDING
;
670 v86
->flags
|= EFL_IF
;
671 if ((v86
->flags
& EFL_TF
) == 0)
672 regs
->efl
&= ~EFL_TF
;
675 if (regs
->trapno
== T_DEBUG
) {
677 if (v86
->flags
& EFL_TF
) {
679 * Trace flag was also set - it has priority
681 return FALSE
; /* handle as single-step */
684 * Fall through to check for interrupts.
687 else if (regs
->trapno
== T_GENERAL_PROTECTION
) {
689 * General protection error - must be an 8086 instruction
693 boolean_t addr_32
= FALSE
;
694 boolean_t data_32
= FALSE
;
698 * Set up error handler for bad instruction/data
701 __asm__("movl $(addr_error), %0" : : "m" (thread
->recover
));
705 unsigned char opcode
;
709 return FALSE
; /* GP fault: IP out of range */
712 opcode
= *(unsigned char *)Addr8086(regs
->cs
,eip
);
715 case 0xf0: /* lock */
716 case 0xf2: /* repne */
717 case 0xf3: /* repe */
727 case 0x66: /* data size */
731 case 0x67: /* address size */
735 case 0xe4: /* inb imm */
736 case 0xe5: /* inw imm */
737 case 0xe6: /* outb imm */
738 case 0xe7: /* outw imm */
739 io_port
= *(unsigned char *)Addr8086(regs
->cs
, eip
);
743 case 0xec: /* inb dx */
744 case 0xed: /* inw dx */
745 case 0xee: /* outb dx */
746 case 0xef: /* outw dx */
747 case 0x6c: /* insb */
748 case 0x6d: /* insw */
749 case 0x6e: /* outsb */
750 case 0x6f: /* outsw */
751 io_port
= regs
->edx
& 0xffff;
755 opcode
|= 0x6600; /* word IO */
757 switch (emulate_io(regs
, opcode
, io_port
)) {
759 /* instruction executed */
762 /* port mapped, retry instruction */
766 /* port not mapped */
773 if (!v86_do_sti_cli
) {
778 v86
->flags
&= ~EFL_IF
;
779 /* disable simulated interrupts */
784 if (!v86_do_sti_cli
) {
789 if ((v86
->flags
& EFL_IF
) == 0) {
790 if (v86_do_sti_immediate
) {
791 v86
->flags
|= EFL_IF
;
793 v86
->flags
|= V86_IF_PENDING
;
796 /* single step to set IF next inst. */
801 case 0x9c: /* pushf */
808 if ((v86
->flags
& EFL_IF
) == 0)
811 if ((v86
->flags
& EFL_TF
) == 0)
813 else flags
|= EFL_TF
;
818 else if (sp
> 0xffff)
820 size
= (data_32
) ? 4 : 2;
824 if (copyout((char *)&flags
,
825 (user_addr_t
)Addr8086(regs
->ss
,sp
),
831 regs
->uesp
= (regs
->uesp
& 0xffff0000) | sp
;
835 case 0x9d: /* popf */
843 else if (sp
> 0xffff)
847 if (sp
> 0xffff - sizeof(int))
849 nflags
= *(int *)Addr8086(regs
->ss
,sp
);
853 if (sp
> 0xffff - sizeof(short))
855 nflags
= *(unsigned short *)
856 Addr8086(regs
->ss
,sp
);
862 regs
->uesp
= (regs
->uesp
& 0xffff0000) | sp
;
864 if (v86
->flags
& V86_IRET_PENDING
) {
865 v86
->flags
= nflags
& (EFL_TF
| EFL_IF
);
866 v86
->flags
|= V86_IRET_PENDING
;
868 v86
->flags
= nflags
& (EFL_TF
| EFL_IF
);
870 regs
->efl
= (regs
->efl
& ~EFL_V86_SAFE
)
871 | (nflags
& EFL_V86_SAFE
);
874 case 0xcf: /* iret */
878 union iret_struct iret_struct
;
880 v86
->flags
&= ~V86_IRET_PENDING
;
884 else if (sp
> 0xffff)
888 if (sp
> 0xffff - sizeof(struct iret_32
))
890 iret_struct
.iret_32
=
891 *(struct iret_32
*) Addr8086(regs
->ss
,sp
);
892 sp
+= sizeof(struct iret_32
);
895 if (sp
> 0xffff - sizeof(struct iret_16
))
897 iret_struct
.iret_16
=
898 *(struct iret_16
*) Addr8086(regs
->ss
,sp
);
899 sp
+= sizeof(struct iret_16
);
904 regs
->uesp
= (regs
->uesp
& 0xffff0000) | sp
;
907 eip
= iret_struct
.iret_32
.eip
;
908 regs
->cs
= iret_struct
.iret_32
.cs
& 0xffff;
909 nflags
= iret_struct
.iret_32
.eflags
;
912 eip
= iret_struct
.iret_16
.ip
;
913 regs
->cs
= iret_struct
.iret_16
.cs
;
914 nflags
= iret_struct
.iret_16
.flags
;
917 v86
->flags
= nflags
& (EFL_TF
| EFL_IF
);
918 regs
->efl
= (regs
->efl
& ~EFL_V86_SAFE
)
919 | (nflags
& EFL_V86_SAFE
);
924 * Instruction not emulated here.
929 break; /* exit from 'while TRUE' */
931 regs
->eip
= (regs
->eip
& 0xffff0000) | eip
;
935 * Not a trap we handle.
941 if ((v86
->flags
& EFL_IF
) && ((v86
->flags
& V86_IRET_PENDING
)==0)) {
943 struct v86_interrupt_table
*int_table
;
948 int_table
= (struct v86_interrupt_table
*) v86
->int_table
;
949 int_count
= v86
->int_count
;
952 for (i
= 0; i
< int_count
; int_table
++, i
++) {
953 if (!int_table
->mask
&& int_table
->count
> 0) {
955 vec
= int_table
->vec
;
961 * Take this interrupt
964 struct iret_16 iret_16
;
965 struct int_vec int_vec
;
967 sp
= regs
->uesp
& 0xffff;
968 if (sp
< sizeof(struct iret_16
))
970 sp
-= sizeof(struct iret_16
);
971 iret_16
.ip
= regs
->eip
;
972 iret_16
.cs
= regs
->cs
;
973 iret_16
.flags
= regs
->efl
& 0xFFFF;
974 if ((v86
->flags
& EFL_TF
) == 0)
975 iret_16
.flags
&= ~EFL_TF
;
976 else iret_16
.flags
|= EFL_TF
;
978 (void) memcpy((char *) &int_vec
,
979 (char *) (sizeof(struct int_vec
) * vec
),
980 sizeof (struct int_vec
));
981 if (copyout((char *)&iret_16
,
982 (user_addr_t
)Addr8086(regs
->ss
,sp
),
983 sizeof(struct iret_16
)))
985 regs
->uesp
= (regs
->uesp
& 0xFFFF0000) | (sp
& 0xffff);
986 regs
->eip
= int_vec
.ip
;
987 regs
->cs
= int_vec
.cs
;
988 regs
->efl
&= ~EFL_TF
;
989 v86
->flags
&= ~(EFL_IF
| EFL_TF
);
990 v86
->flags
|= V86_IRET_PENDING
;
998 * On address error, report a page fault.
999 * XXX report GP fault - we don`t save
1000 * the faulting address.
1003 __asm__("addr_error:;");
1004 thread
->recover
= 0;
1008 * On stack address error, return stack fault (12).
1011 thread
->recover
= 0;
1012 regs
->trapno
= T_STACK_FAULT
;
1017 * Handle AST traps for i386.
1018 * Check for delayed floating-point exception from
1022 extern void log_thread_action (thread_t
, char *);
/*
 * i386_astintr — AST (asynchronous system trap) handler for i386.
 *
 * NOTE(review): this region was garbled by extraction: stray original
 * line numbers are fused into the text, statements are split across
 * lines, and several lines are missing entirely (the return-type
 * line, the spl_t declaration, the fpexterrflt() path, braces and
 * splx() calls, judging by the gaps in the fused numbering).  The
 * code below is left byte-identical; comments only.
 */
1025 i386_astintr(int preemption
)
1027 ast_t
*my_ast
, mask
= AST_ALL
;
/* Block interrupts while the pending-AST reasons are examined. */
1030 s
= splsched(); /* block interrupts to check reasons */
1031 mp_disable_preemption();
1032 my_ast
= ast_pending();
/* AST_I386_FP marks a delayed floating-point exception taken while
 * in the kernel; it is cleared and (presumably, in the lost lines)
 * the FPU error is handled — confirm against upstream. */
1033 if (*my_ast
& AST_I386_FP
) {
1035 * AST was for delayed floating-point exception -
1036 * FP interrupt occurred while in kernel.
1037 * Turn off this AST reason and handle the FPU error.
1040 ast_off(AST_I386_FP
);
1041 mp_enable_preemption();
1048 * Not an FPU trap. Handle the AST.
1049 * Interrupts are still blocked.
/* NOTE(review): presumably mask is narrowed to AST_PREEMPTION only
 * when `preemption` is set; the branch structure was lost. */
1054 mask
= AST_PREEMPTION
;
1055 mp_enable_preemption();
1057 mp_enable_preemption();
1060 mp_enable_preemption();
1069 * Handle exceptions for i386.
1071 * If we are an AT bus machine, we must turn off the AST for a
1072 * delayed floating-point exception.
1074 * If we are providing floating-point emulation, we may have
1075 * to retrieve the real register values from the floating point
1085 exception_data_type_t codes
[EXCEPTION_CODE_MAX
];
1088 * Turn off delayed FPU error handling.
1091 mp_disable_preemption();
1092 ast_off(AST_I386_FP
);
1093 mp_enable_preemption();
1096 codes
[0] = code
; /* new exception interface */
1098 exception_triage(exc
, codes
, 2);
/*
 * check_io_fault — attempt to emulate an I/O instruction that caused
 * a general-protection fault (declared `extern boolean_t
 * check_io_fault(struct i386_saved_state *regs)` earlier in this
 * file).
 *
 * NOTE(review): this region was garbled by extraction: stray fused
 * line numbers, split statements, and missing lines (the function
 * name line, the switch framing, eip advancement and return paths,
 * judging by the gaps in the fused numbering).  Code is left
 * byte-identical; comments only.
 */
1104 struct i386_saved_state
*regs
)
1106 int eip
, opcode
, io_port
;
1107 boolean_t data_16
= FALSE
;
1110 * Get the instruction.
/* Fetch the faulting opcode from the user code segment. */
1115 opcode
= inst_fetch(eip
, regs
->cs
);
/* Prefix bytes: data-size and rep — presumably looped over in the
 * lost switch framing. */
1118 case 0x66: /* data-size prefix */
1122 case 0xf3: /* rep prefix */
/* I/O with immediate 8-bit port number. */
1131 case 0xE4: /* inb imm */
1132 case 0xE5: /* inl imm */
1133 case 0xE6: /* outb imm */
1134 case 0xE7: /* outl imm */
1135 /* port is immediate byte */
1136 io_port
= inst_fetch(eip
, regs
->cs
);
/* I/O with the port number taken from %dx. */
1140 case 0xEC: /* inb dx */
1141 case 0xED: /* inl dx */
1142 case 0xEE: /* outb dx */
1143 case 0xEF: /* outl dx */
1144 case 0x6C: /* insb */
1145 case 0x6D: /* insl */
1146 case 0x6E: /* outsb */
1147 case 0x6F: /* outsl */
1148 /* port is in DX register */
1149 io_port
= regs
->edx
& 0xFFFF;
/* Flag 16-bit operand size to the emulator (matches v86_assist). */
1159 opcode
|= 0x6600; /* word IO */
/* Ask the I/O emulator to handle the access; the three outcomes
 * below each had a (now lost) return path. */
1161 switch (emulate_io(regs
, opcode
, io_port
)) {
1163 /* instruction executed */
1168 /* port mapped, retry instruction */
1172 /* port not mapped */
1179 kernel_preempt_check (void)
1183 mp_disable_preemption();
1184 myast
= ast_pending();
1185 if ((*myast
& AST_URGENT
) &&
1186 get_interrupt_level() == 1
1188 mp_enable_preemption_no_check();
1189 __asm__
volatile (" int $0xff");
1191 mp_enable_preemption_no_check();
1197 extern void db_i386_state(struct i386_saved_state
*regs
);
1199 #include <ddb/db_output.h>
1203 struct i386_saved_state
*regs
)
1205 db_printf("eip %8x\n", regs
->eip
);
1206 db_printf("trap %8x\n", regs
->trapno
);
1207 db_printf("err %8x\n", regs
->err
);
1208 db_printf("efl %8x\n", regs
->efl
);
1209 db_printf("ebp %8x\n", regs
->ebp
);
1210 db_printf("esp %8x\n", regs
->esp
);
1211 db_printf("uesp %8x\n", regs
->uesp
);
1212 db_printf("cs %8x\n", regs
->cs
& 0xff);
1213 db_printf("ds %8x\n", regs
->ds
& 0xff);
1214 db_printf("es %8x\n", regs
->es
& 0xff);
1215 db_printf("fs %8x\n", regs
->fs
& 0xff);
1216 db_printf("gs %8x\n", regs
->gs
& 0xff);
1217 db_printf("ss %8x\n", regs
->ss
& 0xff);
1218 db_printf("eax %8x\n", regs
->eax
);
1219 db_printf("ebx %8x\n", regs
->ebx
);
1220 db_printf("ecx %8x\n", regs
->ecx
);
1221 db_printf("edx %8x\n", regs
->edx
);
1222 db_printf("esi %8x\n", regs
->esi
);
1223 db_printf("edi %8x\n", regs
->edi
);
1226 #endif /* MACH_KDB */