2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 * Hardware trap/fault handler.
57 #include <mach_kgdb.h>
59 #include <mach_ldebug.h>
62 #include <i386/eflags.h>
63 #include <i386/trap.h>
64 #include <i386/pmap.h>
67 #include <mach/exception.h>
68 #include <mach/kern_return.h>
69 #include <mach/vm_param.h>
70 #include <mach/i386/thread_status.h>
72 #include <vm/vm_kern.h>
73 #include <vm/vm_fault.h>
75 #include <kern/kern_types.h>
76 #include <kern/processor.h>
77 #include <kern/thread.h>
78 #include <kern/task.h>
79 #include <kern/sched.h>
80 #include <kern/sched_prim.h>
81 #include <kern/exception.h>
83 #include <kern/misc_protos.h>
86 #include <kgdb/kgdb_defs.h>
87 #endif /* MACH_KGDB */
89 #include <i386/intel_read_fault.h>
92 #include <kgdb/kgdb_defs.h>
93 #endif /* MACH_KGDB */
96 #include <ddb/db_watch.h>
97 #include <ddb/db_run.h>
98 #include <ddb/db_break.h>
99 #include <ddb/db_trap.h>
100 #endif /* MACH_KDB */
104 #include <i386/io_emulate.h>
107 * Forward declarations
109 extern void user_page_fault_continue(
112 extern boolean_t
v86_assist(
114 struct i386_saved_state
*regs
);
116 extern boolean_t
check_io_fault(
117 struct i386_saved_state
*regs
);
119 extern int inst_fetch(
/*
 * NOTE(review): extraction fragment of thread_syscall_return() -- the
 * path a thread takes back to user mode after completing a syscall.
 * Several original lines appear to have been dropped here (the
 * parameter list, braces, and presumably the store of the return value
 * into regs->eax) -- confirm against the pristine source file.
 */
124 thread_syscall_return(
/* Current activation and its saved user-mode register frame. */
127 register thread_t thr_act
= current_thread();
128 register struct i386_saved_state
*regs
= USER_REGS(thr_act
);
/* Resume execution in user mode; this call does not return. */
130 thread_exception_return();
136 boolean_t debug_all_traps_with_kdb
= FALSE
;
137 extern struct db_watchpoint
*db_watchpoint_list
;
138 extern boolean_t db_watchpoints_inserted
;
139 extern boolean_t db_breakpoints_inserted
;
/*
 * NOTE(review): extraction fragment of thread_kdb_return() -- drops the
 * current thread into the kernel debugger (kdb) with its saved user
 * register state, then resumes user mode if kdb_trap() handled it.
 * The matching "#if MACH_LDEBUG" for the #endif below was dropped by
 * the extraction; confirm against the pristine source file.
 */
142 thread_kdb_return(void)
144 register thread_t thread
= current_thread();
145 register struct i386_saved_state
*regs
= USER_REGS(thread
);
/* Hand trap number, error code, and full frame to the debugger. */
147 if (kdb_trap(regs
->trapno
, regs
->err
, regs
)) {
/* Lock-debug sanity check: no mutexes may be held on return to user. */
149 assert(thread
->mutex_count
== 0);
150 #endif /* MACH_LDEBUG */
/* Resume user mode; does not return. */
151 thread_exception_return();
155 boolean_t let_ddb_vm_fault
= FALSE
;
157 #endif /* MACH_KDB */
/*
 * NOTE(review): extraction fragment of user_page_fault_continue() --
 * continuation invoked after a user-mode page fault's vm_fault()
 * completes, with result `kr`.  On success (or KERN_ABORTED) it
 * re-inserts any ddb breakpoints/watchpoints and returns to user mode;
 * otherwise it raises EXC_BAD_ACCESS with the faulting address (cr2).
 * Matching "#if MACH_KDB"/"#if MACH_LDEBUG" lines for the #endif
 * markers below were dropped by the extraction -- confirm against the
 * pristine source file.
 */
160 user_page_fault_continue(
163 register thread_t thread
= current_thread();
164 register struct i386_saved_state
*regs
= USER_REGS(thread
);
166 if ((kr
== KERN_SUCCESS
) || (kr
== KERN_ABORTED
)) {
/* (ddb) Restore breakpoints removed while servicing the fault. */
168 if (!db_breakpoints_inserted
) {
169 db_set_breakpoints();
/*
 * (ddb) A write fault on a watched address enters the debugger as a
 * watchpoint hit rather than returning silently.
 */
171 if (db_watchpoint_list
&&
172 db_watchpoints_inserted
&&
173 (regs
->err
& T_PF_WRITE
) &&
174 db_find_watchpoint(thread
->map
,
175 (vm_offset_t
)regs
->cr2
,
177 kdb_trap(T_WATCHPOINT
, 0, regs
);
178 #endif /* MACH_KDB */
/* Fault resolved: resume user mode; does not return. */
179 thread_exception_return();
/* Optionally funnel every unresolved trap into kdb first. */
184 if (debug_all_traps_with_kdb
&&
185 kdb_trap(regs
->trapno
, regs
->err
, regs
)) {
187 assert(thread
->mutex_count
== 0);
188 #endif /* MACH_LDEBUG */
189 thread_exception_return();
192 #endif /* MACH_KDB */
/* Unresolved fault: deliver EXC_BAD_ACCESS with the faulting address. */
194 i386_exception(EXC_BAD_ACCESS
, kr
, regs
->cr2
);
199 * Fault recovery in copyin/copyout routines.
203 uint32_t recover_addr
;
206 extern struct recovery recover_table
[];
207 extern struct recovery recover_table_end
[];
210 * Recovery from Successful fault in copyout does not
211 * return directly - it retries the pte check, since
212 * the 386 ignores write protection in kernel mode.
214 extern struct recovery retry_table
[];
215 extern struct recovery retry_table_end
[];
/*
 * Human-readable names for each hardware trap vector (TRAP_NAMES is
 * supplied by the i386 trap header), plus the table's element count
 * for bounds-checked lookups when printing trap diagnostics.
 */
217 const char * trap_type
[] = {TRAP_NAMES
};
218 int TRAP_TYPES
= sizeof(trap_type
)/sizeof(trap_type
[0]);
222 * Trap from kernel mode. Only page-fault errors are recoverable,
223 * and then only in special circumstances. All other errors are
224 * fatal. Return value indicates if trap was handled.
228 register struct i386_saved_state
*regs
)
231 unsigned int subcode
;
232 int interruptible
= THREAD_UNINT
;
235 kern_return_t result
= KERN_FAILURE
;
236 register thread_t thread
;
240 thread
= current_thread();
244 ast_taken(AST_PREEMPTION
, FALSE
);
255 case T_FLOATING_POINT_ERROR
:
261 * If the current map is a submap of the kernel map,
262 * and the address is within that map, fault on that
263 * map. If the same check is done in vm_fault
264 * (vm_map_lookup), we may deadlock on the kernel map
268 mp_disable_preemption();
270 && kdb_active
[cpu_number()]
271 && !let_ddb_vm_fault
) {
273 * Force kdb to handle this one.
275 mp_enable_preemption();
278 mp_enable_preemption();
279 #endif /* MACH_KDB */
280 subcode
= regs
->cr2
; /* get faulting address */
282 if (subcode
> LINEAR_KERNEL_ADDRESS
) {
284 } else if (thread
== THREAD_NULL
)
291 * Check for watchpoint on kernel static data.
292 * vm_fault would fail in this case
294 if (map
== kernel_map
&&
295 db_watchpoint_list
&&
296 db_watchpoints_inserted
&&
297 (code
& T_PF_WRITE
) &&
298 (vm_offset_t
)subcode
< vm_last_phys
&&
299 ((*(pte
= pmap_pte(kernel_pmap
, (vm_offset_t
)subcode
))) &
300 INTEL_PTE_WRITE
) == 0) {
301 *pte
= *pte
| INTEL_PTE_VALID
| INTEL_PTE_WRITE
; /* XXX need invltlb here? */
302 result
= KERN_SUCCESS
;
304 #endif /* MACH_KDB */
307 * Since the 386 ignores write protection in
308 * kernel mode, always try for write permission
309 * first. If that fails and the fault was a
310 * read fault, retry with read permission.
312 if (map
== kernel_map
) {
313 register struct recovery
*rp
;
315 interruptible
= THREAD_UNINT
;
316 for (rp
= recover_table
; rp
< recover_table_end
; rp
++) {
317 if (regs
->eip
== rp
->fault_addr
) {
318 interruptible
= THREAD_ABORTSAFE
;
323 result
= vm_fault(map
,
324 trunc_page((vm_offset_t
)subcode
),
325 VM_PROT_READ
|VM_PROT_WRITE
,
327 (map
== kernel_map
) ? interruptible
: THREAD_ABORTSAFE
, NULL
, 0);
330 if (result
== KERN_SUCCESS
) {
331 /* Look for watchpoints */
332 if (db_watchpoint_list
&&
333 db_watchpoints_inserted
&&
334 (code
& T_PF_WRITE
) &&
335 db_find_watchpoint(map
,
336 (vm_offset_t
)subcode
, regs
))
337 kdb_trap(T_WATCHPOINT
, 0, regs
);
340 #endif /* MACH_KDB */
341 if ((code
& T_PF_WRITE
) == 0 &&
342 result
== KERN_PROTECTION_FAILURE
)
345 * Must expand vm_fault by hand,
346 * so that we can ask for read-only access
347 * but enter a (kernel)writable mapping.
349 result
= intel_read_fault(map
,
350 trunc_page((vm_offset_t
)subcode
));
353 if (result
== KERN_SUCCESS
) {
355 * Certain faults require that we back up
358 register struct recovery
*rp
;
360 for (rp
= retry_table
; rp
< retry_table_end
; rp
++) {
361 if (regs
->eip
== rp
->fault_addr
) {
362 regs
->eip
= rp
->recover_addr
;
371 case T_GENERAL_PROTECTION
:
374 * If there is a failure recovery address
375 * for this fault, go there.
378 register struct recovery
*rp
;
380 for (rp
= recover_table
;
381 rp
< recover_table_end
;
383 if (regs
->eip
== rp
->fault_addr
) {
384 regs
->eip
= rp
->recover_addr
;
391 * Check thread recovery address also -
392 * v86 assist uses it.
394 if (thread
->recover
) {
395 regs
->eip
= thread
->recover
;
401 * Unanticipated page-fault errors in kernel
404 /* fall through... */
408 * Exception 15 is reserved but some chips may generate it
409 * spuriously. Seen at startup on AMD Athlon-64.
412 kprintf("kernel_trap() ignoring spurious trap 15\n");
417 * ...and return failure, so that locore can call into
421 kdp_i386_trap(type
, regs
, result
, regs
->cr2
);
429 * Called if both kernel_trap() and kdb_trap() fail.
433 register struct i386_saved_state
*regs
)
441 printf("trap type %d, code = %x, pc = %x\n",
442 type
, code
, regs
->eip
);
448 * Trap from user mode.
452 register struct i386_saved_state
*regs
)
456 unsigned int subcode
;
460 kern_return_t result
;
461 thread_t thread
= current_thread();
462 boolean_t kernel_act
= FALSE
;
464 if (regs
->efl
& EFL_VM
) {
466 * If hardware assist can handle exception,
467 * continue execution.
469 if (v86_assist(thread
, regs
))
481 exc
= EXC_ARITHMETIC
;
486 exc
= EXC_BREAKPOINT
;
491 exc
= EXC_BREAKPOINT
;
496 exc
= EXC_ARITHMETIC
;
497 code
= EXC_I386_INTO
;
500 case T_OUT_OF_BOUNDS
:
502 code
= EXC_I386_BOUND
;
505 case T_INVALID_OPCODE
:
506 exc
= EXC_BAD_INSTRUCTION
;
507 code
= EXC_I386_INVOP
;
519 case 10: /* invalid TSS == iret with NT flag set */
520 exc
= EXC_BAD_INSTRUCTION
;
521 code
= EXC_I386_INVTSSFLT
;
522 subcode
= regs
->err
& 0xffff;
525 case T_SEGMENT_NOT_PRESENT
:
526 exc
= EXC_BAD_INSTRUCTION
;
527 code
= EXC_I386_SEGNPFLT
;
528 subcode
= regs
->err
& 0xffff;
532 exc
= EXC_BAD_INSTRUCTION
;
533 code
= EXC_I386_STKFLT
;
534 subcode
= regs
->err
& 0xffff;
537 case T_GENERAL_PROTECTION
:
538 if (!(regs
->efl
& EFL_VM
)) {
539 if (check_io_fault(regs
))
542 exc
= EXC_BAD_INSTRUCTION
;
543 code
= EXC_I386_GPFLT
;
544 subcode
= regs
->err
& 0xffff;
549 prot
= VM_PROT_READ
|VM_PROT_WRITE
;
550 if (kernel_act
== FALSE
) {
551 if (!(regs
->err
& T_PF_WRITE
))
553 (void) user_page_fault_continue(vm_fault(thread
->map
,
554 trunc_page((vm_offset_t
)subcode
),
557 THREAD_ABORTSAFE
, NULL
, 0));
561 if (subcode
> LINEAR_KERNEL_ADDRESS
) {
564 result
= vm_fault(thread
->map
,
565 trunc_page((vm_offset_t
)subcode
),
568 (map
== kernel_map
) ? THREAD_UNINT
: THREAD_ABORTSAFE
, NULL
, 0);
569 if ((result
!= KERN_SUCCESS
) && (result
!= KERN_ABORTED
)) {
571 * Must expand vm_fault by hand,
572 * so that we can ask for read-only access
573 * but enter a (kernel) writable mapping.
575 result
= intel_read_fault(thread
->map
,
576 trunc_page((vm_offset_t
)subcode
));
578 user_page_fault_continue(result
);
583 case T_FLOATING_POINT_ERROR
:
589 Debugger("Unanticipated user trap");
591 #endif /* MACH_KGDB */
593 if (kdb_trap(type
, regs
->err
, regs
))
595 #endif /* MACH_KDB */
596 printf("user trap type %d, code = %x, pc = %x\n",
597 type
, regs
->err
, regs
->eip
);
603 if (debug_all_traps_with_kdb
&&
604 kdb_trap(type
, regs
->err
, regs
))
606 #endif /* MACH_KDB */
608 i386_exception(exc
, code
, subcode
);
/*
 * Runtime tuning knobs for the virtual-8086 interrupt-assist code
 * (v86_assist below): master enable, whether "unsafe" EFLAGS bits may
 * be passed through, whether STI/CLI are simulated, and whether a
 * simulated STI takes effect immediately or is deferred one
 * instruction (the hardware-faithful behavior).
 */
613 * V86 mode assist for interrupt handling.
615 boolean_t v86_assist_on
= TRUE
;
616 boolean_t v86_unsafe_ok
= FALSE
;
617 boolean_t v86_do_sti_cli
= TRUE
;
618 boolean_t v86_do_sti_immediate
= FALSE
;
/* Extra software flag bit: an IRET is pending for the v86 task. */
620 #define V86_IRET_PENDING 0x4000
628 register struct i386_saved_state
*regs
)
630 register struct v86_assist_state
*v86
= &thread
->machine
.pcb
->ims
.v86s
;
633 * Build an 8086 address. Use only when off is known to be 16 bits.
635 #define Addr8086(seg,off) ((((seg) & 0xffff) << 4) + (off))
637 #define EFL_V86_SAFE ( EFL_OF | EFL_DF | EFL_TF \
638 | EFL_SF | EFL_ZF | EFL_AF \
648 unsigned short flags
;
651 struct iret_32 iret_32
;
652 struct iret_16 iret_16
;
664 * If delayed STI pending, enable interrupts.
665 * Turn off tracing if on only to delay STI.
667 if (v86
->flags
& V86_IF_PENDING
) {
668 v86
->flags
&= ~V86_IF_PENDING
;
669 v86
->flags
|= EFL_IF
;
670 if ((v86
->flags
& EFL_TF
) == 0)
671 regs
->efl
&= ~EFL_TF
;
674 if (regs
->trapno
== T_DEBUG
) {
676 if (v86
->flags
& EFL_TF
) {
678 * Trace flag was also set - it has priority
680 return FALSE
; /* handle as single-step */
683 * Fall through to check for interrupts.
686 else if (regs
->trapno
== T_GENERAL_PROTECTION
) {
688 * General protection error - must be an 8086 instruction
692 boolean_t addr_32
= FALSE
;
693 boolean_t data_32
= FALSE
;
697 * Set up error handler for bad instruction/data
700 __asm__("movl $(addr_error), %0" : : "m" (thread
->recover
));
704 unsigned char opcode
;
708 return FALSE
; /* GP fault: IP out of range */
711 opcode
= *(unsigned char *)Addr8086(regs
->cs
,eip
);
714 case 0xf0: /* lock */
715 case 0xf2: /* repne */
716 case 0xf3: /* repe */
726 case 0x66: /* data size */
730 case 0x67: /* address size */
734 case 0xe4: /* inb imm */
735 case 0xe5: /* inw imm */
736 case 0xe6: /* outb imm */
737 case 0xe7: /* outw imm */
738 io_port
= *(unsigned char *)Addr8086(regs
->cs
, eip
);
742 case 0xec: /* inb dx */
743 case 0xed: /* inw dx */
744 case 0xee: /* outb dx */
745 case 0xef: /* outw dx */
746 case 0x6c: /* insb */
747 case 0x6d: /* insw */
748 case 0x6e: /* outsb */
749 case 0x6f: /* outsw */
750 io_port
= regs
->edx
& 0xffff;
754 opcode
|= 0x6600; /* word IO */
756 switch (emulate_io(regs
, opcode
, io_port
)) {
758 /* instruction executed */
761 /* port mapped, retry instruction */
765 /* port not mapped */
772 if (!v86_do_sti_cli
) {
777 v86
->flags
&= ~EFL_IF
;
778 /* disable simulated interrupts */
783 if (!v86_do_sti_cli
) {
788 if ((v86
->flags
& EFL_IF
) == 0) {
789 if (v86_do_sti_immediate
) {
790 v86
->flags
|= EFL_IF
;
792 v86
->flags
|= V86_IF_PENDING
;
795 /* single step to set IF next inst. */
800 case 0x9c: /* pushf */
807 if ((v86
->flags
& EFL_IF
) == 0)
810 if ((v86
->flags
& EFL_TF
) == 0)
812 else flags
|= EFL_TF
;
817 else if (sp
> 0xffff)
819 size
= (data_32
) ? 4 : 2;
823 if (copyout((char *)&flags
,
824 (user_addr_t
)Addr8086(regs
->ss
,sp
),
830 regs
->uesp
= (regs
->uesp
& 0xffff0000) | sp
;
834 case 0x9d: /* popf */
842 else if (sp
> 0xffff)
846 if (sp
> 0xffff - sizeof(int))
848 nflags
= *(int *)Addr8086(regs
->ss
,sp
);
852 if (sp
> 0xffff - sizeof(short))
854 nflags
= *(unsigned short *)
855 Addr8086(regs
->ss
,sp
);
861 regs
->uesp
= (regs
->uesp
& 0xffff0000) | sp
;
863 if (v86
->flags
& V86_IRET_PENDING
) {
864 v86
->flags
= nflags
& (EFL_TF
| EFL_IF
);
865 v86
->flags
|= V86_IRET_PENDING
;
867 v86
->flags
= nflags
& (EFL_TF
| EFL_IF
);
869 regs
->efl
= (regs
->efl
& ~EFL_V86_SAFE
)
870 | (nflags
& EFL_V86_SAFE
);
873 case 0xcf: /* iret */
877 union iret_struct iret_struct
;
879 v86
->flags
&= ~V86_IRET_PENDING
;
883 else if (sp
> 0xffff)
887 if (sp
> 0xffff - sizeof(struct iret_32
))
889 iret_struct
.iret_32
=
890 *(struct iret_32
*) Addr8086(regs
->ss
,sp
);
891 sp
+= sizeof(struct iret_32
);
894 if (sp
> 0xffff - sizeof(struct iret_16
))
896 iret_struct
.iret_16
=
897 *(struct iret_16
*) Addr8086(regs
->ss
,sp
);
898 sp
+= sizeof(struct iret_16
);
903 regs
->uesp
= (regs
->uesp
& 0xffff0000) | sp
;
906 eip
= iret_struct
.iret_32
.eip
;
907 regs
->cs
= iret_struct
.iret_32
.cs
& 0xffff;
908 nflags
= iret_struct
.iret_32
.eflags
;
911 eip
= iret_struct
.iret_16
.ip
;
912 regs
->cs
= iret_struct
.iret_16
.cs
;
913 nflags
= iret_struct
.iret_16
.flags
;
916 v86
->flags
= nflags
& (EFL_TF
| EFL_IF
);
917 regs
->efl
= (regs
->efl
& ~EFL_V86_SAFE
)
918 | (nflags
& EFL_V86_SAFE
);
923 * Instruction not emulated here.
928 break; /* exit from 'while TRUE' */
930 regs
->eip
= (regs
->eip
& 0xffff0000) | eip
;
934 * Not a trap we handle.
940 if ((v86
->flags
& EFL_IF
) && ((v86
->flags
& V86_IRET_PENDING
)==0)) {
942 struct v86_interrupt_table
*int_table
;
947 int_table
= (struct v86_interrupt_table
*) v86
->int_table
;
948 int_count
= v86
->int_count
;
951 for (i
= 0; i
< int_count
; int_table
++, i
++) {
952 if (!int_table
->mask
&& int_table
->count
> 0) {
954 vec
= int_table
->vec
;
960 * Take this interrupt
963 struct iret_16 iret_16
;
964 struct int_vec int_vec
;
966 sp
= regs
->uesp
& 0xffff;
967 if (sp
< sizeof(struct iret_16
))
969 sp
-= sizeof(struct iret_16
);
970 iret_16
.ip
= regs
->eip
;
971 iret_16
.cs
= regs
->cs
;
972 iret_16
.flags
= regs
->efl
& 0xFFFF;
973 if ((v86
->flags
& EFL_TF
) == 0)
974 iret_16
.flags
&= ~EFL_TF
;
975 else iret_16
.flags
|= EFL_TF
;
977 (void) memcpy((char *) &int_vec
,
978 (char *) (sizeof(struct int_vec
) * vec
),
979 sizeof (struct int_vec
));
980 if (copyout((char *)&iret_16
,
981 (user_addr_t
)Addr8086(regs
->ss
,sp
),
982 sizeof(struct iret_16
)))
984 regs
->uesp
= (regs
->uesp
& 0xFFFF0000) | (sp
& 0xffff);
985 regs
->eip
= int_vec
.ip
;
986 regs
->cs
= int_vec
.cs
;
987 regs
->efl
&= ~EFL_TF
;
988 v86
->flags
&= ~(EFL_IF
| EFL_TF
);
989 v86
->flags
|= V86_IRET_PENDING
;
997 * On address error, report a page fault.
998 * XXX report GP fault - we don`t save
999 * the faulting address.
1002 __asm__("addr_error:;");
1003 thread
->recover
= 0;
1007 * On stack address error, return stack fault (12).
1010 thread
->recover
= 0;
1011 regs
->trapno
= T_STACK_FAULT
;
1016 * Handle AST traps for i386.
1017 * Check for delayed floating-point exception from
1021 extern void log_thread_action (thread_t
, char *);
/*
 * NOTE(review): extraction fragment of i386_astintr() -- services
 * pending ASTs (asynchronous system traps) for i386, distinguishing a
 * delayed floating-point exception (AST_I386_FP) from ordinary AST
 * reasons.  Lines between the visible statements (the FP-error handler
 * body, the ast_taken() call, splx, braces) were dropped by the
 * extraction; confirm against the pristine source file.
 */
1024 i386_astintr(int preemption
)
1026 ast_t
*my_ast
, mask
= AST_ALL
;
1029 s
= splsched(); /* block interrupts to check reasons */
1030 mp_disable_preemption();
1031 my_ast
= ast_pending();
1032 if (*my_ast
& AST_I386_FP
) {
1034 * AST was for delayed floating-point exception -
1035 * FP interrupt occurred while in kernel.
1036 * Turn off this AST reason and handle the FPU error.
1039 ast_off(AST_I386_FP
);
1040 mp_enable_preemption();
1047 * Not an FPU trap. Handle the AST.
1048 * Interrupts are still blocked.
/* When called for preemption, only the preemption reasons apply. */
1053 mask
= AST_PREEMPTION
;
1054 mp_enable_preemption();
1056 mp_enable_preemption();
1059 mp_enable_preemption();
1068 * Handle exceptions for i386.
1070 * If we are an AT bus machine, we must turn off the AST for a
1071 * delayed floating-point exception.
1073 * If we are providing floating-point emulation, we may have
1074 * to retrieve the real register values from the floating point
1084 exception_data_type_t codes
[EXCEPTION_CODE_MAX
];
1087 * Turn off delayed FPU error handling.
1090 mp_disable_preemption();
1091 ast_off(AST_I386_FP
);
1092 mp_enable_preemption();
1095 codes
[0] = code
; /* new exception interface */
1097 exception_triage(exc
, codes
, 2);
1103 struct i386_saved_state
*regs
)
1105 int eip
, opcode
, io_port
;
1106 boolean_t data_16
= FALSE
;
1109 * Get the instruction.
1114 opcode
= inst_fetch(eip
, regs
->cs
);
1117 case 0x66: /* data-size prefix */
1121 case 0xf3: /* rep prefix */
1130 case 0xE4: /* inb imm */
1131 case 0xE5: /* inl imm */
1132 case 0xE6: /* outb imm */
1133 case 0xE7: /* outl imm */
1134 /* port is immediate byte */
1135 io_port
= inst_fetch(eip
, regs
->cs
);
1139 case 0xEC: /* inb dx */
1140 case 0xED: /* inl dx */
1141 case 0xEE: /* outb dx */
1142 case 0xEF: /* outl dx */
1143 case 0x6C: /* insb */
1144 case 0x6D: /* insl */
1145 case 0x6E: /* outsb */
1146 case 0x6F: /* outsl */
1147 /* port is in DX register */
1148 io_port
= regs
->edx
& 0xFFFF;
1158 opcode
|= 0x6600; /* word IO */
1160 switch (emulate_io(regs
, opcode
, io_port
)) {
1162 /* instruction executed */
1167 /* port mapped, retry instruction */
1171 /* port not mapped */
/*
 * NOTE(review): extraction fragment of kernel_preempt_check() -- if an
 * urgent AST is pending and we are at the outermost interrupt level,
 * trigger a software interrupt (int $0xff) so the preemption is taken
 * immediately.  The declaration of `myast` and some braces were dropped
 * by the extraction; confirm against the pristine source file.
 */
1178 kernel_preempt_check (void)
1182 mp_disable_preemption();
1183 myast
= ast_pending();
/* Urgent AST at interrupt level 1: take the preemption trap now. */
1184 if ((*myast
& AST_URGENT
) &&
1185 get_interrupt_level() == 1
1187 mp_enable_preemption_no_check();
1188 __asm__
volatile (" int $0xff");
1190 mp_enable_preemption_no_check();
1196 extern void db_i386_state(struct i386_saved_state
*regs
);
1198 #include <ddb/db_output.h>
1202 struct i386_saved_state
*regs
)
1204 db_printf("eip %8x\n", regs
->eip
);
1205 db_printf("trap %8x\n", regs
->trapno
);
1206 db_printf("err %8x\n", regs
->err
);
1207 db_printf("efl %8x\n", regs
->efl
);
1208 db_printf("ebp %8x\n", regs
->ebp
);
1209 db_printf("esp %8x\n", regs
->esp
);
1210 db_printf("uesp %8x\n", regs
->uesp
);
1211 db_printf("cs %8x\n", regs
->cs
& 0xff);
1212 db_printf("ds %8x\n", regs
->ds
& 0xff);
1213 db_printf("es %8x\n", regs
->es
& 0xff);
1214 db_printf("fs %8x\n", regs
->fs
& 0xff);
1215 db_printf("gs %8x\n", regs
->gs
& 0xff);
1216 db_printf("ss %8x\n", regs
->ss
& 0xff);
1217 db_printf("eax %8x\n", regs
->eax
);
1218 db_printf("ebx %8x\n", regs
->ebx
);
1219 db_printf("ecx %8x\n", regs
->ecx
);
1220 db_printf("edx %8x\n", regs
->edx
);
1221 db_printf("esi %8x\n", regs
->esi
);
1222 db_printf("edi %8x\n", regs
->edi
);
1225 #endif /* MACH_KDB */