/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Hardware trap/fault handler.
 */

#include <cpus.h>
#include <fast_idle.h>
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/etap_macros.h>
#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#if	MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif	/* MACH_KGDB */

#include <i386/intel_read_fault.h>

#if	MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif	/* MACH_KGDB */

#if	MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif	/* MACH_KDB */

#include <string.h>

#include <i386/io_emulate.h>

/*
 * Forward declarations
 */
extern void		user_page_fault_continue(
				kern_return_t		kr);

extern boolean_t	v86_assist(
				thread_t		thread,
				struct i386_saved_state	*regs);

extern boolean_t	check_io_fault(
				struct i386_saved_state	*regs);

extern int		inst_fetch(
				int			eip,
				int			cs);
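
/*
 * thread_syscall_return: load the syscall's return value into the saved
 * user EAX and resume the user thread.  This routine does not return to
 * its caller.
 */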
void
thread_syscall_return(
	kern_return_t	ret)
{
	register thread_act_t	thr_act = current_act();
	register struct i386_saved_state *regs = USER_REGS(thr_act);

	regs->eax = ret;
	thread_exception_return();
	/*NOTREACHED*/
}

#if	MACH_KDB
boolean_t	debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;
void
thread_kdb_return(void)
{
	register thread_act_t	thr_act = current_act();
	register thread_t	cur_thr = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thr_act);

	if (kdb_trap(regs->trapno, regs->err, regs)) {
#if	MACH_LDEBUG
		assert(cur_thr->mutex_count == 0);
#endif	/* MACH_LDEBUG */
		check_simple_locks();
		thread_exception_return();
		/*NOTREACHED*/
	}
}

boolean_t let_ddb_vm_fault = FALSE;

#if	NCPUS > 1
extern int kdb_active[NCPUS];
#endif	/* NCPUS > 1 */

#endif	/* MACH_KDB */
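
/*
 * user_page_fault_continue: completion routine for page faults taken in
 * user mode.  On success (or an aborted fault) resume the thread,
 * checking debugger watchpoints first when KDB is configured; otherwise
 * raise EXC_BAD_ACCESS with the fault result and faulting address.
 */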
void
user_page_fault_continue(
	kern_return_t	kr)
{
	register thread_act_t	thr_act = current_act();
	register thread_t	cur_thr = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thr_act);

	if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if	MACH_KDB
		if (!db_breakpoints_inserted) {
			db_set_breakpoints();
		}
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (regs->err & T_PF_WRITE) &&
		    db_find_watchpoint(thr_act->map,
				       (vm_offset_t)regs->cr2,
				       regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
#endif	/* MACH_KDB */
		thread_exception_return();
		/*NOTREACHED*/
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(regs->trapno, regs->err, regs)) {
#if	MACH_LDEBUG
		assert(cur_thr->mutex_count == 0);
#endif	/* MACH_LDEBUG */
		check_simple_locks();
		thread_exception_return();
		/*NOTREACHED*/
	}
#endif	/* MACH_KDB */

	i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
	/*NOTREACHED*/
}

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
	int	fault_addr;
	int	recover_addr;
};

extern struct recovery	recover_table[];
extern struct recovery	recover_table_end[];

/*
 * Recovery from a successful fault in copyout does not
 * return directly - it retries the PTE check, since
 * the 386 ignores write protection in kernel mode.
 */
extern struct recovery	retry_table[];
extern struct recovery	retry_table_end[];
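
/*
 * Each recovery entry pairs the EIP of a faulting instruction with the
 * address to resume at; kernel_trap() scans recover_table to decide
 * whether a kernel-mode fault is interruptible and to redirect the EIP,
 * and retry_table to back up the EIP after a successful fault.  (The
 * tables themselves are expected to be emitted by the assembly-level
 * copyin/copyout stubs.)
 */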

char *	trap_type[] = {TRAP_NAMES};
int	TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);

/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
boolean_t
kernel_trap(
	register struct i386_saved_state	*regs)
{
	int			code;
	int			subcode;
	int			interruptible;
	register int		type;
	vm_map_t		map;
	kern_return_t		result;
	register thread_t	thread;
	thread_act_t		thr_act;
	etap_data_t		probe_data;
	pt_entry_t		*pte;
	extern vm_offset_t	vm_last_phys;

	type = regs->trapno;
	code = regs->err;
	thread = current_thread();
	thr_act = current_act();

	ETAP_DATA_LOAD(probe_data[0], regs->trapno);
	ETAP_DATA_LOAD(probe_data[1], MACH_PORT_NULL);
	ETAP_DATA_LOAD(probe_data[2], MACH_PORT_NULL);
	ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
			0,
			thread,
			&probe_data,
			ETAP_DATA_ENTRY*3);

	switch (type) {

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return (TRUE);

	    case T_PAGE_FAULT:
		/*
		 * If the current map is a submap of the kernel map,
		 * and the address is within that map, fault on that
		 * map.  If the same check is done in vm_fault
		 * (vm_map_lookup), we may deadlock on the kernel map
		 * lock.
		 */
#if	MACH_KDB
		mp_disable_preemption();
		if (db_active
#if	NCPUS > 1
		    && kdb_active[cpu_number()]
#endif	/* NCPUS > 1 */
		    && !let_ddb_vm_fault) {
			/*
			 * Force kdb to handle this one.
			 */
			mp_enable_preemption();
			return (FALSE);
		}
		mp_enable_preemption();
#endif	/* MACH_KDB */
		subcode = regs->cr2;	/* get faulting address */

		if (subcode > LINEAR_KERNEL_ADDRESS) {
		    map = kernel_map;
		    subcode -= LINEAR_KERNEL_ADDRESS;
		} else if (thr_act == THR_ACT_NULL || thread == THREAD_NULL)
		    map = kernel_map;
		else {
		    map = thr_act->map;
		}

#if	MACH_KDB
		/*
		 * Check for watchpoint on kernel static data.
		 * vm_fault would fail in this case.
		 */
		if (map == kernel_map &&
		    db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (code & T_PF_WRITE) &&
		    (vm_offset_t)subcode < vm_last_phys &&
		    ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
		      INTEL_PTE_WRITE) == 0) {
			*pte = INTEL_PTE_VALID | INTEL_PTE_WRITE |
				pa_to_pte(trunc_page((vm_offset_t)subcode) -
					  VM_MIN_KERNEL_ADDRESS);
			result = KERN_SUCCESS;
		} else
#endif	/* MACH_KDB */
		{
			/*
			 * Since the 386 ignores write protection in
			 * kernel mode, always try for write permission
			 * first.  If that fails and the fault was a
			 * read fault, retry with read permission.
			 */
			if (map == kernel_map) {
				register struct recovery *rp;

				interruptible = THREAD_UNINT;
				for (rp = recover_table; rp < recover_table_end; rp++) {
					if (regs->eip == rp->fault_addr) {
						interruptible = THREAD_ABORTSAFE;
						break;
					}
				}
			}

			result = vm_fault(map,
					  trunc_page((vm_offset_t)subcode),
					  VM_PROT_READ|VM_PROT_WRITE,
					  FALSE,
					  (map == kernel_map) ?
					      interruptible : THREAD_ABORTSAFE);
		}
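
		/*
		 * At this point `result' reflects the write-permission
		 * attempt; a read fault that failed only on protection
		 * is retried read-only below via intel_read_fault().
		 */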
#if	MACH_KDB
		if (result == KERN_SUCCESS) {
		    /* Look for watchpoints */
		    if (db_watchpoint_list &&
			db_watchpoints_inserted &&
			(code & T_PF_WRITE) &&
			db_find_watchpoint(map,
					   (vm_offset_t)subcode, regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
		}
		else
#endif	/* MACH_KDB */
		if ((code & T_PF_WRITE) == 0 &&
		    result == KERN_PROTECTION_FAILURE) {
		    /*
		     * Must expand vm_fault by hand,
		     * so that we can ask for read-only access
		     * but enter a (kernel) writable mapping.
		     */
		    result = intel_read_fault(map,
					      trunc_page((vm_offset_t)subcode));
		}

		if (result == KERN_SUCCESS) {
		    /*
		     * Certain faults require that we back up
		     * the EIP.
		     */
		    register struct recovery *rp;

		    for (rp = retry_table; rp < retry_table_end; rp++) {
			if (regs->eip == rp->fault_addr) {
			    regs->eip = rp->recover_addr;
			    break;
			}
		    }
		    return (TRUE);
		}

		/* fall through */

	    case T_GENERAL_PROTECTION:
		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		{
		    register struct recovery *rp;

		    for (rp = recover_table;
			 rp < recover_table_end;
			 rp++) {
			if (regs->eip == rp->fault_addr) {
			    regs->eip = rp->recover_addr;
			    return (TRUE);
			}
		    }
		}

		/*
		 * Check thread recovery address also -
		 * v86 assist uses it.
		 */
		if (thread->recover) {
		    regs->eip = thread->recover;
		    thread->recover = 0;
		    return (TRUE);
		}

		/*
		 * Unanticipated page-fault errors in kernel
		 * should not return to user.
		 */
		/* fall through... */

	    default:
		/*
		 * ...and return failure, so that locore can call into
		 * debugger.
		 */
#if	MACH_KDP
		kdp_i386_trap(type, regs, result, regs->cr2);
#endif	/* MACH_KDP */
		return (FALSE);
	}
	return (TRUE);
}

/*
 *	Called if both kernel_trap() and kdb_trap() fail.
 */
void
panic_trap(
	register struct i386_saved_state	*regs)
{
	int		code;
	register int	type;

	type = regs->trapno;
	code = regs->err;

	printf("trap type %d, code = %x, pc = %x\n",
	       type, code, regs->eip);
	panic("trap");
}

/*
 *	Trap from user mode.
 */
void
user_trap(
	register struct i386_saved_state	*regs)
{
	int			exc;
	int			code;
	int			subcode;
	register int		type;
	vm_map_t		map;
	vm_prot_t		prot;
	kern_return_t		result;
	register thread_act_t	thr_act = current_act();
	thread_t		thread = (thr_act ? thr_act->thread : THREAD_NULL);
	boolean_t		kernel_act = thr_act->kernel_loaded;
	etap_data_t		probe_data;

	if (regs->efl & EFL_VM) {
	    /*
	     * If hardware assist can handle exception,
	     * continue execution.
	     */
	    if (v86_assist(thread, regs))
		return;
	}

	type = regs->trapno;
	code = 0;
	subcode = 0;

	switch (type) {

	    case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	    case T_DEBUG:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_SGL;
		break;

	    case T_INT3:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	    case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	    case T_OUT_OF_BOUNDS:
		exc = EXC_SOFTWARE;
		code = EXC_I386_BOUND;
		break;

	    case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	    case 10:		/* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_GENERAL_PROTECTION:
		if (!(regs->efl & EFL_VM)) {
		    if (check_io_fault(regs))
			return;
		}
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_PAGE_FAULT:
		subcode = regs->cr2;
		prot = VM_PROT_READ|VM_PROT_WRITE;
		if (kernel_act == FALSE) {
			if (!(regs->err & T_PF_WRITE))
				prot = VM_PROT_READ;
			(void) user_page_fault_continue(vm_fault(thr_act->map,
				trunc_page((vm_offset_t)subcode),
				prot,
				FALSE,
				THREAD_ABORTSAFE));
			/*NOTREACHED*/
		}
		else {
			if (subcode > LINEAR_KERNEL_ADDRESS) {
				map = kernel_map;
				subcode -= LINEAR_KERNEL_ADDRESS;
			}
			result = vm_fault(thr_act->map,
				trunc_page((vm_offset_t)subcode),
				prot,
				FALSE,
				(map == kernel_map) ?
				    THREAD_UNINT : THREAD_ABORTSAFE);
			if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) {
				/*
				 * Must expand vm_fault by hand,
				 * so that we can ask for read-only access
				 * but enter a (kernel) writable mapping.
				 */
				result = intel_read_fault(thr_act->map,
						trunc_page((vm_offset_t)subcode));
			}
			user_page_fault_continue(result);
			/*NOTREACHED*/
		}
		break;

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    default:
#if	MACH_KGDB
		Debugger("Unanticipated user trap");
		return;
#endif	/* MACH_KGDB */
#if	MACH_KDB
		if (kdb_trap(type, regs->err, regs))
		    return;
#endif	/* MACH_KDB */
		printf("user trap type %d, code = %x, pc = %x\n",
		       type, regs->err, regs->eip);
		panic("user trap");
		return;
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(type, regs->err, regs))
		return;
#endif	/* MACH_KDB */

#if	ETAP_EVENT_MONITOR
	if (thread != THREAD_NULL) {
		ETAP_DATA_LOAD(probe_data[0], regs->trapno);
		ETAP_DATA_LOAD(probe_data[1],
			       thr_act->exc_actions[exc].port);
		ETAP_DATA_LOAD(probe_data[2],
			       thr_act->task->exc_actions[exc].port);
		ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
				0,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}
#endif	/* ETAP_EVENT_MONITOR */

	i386_exception(exc, code, subcode);
	/*NOTREACHED*/
}

/*
 *	V86 mode assist for interrupt handling.
 */
boolean_t	v86_assist_on = TRUE;
boolean_t	v86_unsafe_ok = FALSE;
boolean_t	v86_do_sti_cli = TRUE;
boolean_t	v86_do_sti_immediate = FALSE;

#define	V86_IRET_PENDING 0x4000
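
/*
 * V86_IRET_PENDING lives in v86->flags alongside the simulated EFL_IF
 * and EFL_TF bits: it is set while a simulated interrupt frame has been
 * pushed and the matching iret has not yet executed.
 *
 * v86_assist() below emulates the privileged 8086 instructions that
 * fault in virtual-8086 mode (I/O, cli/sti, pushf/popf, iret) and
 * delivers simulated interrupts from the task's v86 interrupt table;
 * the v86_assist_on and v86_do_sti_* booleans above are run-time
 * switches for this emulation.
 */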
boolean_t
v86_assist(
	thread_t				thread,
	register struct i386_saved_state	*regs)
{
	register struct v86_assist_state *v86 = &thread->top_act->mact.pcb->ims.v86s;

/*
 * Build an 8086 address. Use only when off is known to be 16 bits.
 */
#define	Addr8086(seg,off)	((((seg) & 0xffff) << 4) + (off))

#define	EFL_V86_SAFE		(  EFL_OF | EFL_DF | EFL_TF \
				 | EFL_SF | EFL_ZF | EFL_AF \
				 | EFL_PF | EFL_CF )

	struct iret_32 {
		int		eip;
		int		cs;
		int		eflags;
	};
	struct iret_16 {
		unsigned short	ip;
		unsigned short	cs;
		unsigned short	flags;
	};
	union iret_struct {
		struct iret_32	iret_32;
		struct iret_16	iret_16;
	};

	struct int_vec {
		unsigned short	ip;
		unsigned short	cs;
	};

	if (!v86_assist_on)
	    return FALSE;

	/*
	 * If delayed STI pending, enable interrupts.
	 * Turn off tracing if on only to delay STI.
	 */
	if (v86->flags & V86_IF_PENDING) {
	    v86->flags &= ~V86_IF_PENDING;
	    v86->flags |=  EFL_IF;
	    if ((v86->flags & EFL_TF) == 0)
		regs->efl &= ~EFL_TF;
	}

	if (regs->trapno == T_DEBUG) {

	    if (v86->flags & EFL_TF) {
		/*
		 * Trace flag was also set - it has priority
		 */
		return FALSE;			/* handle as single-step */
	    }
	    /*
	     * Fall through to check for interrupts.
	     */
	}
	else if (regs->trapno == T_GENERAL_PROTECTION) {
	    /*
	     * General protection error - must be an 8086 instruction
	     * to emulate.
	     */
	    register int	eip;
	    boolean_t		addr_32 = FALSE;
	    boolean_t		data_32 = FALSE;
	    int			io_port;

	    /*
	     * Set up error handler for bad instruction/data
	     * fetches.
	     */
	    __asm__("movl $(addr_error), %0" : : "m" (thread->recover));

	    eip = regs->eip;
	    while (TRUE) {
		unsigned char	opcode;

		if (eip > 0xFFFF) {
		    thread->recover = 0;
		    return FALSE;	/* GP fault: IP out of range */
		}

		opcode = *(unsigned char *)Addr8086(regs->cs,eip);
		eip++;
		switch (opcode) {
		    case 0xf0:		/* lock */
		    case 0xf2:		/* repne */
		    case 0xf3:		/* repe */
		    case 0x2e:		/* cs */
		    case 0x36:		/* ss */
		    case 0x3e:		/* ds */
		    case 0x26:		/* es */
		    case 0x64:		/* fs */
		    case 0x65:		/* gs */
			/* ignore prefixes */
			continue;

		    case 0x66:		/* data size */
			data_32 = TRUE;
			continue;

		    case 0x67:		/* address size */
			addr_32 = TRUE;
			continue;

		    case 0xe4:		/* inb imm */
		    case 0xe5:		/* inw imm */
		    case 0xe6:		/* outb imm */
		    case 0xe7:		/* outw imm */
			io_port = *(unsigned char *)Addr8086(regs->cs, eip);
			eip++;
			goto do_in_out;

		    case 0xec:		/* inb dx */
		    case 0xed:		/* inw dx */
		    case 0xee:		/* outb dx */
		    case 0xef:		/* outw dx */
		    case 0x6c:		/* insb */
		    case 0x6d:		/* insw */
		    case 0x6e:		/* outsb */
		    case 0x6f:		/* outsw */
			io_port = regs->edx & 0xffff;

		    do_in_out:
			if (!data_32)
			    opcode |= 0x6600;	/* word IO */

			switch (emulate_io(regs, opcode, io_port)) {
			    case EM_IO_DONE:
				/* instruction executed */
				break;
			    case EM_IO_RETRY:
				/* port mapped, retry instruction */
				thread->recover = 0;
				return TRUE;
			    case EM_IO_ERROR:
				/* port not mapped */
				thread->recover = 0;
				return FALSE;
			}
			break;

		    case 0xfa:		/* cli */
			if (!v86_do_sti_cli) {
			    thread->recover = 0;
			    return (FALSE);
			}

			v86->flags &= ~EFL_IF;
					/* disable simulated interrupts */
			break;

		    case 0xfb:		/* sti */
			if (!v86_do_sti_cli) {
			    thread->recover = 0;
			    return (FALSE);
			}

			if ((v86->flags & EFL_IF) == 0) {
			    if (v86_do_sti_immediate) {
				v86->flags |= EFL_IF;
			    }
			    else {
				v86->flags |= V86_IF_PENDING;
				regs->efl |= EFL_TF;
					/* single step to set IF next inst. */
			    }
			}
			break;

		    case 0x9c:		/* pushf */
		    {
			int		flags;
			vm_offset_t	sp;
			int		size;

			flags = regs->efl;
			if ((v86->flags & EFL_IF) == 0)
			    flags &= ~EFL_IF;

			if ((v86->flags & EFL_TF) == 0)
			    flags &= ~EFL_TF;
			else flags |= EFL_TF;

			sp = regs->uesp;
			if (!addr_32)
			    sp &= 0xffff;
			else if (sp > 0xffff)
			    goto stack_error;
			size = (data_32) ? 4 : 2;
			if (sp < size)
			    goto stack_error;
			sp -= size;
			if (copyout((char *)&flags,
				    (char *)Addr8086(regs->ss,sp),
				    size))
			    goto addr_error;
			if (addr_32)
			    regs->uesp = sp;
			else
			    regs->uesp = (regs->uesp & 0xffff0000) | sp;
			break;
		    }
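
		    /*
		     * pushf/popf/iret keep the virtual IF and TF bits in
		     * v86->flags rather than in the live EFLAGS; only the
		     * bits in EFL_V86_SAFE are ever copied into regs->efl.
		     */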

		    case 0x9d:		/* popf */
		    {
			vm_offset_t	sp;
			int		nflags;

			sp = regs->uesp;
			if (!addr_32)
			    sp &= 0xffff;
			else if (sp > 0xffff)
			    goto stack_error;

			if (data_32) {
			    if (sp > 0xffff - sizeof(int))
				goto stack_error;
			    nflags = *(int *)Addr8086(regs->ss,sp);
			    sp += sizeof(int);
			}
			else {
			    if (sp > 0xffff - sizeof(short))
				goto stack_error;
			    nflags = *(unsigned short *)
					Addr8086(regs->ss,sp);
			    sp += sizeof(short);
			}
			if (addr_32)
			    regs->uesp = sp;
			else
			    regs->uesp = (regs->uesp & 0xffff0000) | sp;

			if (v86->flags & V86_IRET_PENDING) {
				v86->flags = nflags & (EFL_TF | EFL_IF);
				v86->flags |= V86_IRET_PENDING;
			} else {
				v86->flags = nflags & (EFL_TF | EFL_IF);
			}
			regs->efl = (regs->efl & ~EFL_V86_SAFE)
				     | (nflags & EFL_V86_SAFE);
			break;
		    }

		    case 0xcf:		/* iret */
		    {
			vm_offset_t	sp;
			int		nflags;
			union iret_struct iret_struct;

			v86->flags &= ~V86_IRET_PENDING;
			sp = regs->uesp;
			if (!addr_32)
			    sp &= 0xffff;
			else if (sp > 0xffff)
			    goto stack_error;

			if (data_32) {
			    if (sp > 0xffff - sizeof(struct iret_32))
				goto stack_error;
			    iret_struct.iret_32 =
				*(struct iret_32 *) Addr8086(regs->ss,sp);
			    sp += sizeof(struct iret_32);
			}
			else {
			    if (sp > 0xffff - sizeof(struct iret_16))
				goto stack_error;
			    iret_struct.iret_16 =
				*(struct iret_16 *) Addr8086(regs->ss,sp);
			    sp += sizeof(struct iret_16);
			}
			if (addr_32)
			    regs->uesp = sp;
			else
			    regs->uesp = (regs->uesp & 0xffff0000) | sp;

			if (data_32) {
			    eip	      = iret_struct.iret_32.eip;
			    regs->cs  = iret_struct.iret_32.cs & 0xffff;
			    nflags    = iret_struct.iret_32.eflags;
			}
			else {
			    eip       = iret_struct.iret_16.ip;
			    regs->cs  = iret_struct.iret_16.cs;
			    nflags    = iret_struct.iret_16.flags;
			}

			v86->flags = nflags & (EFL_TF | EFL_IF);
			regs->efl = (regs->efl & ~EFL_V86_SAFE)
				     | (nflags & EFL_V86_SAFE);
			break;
		    }

		    default:
			/*
			 * Instruction not emulated here.
			 */
			thread->recover = 0;
			return FALSE;
		}
		break;	/* exit from 'while TRUE' */
	    }
	    regs->eip = (regs->eip & 0xffff0000) | eip;
	}
	else {
	    /*
	     * Not a trap we handle.
	     */
	    thread->recover = 0;
	    return FALSE;
	}

	if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING) == 0)) {

	    struct v86_interrupt_table *int_table;
	    int		int_count;
	    int		vec;
	    int		i;

	    int_table = (struct v86_interrupt_table *) v86->int_table;
	    int_count = v86->int_count;

	    vec = 0;
	    for (i = 0; i < int_count; int_table++, i++) {
		if (!int_table->mask && int_table->count > 0) {
		    int_table->count--;
		    vec = int_table->vec;
		    break;
		}
	    }
	    if (vec != 0) {
		/*
		 * Take this interrupt
		 */
		vm_offset_t	sp;
		struct iret_16	iret_16;
		struct int_vec	int_vec;

		sp = regs->uesp & 0xffff;
		if (sp < sizeof(struct iret_16))
		    goto stack_error;
		sp -= sizeof(struct iret_16);
		iret_16.ip = regs->eip;
		iret_16.cs = regs->cs;
		iret_16.flags = regs->efl & 0xFFFF;
		if ((v86->flags & EFL_TF) == 0)
		    iret_16.flags &= ~EFL_TF;
		else iret_16.flags |= EFL_TF;

		(void) memcpy((char *) &int_vec,
			      (char *) (sizeof(struct int_vec) * vec),
			      sizeof (struct int_vec));
		if (copyout((char *)&iret_16,
			    (char *)Addr8086(regs->ss,sp),
			    sizeof(struct iret_16)))
		    goto addr_error;
		regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
		regs->eip = int_vec.ip;
		regs->cs  = int_vec.cs;
		regs->efl  &= ~EFL_TF;
		v86->flags &= ~(EFL_IF | EFL_TF);
		v86->flags |= V86_IRET_PENDING;
	    }
	}

	thread->recover = 0;
	return TRUE;

	/*
	 *	On address error, report a page fault.
	 *	XXX report GP fault - we don't save
	 *	the faulting address.
	 */
    addr_error:
	__asm__("addr_error:;");
	thread->recover = 0;
	return FALSE;

	/*
	 *	On stack address error, return stack fault (12).
	 */
    stack_error:
	thread->recover = 0;
	regs->trapno = T_STACK_FAULT;
	return FALSE;
}

/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void	log_thread_action(thread_t, char *);
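
/*
 * i386_astintr: taken on the AST trap.  Either handles a delayed FPU
 * error (AST_I386_FP) or hands the pending reasons to ast_taken() with
 * interrupts still blocked.
 */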
void
i386_astintr(int preemption)
{
	int		mycpu;
	ast_t		mask = AST_ALL;
	spl_t		s;
	thread_t	self = current_thread();

	s = splsched();		/* block interrupts to check reasons */
	mp_disable_preemption();
	mycpu = cpu_number();
	if (need_ast[mycpu] & AST_I386_FP) {
	    /*
	     * AST was for delayed floating-point exception -
	     * FP interrupt occurred while in kernel.
	     * Turn off this AST reason and handle the FPU error.
	     */
	    ast_off(AST_I386_FP);
	    mp_enable_preemption();
	    splx(s);

	    fpexterrflt();
	}
	else {
	    /*
	     * Not an FPU trap.  Handle the AST.
	     * Interrupts are still blocked.
	     */

#ifdef	XXX_PREEMPTION_BUGS_YET_TO_FIX
	    if (preemption) {
		/*
		 * We don't want to process any AST if we were in
		 * kernel-mode and the current thread is in any
		 * funny state (waiting and/or suspended).
		 */
		thread_lock (self);
		if (thread_not_preemptable(self) || self->preempt) {
			ast_off(AST_URGENT);
			thread_unlock (self);
			mp_enable_preemption();
			splx(s);
			return;
		}
		else mask = AST_PREEMPT;
		mp_enable_preemption();

#if 0
		self->preempt = TH_NOT_PREEMPTABLE;
#endif

		thread_unlock (self);
	    } else {
		mp_enable_preemption();
	    }
#else
	    mp_enable_preemption();
#endif

	    ast_taken(preemption, mask, s
#if	FAST_IDLE
		      ,NO_IDLE_THREAD
#endif	/* FAST_IDLE */
		      );
#if 0
	    self->preempt = TH_PREEMPTABLE;
#endif
	}
}

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
	int	exc,
	int	code,
	int	subcode)
{
	spl_t			s;
	exception_data_type_t	codes[EXCEPTION_CODE_MAX];

	/*
	 * Turn off delayed FPU error handling.
	 */
	s = splsched();
	mp_disable_preemption();
	ast_off(AST_I386_FP);
	mp_enable_preemption();
	splx(s);

	codes[0] = code;		/* new exception interface */
	codes[1] = subcode;
	exception(exc, codes, 2);
	/*NOTREACHED*/
}
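
/*
 * check_io_fault: decode the faulting instruction and, if it is an I/O
 * instruction, let emulate_io() try to satisfy it.  Returns TRUE if the
 * fault was handled and the thread can be resumed.
 */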
boolean_t
check_io_fault(
	struct i386_saved_state		*regs)
{
	int		eip, opcode, io_port;
	boolean_t	data_16 = FALSE;

	/*
	 * Get the instruction.
	 */
	eip = regs->eip;

	for (;;) {
	    opcode = inst_fetch(eip, regs->cs);
	    eip++;
	    switch (opcode) {
		case 0x66:	/* data-size prefix */
		    data_16 = TRUE;
		    continue;

		case 0xf3:	/* rep prefix */
		case 0x26:	/* es */
		case 0x2e:	/* cs */
		case 0x36:	/* ss */
		case 0x3e:	/* ds */
		case 0x64:	/* fs */
		case 0x65:	/* gs */
		    continue;

		case 0xE4:	/* inb imm */
		case 0xE5:	/* inl imm */
		case 0xE6:	/* outb imm */
		case 0xE7:	/* outl imm */
		    /* port is immediate byte */
		    io_port = inst_fetch(eip, regs->cs);
		    eip++;
		    break;

		case 0xEC:	/* inb dx */
		case 0xED:	/* inl dx */
		case 0xEE:	/* outb dx */
		case 0xEF:	/* outl dx */
		case 0x6C:	/* insb */
		case 0x6D:	/* insl */
		case 0x6E:	/* outsb */
		case 0x6F:	/* outsl */
		    /* port is in DX register */
		    io_port = regs->edx & 0xFFFF;
		    break;

		default:
		    return FALSE;
	    }
	    break;
	}

	if (data_16)
	    opcode |= 0x6600;		/* word IO */

	switch (emulate_io(regs, opcode, io_port)) {
	    case EM_IO_DONE:
		/* instruction executed */
		regs->eip = eip;
		return TRUE;

	    case EM_IO_RETRY:
		/* port mapped, retry instruction */
		return TRUE;

	    case EM_IO_ERROR:
		/* port not mapped */
		return FALSE;
	}
	return FALSE;
}
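
/*
 * kernel_preempt_check: if an urgent AST is pending and we are at the
 * right interrupt nesting level, trigger a software interrupt
 * (int $0xff) so the preemption is taken through the normal trap path.
 */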
void
kernel_preempt_check(void)
{
	mp_disable_preemption();
	if ((need_ast[cpu_number()] & AST_URGENT) &&
#if	NCPUS > 1
	    get_interrupt_level() == 1
#else	/* NCPUS > 1 */
	    get_interrupt_level() == 0
#endif	/* NCPUS > 1 */
	    ) {
		mp_enable_preemption_no_check();
		__asm__ volatile ("int $0xff");
	} else {
		mp_enable_preemption_no_check();
	}
}

#if	MACH_KDB

extern void	db_i386_state(struct i386_saved_state *regs);

#include <ddb/db_output.h>
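
/*
 * db_i386_state: print the saved i386 register state on the kernel
 * debugger console.
 */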
void
db_i386_state(
	struct i386_saved_state	*regs)
{
	db_printf("eip	%8x\n", regs->eip);
	db_printf("trap	%8x\n", regs->trapno);
	db_printf("err	%8x\n", regs->err);
	db_printf("efl	%8x\n", regs->efl);
	db_printf("ebp	%8x\n", regs->ebp);
	db_printf("esp	%8x\n", regs->esp);
	db_printf("uesp	%8x\n", regs->uesp);
	db_printf("cs	%8x\n", regs->cs & 0xff);
	db_printf("ds	%8x\n", regs->ds & 0xff);
	db_printf("es	%8x\n", regs->es & 0xff);
	db_printf("fs	%8x\n", regs->fs & 0xff);
	db_printf("gs	%8x\n", regs->gs & 0xff);
	db_printf("ss	%8x\n", regs->ss & 0xff);
	db_printf("eax	%8x\n", regs->eax);
	db_printf("ebx	%8x\n", regs->ebx);
	db_printf("ecx	%8x\n", regs->ecx);
	db_printf("edx	%8x\n", regs->edx);
	db_printf("esi	%8x\n", regs->esi);
	db_printf("edi	%8x\n", regs->edi);
}

#endif	/* MACH_KDB */