/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Hardware trap/fault handler.
 */
#include <fast_idle.h>
#include <mach_kgdb.h>
#include <mach_ldebug.h>

#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/etap_macros.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/misc_protos.h>

#if	MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif	/* MACH_KGDB */

#include <i386/intel_read_fault.h>

#if	MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif	/* MACH_KDB */

#include <i386/io_emulate.h>
/*
 * Forward declarations
 */
extern void		user_page_fault_continue(
				kern_return_t		kr);

extern boolean_t	v86_assist(
				thread_t		thread,
				struct i386_saved_state	*regs);

extern boolean_t	check_io_fault(
				struct i386_saved_state	*regs);

extern int		inst_fetch(
				int			eip,
				int			cs);
void
thread_syscall_return(
	kern_return_t	ret)
{
	register thread_act_t	thr_act = current_act();
	register struct i386_saved_state *regs = USER_REGS(thr_act);

	regs->eax = (int) ret;		/* return value goes back in eax */
	thread_exception_return();
	/*NOTREACHED*/
}
#if	MACH_KDB
boolean_t	debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;
void
thread_kdb_return(void)
{
	register thread_act_t	thr_act = current_act();
	register thread_t	cur_thr = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thr_act);

	if (kdb_trap(regs->trapno, regs->err, regs)) {
#if	MACH_LDEBUG
		assert(cur_thr->mutex_count == 0);
#endif	/* MACH_LDEBUG */
		check_simple_locks();
		thread_exception_return();
		/*NOTREACHED*/
	}
}
boolean_t let_ddb_vm_fault = FALSE;

#if	NCPUS > 1
extern int kdb_active[NCPUS];
#endif	/* NCPUS > 1 */

#endif	/* MACH_KDB */
void
user_page_fault_continue(
	kern_return_t	kr)
{
	register thread_act_t	thr_act = current_act();
	register thread_t	cur_thr = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thr_act);

	if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if	MACH_KDB
		if (!db_breakpoints_inserted) {
			db_set_breakpoints();
		}
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (regs->err & T_PF_WRITE) &&
		    db_find_watchpoint(thr_act->map,
				       (vm_offset_t)regs->cr2,
				       regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
#endif	/* MACH_KDB */
		thread_exception_return();
		/*NOTREACHED*/
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(regs->trapno, regs->err, regs)) {
#if	MACH_LDEBUG
		assert(cur_thr->mutex_count == 0);
#endif	/* MACH_LDEBUG */
		check_simple_locks();
		thread_exception_return();
		/*NOTREACHED*/
	}
#endif	/* MACH_KDB */

	i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
	/*NOTREACHED*/
}
/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
	int	fault_addr;
	int	recover_addr;
};

extern struct recovery	recover_table[];
extern struct recovery	recover_table_end[];

/*
 * Recovery from a successful fault in copyout does not
 * return directly - it retries the pte check, since
 * the 386 ignores write protection in kernel mode.
 */
extern struct recovery	retry_table[];
extern struct recovery	retry_table_end[];

char *	trap_type[] = {TRAP_NAMES};
int	TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
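
/*
 * Illustrative sketch (not in the original source): TRAP_TYPES lets trap
 * numbers be mapped to names with a bounds check before indexing
 * trap_type[], as panic paths do below.  A hypothetical helper:
 */
static __inline__ char *
trap_name(int type)
{
	return (type >= 0 && type < TRAP_TYPES) ? trap_type[type] : "unknown";
}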
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
boolean_t
kernel_trap(
	register struct i386_saved_state	*regs)
{
	int			code;
	int			subcode;
	int			interruptible;
	register int		type;
	vm_map_t		map;
	kern_return_t		result = KERN_FAILURE;
	register thread_t	thread;
	thread_act_t		thr_act;
	etap_data_t		probe_data;
	pt_entry_t		*pte;
	extern vm_offset_t	vm_last_phys;

	type = regs->trapno;
	code = regs->err;
	thread = current_thread();
	thr_act = current_act();

	ETAP_DATA_LOAD(probe_data[0], regs->trapno);
	ETAP_DATA_LOAD(probe_data[1], MACH_PORT_NULL);
	ETAP_DATA_LOAD(probe_data[2], MACH_PORT_NULL);
	ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
			0,
			thread,
			&probe_data,
			ETAP_DATA_ENTRY*3);

	switch (type) {
	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return (TRUE);

	    case T_PAGE_FAULT:
		/*
		 * If the current map is a submap of the kernel map,
		 * and the address is within that map, fault on that
		 * map.  If the same check is done in vm_fault
		 * (vm_map_lookup), we may deadlock on the kernel map
		 * lock.
		 */
#if	MACH_KDB
	    mp_disable_preemption();
	    if (db_active
#if	NCPUS > 1
		&& kdb_active[cpu_number()]
#endif	/* NCPUS > 1 */
		&& !let_ddb_vm_fault) {
		/*
		 * Force kdb to handle this one.
		 */
		mp_enable_preemption();
		return (FALSE);
	    }
	    mp_enable_preemption();
#endif	/* MACH_KDB */

	    subcode = regs->cr2;	/* get faulting address */

	    if (subcode > LINEAR_KERNEL_ADDRESS) {
		map = kernel_map;
		subcode -= LINEAR_KERNEL_ADDRESS;
	    } else if (thr_act == THR_ACT_NULL || thread == THREAD_NULL)
		map = kernel_map;
	    else
		map = thr_act->map;

#if	MACH_KDB
	    /*
	     * Check for watchpoint on kernel static data.
	     * vm_fault would fail in this case
	     */
	    if (map == kernel_map &&
		db_watchpoint_list &&
		db_watchpoints_inserted &&
		(code & T_PF_WRITE) &&
		(vm_offset_t)subcode < vm_last_phys &&
		((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
		 INTEL_PTE_WRITE) == 0) {
		*pte = INTEL_PTE_VALID | INTEL_PTE_WRITE |
			pa_to_pte(trunc_page((vm_offset_t)subcode) -
				  VM_MIN_KERNEL_ADDRESS);
		result = KERN_SUCCESS;
	    } else
#endif	/* MACH_KDB */
	    {
		/*
		 * Since the 386 ignores write protection in
		 * kernel mode, always try for write permission
		 * first.  If that fails and the fault was a
		 * read fault, retry with read permission.
		 */
		if (map == kernel_map) {
		    register struct recovery *rp;

		    interruptible = THREAD_UNINT;
		    for (rp = recover_table; rp < recover_table_end; rp++) {
			if (regs->eip == rp->fault_addr) {
			    interruptible = THREAD_ABORTSAFE;
			    break;
			}
		    }
		}

		result = vm_fault(map,
				  trunc_page((vm_offset_t)subcode),
				  VM_PROT_READ|VM_PROT_WRITE,
				  FALSE,
				  (map == kernel_map) ? interruptible : THREAD_ABORTSAFE, NULL, 0);
	    }

#if	MACH_KDB
	    if (result == KERN_SUCCESS) {
		/* Look for watchpoints */
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (code & T_PF_WRITE) &&
		    db_find_watchpoint(map,
				       (vm_offset_t)subcode, regs))
		    kdb_trap(T_WATCHPOINT, 0, regs);
	    }
	    else
#endif	/* MACH_KDB */
	    if ((code & T_PF_WRITE) == 0 &&
		result == KERN_PROTECTION_FAILURE) {
		/*
		 * Must expand vm_fault by hand,
		 * so that we can ask for read-only access
		 * but enter a (kernel) writable mapping.
		 */
		result = intel_read_fault(map,
				trunc_page((vm_offset_t)subcode));
	    }

	    if (result == KERN_SUCCESS) {
		/*
		 * Certain faults require that we back up
		 * the EIP.
		 */
		register struct recovery *rp;

		for (rp = retry_table; rp < retry_table_end; rp++) {
		    if (regs->eip == rp->fault_addr) {
			regs->eip = rp->recover_addr;
			break;
		    }
		}
		return (TRUE);
	    }
	    /* fall through */
	    case T_GENERAL_PROTECTION:

		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		{
		    register struct recovery *rp;

		    for (rp = recover_table;
			 rp < recover_table_end;
			 rp++) {
			if (regs->eip == rp->fault_addr) {
			    regs->eip = rp->recover_addr;
			    return (TRUE);
			}
		    }
		}

		/*
		 * Check thread recovery address also -
		 * v86 assist uses it.
		 */
		if (thread->recover) {
		    regs->eip = thread->recover;
		    thread->recover = 0;
		    return (TRUE);
		}
		/*
		 * Unanticipated page-fault errors in kernel
		 * should not happen.
		 */
		/* fall through... */

	    default:
		/*
		 * ...and return failure, so that locore can call into
		 * debugger.
		 */
#if	MACH_KDP
		kdp_i386_trap(type, regs, result, regs->cr2);
#endif
		return (FALSE);
	}
	return (TRUE);
}
/*
 * Called if both kernel_trap() and kdb_trap() fail.
 */
void
panic_trap(
	register struct i386_saved_state	*regs)
{
	int		code;
	register int	type;

	type = regs->trapno;
	code = regs->err;

	printf("trap type %d, code = %x, pc = %x\n",
	       type, code, regs->eip);
	panic("trap");
}
/*
 * Trap from user mode.
 */
void
user_trap(
	register struct i386_saved_state	*regs)
{
	int			exc;
	int			code;
	int			subcode;
	register int		type;
	vm_map_t		map;
	vm_prot_t		prot;
	kern_return_t		result;
	register thread_act_t	thr_act = current_act();
	thread_t		thread = (thr_act ? thr_act->thread : THREAD_NULL);
	boolean_t		kernel_act = thr_act->kernel_loaded;
	etap_data_t		probe_data;

	if (regs->efl & EFL_VM) {
	    /*
	     * If hardware assist can handle exception,
	     * continue execution.
	     */
	    if (v86_assist(thread, regs))
		return;
	}

	type = regs->trapno;
	code = 0;
	subcode = 0;

	switch (type) {
	    case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	    case T_DEBUG:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_SGL;
		break;

	    case T_INT3:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	    case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	    case T_OUT_OF_BOUNDS:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_BOUND;
		break;

	    case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;
	    case 10:		/* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_GENERAL_PROTECTION:
		if (!(regs->efl & EFL_VM)) {
		    if (check_io_fault(regs))
			return;
		}
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = regs->err & 0xffff;
		break;
	    case T_PAGE_FAULT:
		subcode = regs->cr2;
		prot = VM_PROT_READ|VM_PROT_WRITE;
		if (kernel_act == FALSE) {
		    if (!(regs->err & T_PF_WRITE))
			prot = VM_PROT_READ;
		    (void) user_page_fault_continue(vm_fault(thr_act->map,
				trunc_page((vm_offset_t)subcode),
				prot,
				FALSE,
				THREAD_ABORTSAFE, NULL, 0));
		    /* NOTREACHED */
		}
		else {
		    map = thr_act->map;
		    if (subcode > LINEAR_KERNEL_ADDRESS) {
			map = kernel_map;
			subcode -= LINEAR_KERNEL_ADDRESS;
		    }
		    result = vm_fault(thr_act->map,
				trunc_page((vm_offset_t)subcode),
				prot,
				FALSE,
				(map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);
		    if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) {
			/*
			 * Must expand vm_fault by hand,
			 * so that we can ask for read-only access
			 * but enter a (kernel) writable mapping.
			 */
			result = intel_read_fault(thr_act->map,
				trunc_page((vm_offset_t)subcode));
		    }
		    user_page_fault_continue(result);
		    /*NOTREACHED*/
		}
		break;
	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    default:
#if	MACH_KGDB
		Debugger("Unanticipated user trap");
		return;
#endif	/* MACH_KGDB */
#if	MACH_KDB
		if (kdb_trap(type, regs->err, regs))
		    return;
#endif	/* MACH_KDB */
		printf("user trap type %d, code = %x, pc = %x\n",
		       type, regs->err, regs->eip);
		panic("user trap");
		return;
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(type, regs->err, regs))
		return;
#endif	/* MACH_KDB */

#if	ETAP_EVENT_MONITOR
	if (thread != THREAD_NULL) {
		ETAP_DATA_LOAD(probe_data[0], regs->trapno);
		ETAP_DATA_LOAD(probe_data[1],
			       thr_act->exc_actions[exc].port);
		ETAP_DATA_LOAD(probe_data[2],
			       thr_act->task->exc_actions[exc].port);
		ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
				0,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}
#endif	/* ETAP_EVENT_MONITOR */

	i386_exception(exc, code, subcode);
	/*NOTREACHED*/
}
/*
 * V86 mode assist for interrupt handling.
 */
boolean_t v86_assist_on = TRUE;
boolean_t v86_unsafe_ok = FALSE;
boolean_t v86_do_sti_cli = TRUE;
boolean_t v86_do_sti_immediate = FALSE;

#define	V86_IRET_PENDING 0x4000
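
/*
 * Note (editorial, not from the original source): 0x4000 is the NT bit
 * position in EFLAGS, which the v86 bookkeeping here does not otherwise
 * track, so the bit can double as a private "iret pending" marker in
 * v86->flags alongside the real EFL_IF and EFL_TF bits.
 */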
boolean_t
v86_assist(
	thread_t				thread,
	register struct i386_saved_state	*regs)
{
	register struct v86_assist_state *v86 = &thread->top_act->mact.pcb->ims.v86s;

/*
 * Build an 8086 address. Use only when off is known to be 16 bits.
 */
#define	Addr8086(seg,off)	((((seg) & 0xffff) << 4) + (off))

#define	EFL_V86_SAFE		(  EFL_OF | EFL_DF | EFL_TF \
				 | EFL_SF | EFL_ZF | EFL_AF \
				 | EFL_PF | EFL_CF )
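
/*
 * Worked example (illustrative, not from the original source): a
 * real-mode seg:off pair becomes a 20-bit linear address, so the
 * reset vector F000:FFF0 maps to
 *	Addr8086(0xF000, 0xFFF0) == (0xF000 << 4) + 0xFFF0 == 0xFFFF0.
 */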
	struct iret_32 {
		int		eip;
		int		cs;
		int		eflags;
	};
	struct iret_16 {
		unsigned short	ip;
		unsigned short	cs;
		unsigned short	flags;
	};
	union iret_struct {
		struct iret_32	iret_32;
		struct iret_16	iret_16;
	};

	struct int_vec {
		unsigned short	ip;
		unsigned short	cs;
	};

	if (!v86_assist_on)
	    return FALSE;
	/*
	 * If delayed STI pending, enable interrupts.
	 * Turn off tracing if on only to delay STI.
	 */
	if (v86->flags & V86_IF_PENDING) {
	    v86->flags &= ~V86_IF_PENDING;
	    v86->flags |= EFL_IF;
	    if ((v86->flags & EFL_TF) == 0)
		regs->efl &= ~EFL_TF;
	}
	if (regs->trapno == T_DEBUG) {

	    if (v86->flags & EFL_TF) {
		/*
		 * Trace flag was also set - it has priority
		 */
		return FALSE;			/* handle as single-step */
	    }
	    /*
	     * Fall through to check for interrupts.
	     */
	}
	else if (regs->trapno == T_GENERAL_PROTECTION) {
	    /*
	     * General protection error - must be an 8086 instruction
	     * to emulate.
	     */
	    register int	eip;
	    boolean_t	addr_32 = FALSE;
	    boolean_t	data_32 = FALSE;
	    int		io_port;

	    /*
	     * Set up error handler for bad instruction/data
	     * fetches.
	     */
	    __asm__("movl $(addr_error), %0" : : "m" (thread->recover));

	    eip = regs->eip;
	    while (TRUE) {
		unsigned char	opcode;

		if (eip > 0xFFFF) {
		    thread->recover = 0;
		    return FALSE;	/* GP fault: IP out of range */
		}

		opcode = *(unsigned char *)Addr8086(regs->cs,eip);
		eip++;
		switch (opcode) {
		case 0xf0:	/* lock */
		case 0xf2:	/* repne */
		case 0xf3:	/* repe */
		case 0x2e:	/* cs */
		case 0x36:	/* ss */
		case 0x3e:	/* ds */
		case 0x26:	/* es */
		case 0x64:	/* fs */
		case 0x65:	/* gs */
		    /* ignore prefix */
		    continue;

		case 0x66:	/* data size */
		    data_32 = TRUE;
		    continue;

		case 0x67:	/* address size */
		    addr_32 = TRUE;
		    continue;
		case 0xe4:	/* inb imm */
		case 0xe5:	/* inw imm */
		case 0xe6:	/* outb imm */
		case 0xe7:	/* outw imm */
		    /* port is immediate byte */
		    io_port = *(unsigned char *)Addr8086(regs->cs, eip);
		    eip++;
		    goto do_in_out;

		case 0xec:	/* inb dx */
		case 0xed:	/* inw dx */
		case 0xee:	/* outb dx */
		case 0xef:	/* outw dx */
		case 0x6c:	/* insb */
		case 0x6d:	/* insw */
		case 0x6e:	/* outsb */
		case 0x6f:	/* outsw */
		    /* port is in DX register */
		    io_port = regs->edx & 0xffff;

		do_in_out:
		    if (!data_32)
			opcode |= 0x6600;	/* word IO */

		    switch (emulate_io(regs, opcode, io_port)) {
			case EM_IO_DONE:
			    /* instruction executed */
			    break;
			case EM_IO_RETRY:
			    /* port mapped, retry instruction */
			    thread->recover = 0;
			    return TRUE;
			case EM_IO_ERROR:
			    /* port not mapped */
			    thread->recover = 0;
			    return FALSE;
		    }
		    break;
		case 0xfa:	/* cli */
		    if (!v86_do_sti_cli) {
			thread->recover = 0;
			return FALSE;
		    }

		    v86->flags &= ~EFL_IF;
				/* disable simulated interrupts */
		    break;

		case 0xfb:	/* sti */
		    if (!v86_do_sti_cli) {
			thread->recover = 0;
			return FALSE;
		    }

		    if ((v86->flags & EFL_IF) == 0) {
			if (v86_do_sti_immediate) {
			    v86->flags |= EFL_IF;
			} else {
			    v86->flags |= V86_IF_PENDING;
			    regs->efl |= EFL_TF;
				/* single step to set IF next inst. */
			}
		    }
		    break;
		case 0x9c:	/* pushf */
		{
		    int		flags;
		    vm_offset_t	sp;
		    int		size;

		    flags = regs->efl;
		    if ((v86->flags & EFL_IF) == 0)
			flags &= ~EFL_IF;

		    if ((v86->flags & EFL_TF) == 0)
			flags &= ~EFL_TF;
		    else flags |= EFL_TF;

		    sp = regs->uesp;
		    if (!addr_32)
			sp &= 0xffff;
		    else if (sp > 0xffff)
			goto stack_error;
		    size = (data_32) ? 4 : 2;
		    if (sp < size)
			goto stack_error;
		    sp -= size;
		    if (copyout((char *)&flags,
				(char *)Addr8086(regs->ss,sp),
				size))
			goto addr_error;
		    regs->uesp = (regs->uesp & 0xffff0000) | sp;
		    break;
		}
		case 0x9d:	/* popf */
		{
		    vm_offset_t	sp;
		    int		nflags;

		    sp = regs->uesp;
		    if (!addr_32)
			sp &= 0xffff;
		    else if (sp > 0xffff)
			goto stack_error;

		    if (data_32) {
			if (sp > 0xffff - sizeof(int))
			    goto stack_error;
			nflags = *(int *)Addr8086(regs->ss,sp);
			sp += sizeof(int);
		    }
		    else {
			if (sp > 0xffff - sizeof(short))
			    goto stack_error;
			nflags = *(unsigned short *)
				Addr8086(regs->ss,sp);
			sp += sizeof(short);
		    }
		    regs->uesp = (regs->uesp & 0xffff0000) | sp;

		    if (v86->flags & V86_IRET_PENDING) {
			v86->flags = nflags & (EFL_TF | EFL_IF);
			v86->flags |= V86_IRET_PENDING;
		    } else {
			v86->flags = nflags & (EFL_TF | EFL_IF);
		    }
		    regs->efl = (regs->efl & ~EFL_V86_SAFE)
				| (nflags & EFL_V86_SAFE);
		    break;
		}
		case 0xcf:	/* iret */
		{
		    vm_offset_t	sp;
		    int		nflags;
		    union iret_struct iret_struct;

		    v86->flags &= ~V86_IRET_PENDING;
		    sp = regs->uesp;
		    if (!addr_32)
			sp &= 0xffff;
		    else if (sp > 0xffff)
			goto stack_error;

		    if (data_32) {
			if (sp > 0xffff - sizeof(struct iret_32))
			    goto stack_error;
			iret_struct.iret_32 =
				*(struct iret_32 *) Addr8086(regs->ss,sp);
			sp += sizeof(struct iret_32);
		    }
		    else {
			if (sp > 0xffff - sizeof(struct iret_16))
			    goto stack_error;
			iret_struct.iret_16 =
				*(struct iret_16 *) Addr8086(regs->ss,sp);
			sp += sizeof(struct iret_16);
		    }
		    regs->uesp = (regs->uesp & 0xffff0000) | sp;

		    if (data_32) {
			eip = iret_struct.iret_32.eip;
			regs->cs = iret_struct.iret_32.cs & 0xffff;
			nflags = iret_struct.iret_32.eflags;
		    }
		    else {
			eip = iret_struct.iret_16.ip;
			regs->cs = iret_struct.iret_16.cs;
			nflags = iret_struct.iret_16.flags;
		    }

		    v86->flags = nflags & (EFL_TF | EFL_IF);
		    regs->efl = (regs->efl & ~EFL_V86_SAFE)
				| (nflags & EFL_V86_SAFE);
		    break;
		}
		default:
		    /*
		     * Instruction not emulated here.
		     */
		    thread->recover = 0;
		    return FALSE;
		}
		break;	/* exit from 'while TRUE' */
	    }
	    regs->eip = (regs->eip & 0xffff0000) | eip;
	}
	else {
	    /*
	     * Not a trap we handle.
	     */
	    thread->recover = 0;
	    return FALSE;
	}

	if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING) == 0)) {

	    struct v86_interrupt_table *int_table;
	    int	int_count;
	    int	vec;
	    int	i;

	    int_table = (struct v86_interrupt_table *) v86->int_table;
	    int_count = v86->int_count;

	    vec = 0;
	    for (i = 0; i < int_count; int_table++, i++) {
		if (!int_table->mask && int_table->count > 0) {
		    int_table->count--;
		    vec = int_table->vec;
		    break;
		}
	    }
	    if (vec != 0) {
		/*
		 * Take this interrupt
		 */
		vm_offset_t	sp;
		struct iret_16	iret_16;
		struct int_vec	int_vec;

		sp = regs->uesp & 0xffff;
		if (sp < sizeof(struct iret_16))
		    goto stack_error;
		sp -= sizeof(struct iret_16);
		iret_16.ip = regs->eip;
		iret_16.cs = regs->cs;
		iret_16.flags = regs->efl & 0xFFFF;
		if ((v86->flags & EFL_TF) == 0)
		    iret_16.flags &= ~EFL_TF;
		else	iret_16.flags |= EFL_TF;

		/* real-mode interrupt vector i lives at linear address 4*i */
		(void) memcpy((char *) &int_vec,
			      (char *) (sizeof(struct int_vec) * vec),
			      sizeof(struct int_vec));
		if (copyout((char *)&iret_16,
			    (char *)Addr8086(regs->ss,sp),
			    sizeof(struct iret_16)))
		    goto addr_error;
		regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
		regs->eip = int_vec.ip;
		regs->cs  = int_vec.cs;
		regs->efl &= ~EFL_TF;
		v86->flags &= ~(EFL_IF | EFL_TF);
		v86->flags |= V86_IRET_PENDING;
	    }
	}

	thread->recover = 0;
	return TRUE;
	/*
	 * On address error, report a page fault.
	 * XXX report GP fault - we don't save
	 * the faulting address.
	 */
    addr_error:
	__asm__("addr_error:;");
	thread->recover = 0;
	return FALSE;

	/*
	 * On stack address error, return stack fault (12).
	 */
    stack_error:
	thread->recover = 0;
	regs->trapno = T_STACK_FAULT;
	return FALSE;
}
/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void log_thread_action(thread_t, char *);

void
i386_astintr(int preemption)
{
	int		mycpu;
	ast_t		mask = AST_ALL;
	spl_t		s;
	thread_t	self = current_thread();

	s = splsched();		/* block interrupts to check reasons */
	mp_disable_preemption();
	mycpu = cpu_number();
	if (need_ast[mycpu] & AST_I386_FP) {
	    /*
	     * AST was for delayed floating-point exception -
	     * FP interrupt occurred while in kernel.
	     * Turn off this AST reason and handle the FPU error.
	     */
	    ast_off(AST_I386_FP);
	    mp_enable_preemption();
	    splx(s);

	    fpexterrflt();
	}
	else {
	    /*
	     * Not an FPU trap.  Handle the AST.
	     * Interrupts are still blocked.
	     */
	    if (preemption) {
		/*
		 * We don't want to process any AST if we were in
		 * kernel-mode and the current thread is in any
		 * funny state (waiting and/or suspended).
		 */
		thread_lock (self);
		if (thread_not_preemptable(self) || self->preempt) {
		    ast_off(AST_URGENT);
		    thread_unlock (self);
		    mp_enable_preemption();
		    splx(s);
		    return;
		}
		else mask = AST_PREEMPT;

		self->preempt = TH_NOT_PREEMPTABLE;

		thread_unlock (self);
		mp_enable_preemption();
	    } else {
		mp_enable_preemption();
	    }

	    ast_taken(mask, s
#if	FAST_IDLE
		      , NO_IDLE_THREAD
#endif	/* FAST_IDLE */
		      );

	    self->preempt = TH_PREEMPTABLE;
	}
}
/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
	int	exc,
	int	code,
	int	subcode)
{
	spl_t			s;
	exception_data_type_t	codes[EXCEPTION_CODE_MAX];

	/*
	 * Turn off delayed FPU error handling.
	 */
	s = splsched();
	mp_disable_preemption();
	ast_off(AST_I386_FP);
	mp_enable_preemption();
	splx(s);

	codes[0] = code;		/* new exception interface */
	codes[1] = subcode;
	exception(exc, codes, 2);
	/*NOTREACHED*/
}
boolean_t
check_io_fault(
	struct i386_saved_state		*regs)
{
	int		eip, opcode, io_port;
	boolean_t	data_16 = FALSE;

	/*
	 * Get the instruction.
	 */
	eip = regs->eip;

	for (;;) {
	    opcode = inst_fetch(eip, regs->cs);
	    eip++;
	    switch (opcode) {
		case 0x66:	/* data-size prefix */
		    data_16 = TRUE;
		    continue;

		case 0xf3:	/* rep prefix */
		case 0x26:	/* es */
		case 0x2e:	/* cs */
		case 0x36:	/* ss */
		case 0x3e:	/* ds */
		case 0x64:	/* fs */
		case 0x65:	/* gs */
		    /* ignore prefix */
		    continue;

		case 0xE4:	/* inb imm */
		case 0xE5:	/* inl imm */
		case 0xE6:	/* outb imm */
		case 0xE7:	/* outl imm */
		    /* port is immediate byte */
		    io_port = inst_fetch(eip, regs->cs);
		    eip++;
		    break;

		case 0xEC:	/* inb dx */
		case 0xED:	/* inl dx */
		case 0xEE:	/* outb dx */
		case 0xEF:	/* outl dx */
		case 0x6C:	/* insb */
		case 0x6D:	/* insl */
		case 0x6E:	/* outsb */
		case 0x6F:	/* outsl */
		    /* port is in DX register */
		    io_port = regs->edx & 0xFFFF;
		    break;

		default:
		    return FALSE;
	    }
	    break;
	}

	if (data_16)
	    opcode |= 0x6600;		/* word IO */

	switch (emulate_io(regs, opcode, io_port)) {
	    case EM_IO_DONE:
		/* instruction executed */
		regs->eip = eip;
		return TRUE;

	    case EM_IO_RETRY:
		/* port mapped, retry instruction */
		return TRUE;

	    case EM_IO_ERROR:
		/* port not mapped */
		break;
	}
	return FALSE;
}
void
kernel_preempt_check(void)
{
	mp_disable_preemption();
	if ((need_ast[cpu_number()] & AST_URGENT) &&
#if	NCPUS > 1
	    get_interrupt_level() == 1
#else	/* NCPUS > 1 */
	    get_interrupt_level() == 0
#endif	/* NCPUS > 1 */
	    ) {
		mp_enable_preemption_no_check();
		/* take the urgent AST now via a self-induced trap */
		__asm__ volatile ("int $0xff");
	} else {
		mp_enable_preemption_no_check();
	}
}
#if	MACH_KDB

extern void db_i386_state(struct i386_saved_state *regs);

#include <ddb/db_output.h>

void
db_i386_state(
	struct i386_saved_state *regs)
{
	db_printf("eip	%8x\n", regs->eip);
	db_printf("trap	%8x\n", regs->trapno);
	db_printf("err	%8x\n", regs->err);
	db_printf("efl	%8x\n", regs->efl);
	db_printf("ebp	%8x\n", regs->ebp);
	db_printf("esp	%8x\n", regs->esp);
	db_printf("uesp	%8x\n", regs->uesp);
	db_printf("cs	%8x\n", regs->cs & 0xff);
	db_printf("ds	%8x\n", regs->ds & 0xff);
	db_printf("es	%8x\n", regs->es & 0xff);
	db_printf("fs	%8x\n", regs->fs & 0xff);
	db_printf("gs	%8x\n", regs->gs & 0xff);
	db_printf("ss	%8x\n", regs->ss & 0xff);
	db_printf("eax	%8x\n", regs->eax);
	db_printf("ebx	%8x\n", regs->ebx);
	db_printf("ecx	%8x\n", regs->ecx);
	db_printf("edx	%8x\n", regs->edx);
	db_printf("esi	%8x\n", regs->esi);
	db_printf("edi	%8x\n", regs->edi);
}

#endif	/* MACH_KDB */