/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * Hardware trap/fault handler.
 */

#include <cpus.h>
#include <fast_idle.h>
#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/etap_macros.h>
#include <kern/kern_types.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif  /* MACH_KGDB */

#include <i386/intel_read_fault.h>

#if MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif  /* MACH_KDB */

#include <string.h>

#include <i386/io_emulate.h>

/*
 * Forward declarations
 */
extern void user_page_fault_continue(
    kern_return_t kr);

extern boolean_t v86_assist(
    thread_t thread,
    struct i386_saved_state *regs);

extern boolean_t check_io_fault(
    struct i386_saved_state *regs);

extern int inst_fetch(
    int eip,
    int cs);

void
thread_syscall_return(
    kern_return_t ret)
{
    register thread_act_t thr_act = current_act();
    register struct i386_saved_state *regs = USER_REGS(thr_act);
    regs->eax = ret;
    thread_exception_return();
    /*NOTREACHED*/
}
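
/*
 * Note: on i386 a Mach trap/syscall hands its kern_return_t back to
 * user space in EAX, so storing into regs->eax above is all the
 * bookkeeping needed; thread_exception_return() then resumes the
 * user context directly, which is why control never returns here.
 */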


#if MACH_KDB
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

void
thread_kdb_return(void)
{
    register thread_act_t thr_act = current_act();
    register thread_t cur_thr = current_thread();
    register struct i386_saved_state *regs = USER_REGS(thr_act);

    if (kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
        assert(cur_thr->mutex_count == 0);
#endif /* MACH_LDEBUG */
        check_simple_locks();
        thread_exception_return();
        /*NOTREACHED*/
    }
}
boolean_t let_ddb_vm_fault = FALSE;

#if NCPUS > 1
extern int kdb_active[NCPUS];
#endif /* NCPUS > 1 */

#endif /* MACH_KDB */

void
user_page_fault_continue(
    kern_return_t kr)
{
    register thread_act_t thr_act = current_act();
    register thread_t cur_thr = current_thread();
    register struct i386_saved_state *regs = USER_REGS(thr_act);

    if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if MACH_KDB
        if (!db_breakpoints_inserted) {
            db_set_breakpoints();
        }
        if (db_watchpoint_list &&
            db_watchpoints_inserted &&
            (regs->err & T_PF_WRITE) &&
            db_find_watchpoint(thr_act->map,
                               (vm_offset_t)regs->cr2,
                               regs))
            kdb_trap(T_WATCHPOINT, 0, regs);
#endif /* MACH_KDB */
        thread_exception_return();
        /*NOTREACHED*/
    }

#if MACH_KDB
    if (debug_all_traps_with_kdb &&
        kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
        assert(cur_thr->mutex_count == 0);
#endif /* MACH_LDEBUG */
        check_simple_locks();
        thread_exception_return();
        /*NOTREACHED*/
    }
#endif /* MACH_KDB */

    i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
    /*NOTREACHED*/
}

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
    int fault_addr;
    int recover_addr;
};

extern struct recovery recover_table[];
extern struct recovery recover_table_end[];
/*
 * Recovery from a successful fault in copyout does not
 * return directly - it retries the pte check, since
 * the 386 ignores write protection in kernel mode.
 */
extern struct recovery retry_table[];
extern struct recovery retry_table_end[];
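
/*
 * Illustrative sketch (the assembly-side macros are hypothetical,
 * not the actual locore.s layout): a copyin/copyout primitive
 * contributes a pair
 *
 *      { fault_addr   = EIP of the user-touching instruction,
 *        recover_addr = its error exit }
 *
 * On a fault, kernel_trap() scans recover_table[] for regs->eip and,
 * on a match, rewrites it to recover_addr so the copy routine returns
 * an error instead of panicking; a retry_table[] match instead backs
 * EIP up so the instruction re-executes after the pte check.
 */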

char *trap_type[] = {TRAP_NAMES};
int TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
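
/*
 * Example (hypothetical caller): a diagnostic path can map a trap
 * number to a printable name with a bounds check, e.g.
 *
 *      name = (type < TRAP_TYPES) ? trap_type[type] : "unknown";
 */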

/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
boolean_t
kernel_trap(
    register struct i386_saved_state *regs)
{
    int exc;
    int code;
    int subcode;
    int interruptible;
    register int type;
    vm_map_t map;
    kern_return_t result = KERN_FAILURE; /* defined even for traps that
                                            never call vm_fault() */
    register thread_t thread;
    thread_act_t thr_act;
    etap_data_t probe_data;
    pt_entry_t *pte;
    extern vm_offset_t vm_last_phys;

    type = regs->trapno;
    code = regs->err;
    thread = current_thread();
    thr_act = current_act();

    ETAP_DATA_LOAD(probe_data[0], regs->trapno);
    ETAP_DATA_LOAD(probe_data[1], MACH_PORT_NULL);
    ETAP_DATA_LOAD(probe_data[2], MACH_PORT_NULL);
    ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
                    0,
                    thread,
                    &probe_data,
                    ETAP_DATA_ENTRY*3);

    switch (type) {
    case T_PREEMPT:
        return (TRUE);

    case T_NO_FPU:
        fpnoextflt();
        return (TRUE);

    case T_FPU_FAULT:
        fpextovrflt();
        return (TRUE);

    case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        return (TRUE);

    case T_PAGE_FAULT:
        /*
         * If the current map is a submap of the kernel map,
         * and the address is within that map, fault on that
         * map.  If the same check is done in vm_fault
         * (vm_map_lookup), we may deadlock on the kernel map
         * lock.
         */
#if MACH_KDB
        mp_disable_preemption();
        if (db_active
#if NCPUS > 1
            && kdb_active[cpu_number()]
#endif /* NCPUS > 1 */
            && !let_ddb_vm_fault) {
            /*
             * Force kdb to handle this one.
             */
            mp_enable_preemption();
            return (FALSE);
        }
        mp_enable_preemption();
#endif /* MACH_KDB */
        subcode = regs->cr2;    /* get faulting address */

        if (subcode > LINEAR_KERNEL_ADDRESS) {
            map = kernel_map;
            subcode -= LINEAR_KERNEL_ADDRESS;
        } else if (thr_act == THR_ACT_NULL || thread == THREAD_NULL)
            map = kernel_map;
        else {
            map = thr_act->map;
        }
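
        /*
         * At this point a fault above LINEAR_KERNEL_ADDRESS has been
         * rebased into the kernel_map's virtual range, and faults
         * taken without a current activation are charged to the
         * kernel map as well; everything else goes to the
         * activation's own map.
         */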

#if MACH_KDB
        /*
         * Check for watchpoint on kernel static data.
         * vm_fault would fail in this case
         */
        if (map == kernel_map &&
            db_watchpoint_list &&
            db_watchpoints_inserted &&
            (code & T_PF_WRITE) &&
            (vm_offset_t)subcode < vm_last_phys &&
            ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
             INTEL_PTE_WRITE) == 0) {
            *pte = INTEL_PTE_VALID | INTEL_PTE_WRITE |
                   pa_to_pte(trunc_page((vm_offset_t)subcode) -
                             VM_MIN_KERNEL_ADDRESS);
            result = KERN_SUCCESS;
        } else
#endif /* MACH_KDB */
        {
            /*
             * Since the 386 ignores write protection in
             * kernel mode, always try for write permission
             * first.  If that fails and the fault was a
             * read fault, retry with read permission.
             */
            if (map == kernel_map) {
                register struct recovery *rp;

                interruptible = THREAD_UNINT;
                for (rp = recover_table; rp < recover_table_end; rp++) {
                    if (regs->eip == rp->fault_addr) {
                        interruptible = THREAD_ABORTSAFE;
                        break;
                    }
                }
            }

            result = vm_fault(map,
                              trunc_page((vm_offset_t)subcode),
                              VM_PROT_READ|VM_PROT_WRITE,
                              FALSE,
                              (map == kernel_map) ? interruptible : THREAD_ABORTSAFE, NULL, 0);
        }
#if MACH_KDB
        if (result == KERN_SUCCESS) {
            /* Look for watchpoints */
            if (db_watchpoint_list &&
                db_watchpoints_inserted &&
                (code & T_PF_WRITE) &&
                db_find_watchpoint(map,
                                   (vm_offset_t)subcode, regs))
                kdb_trap(T_WATCHPOINT, 0, regs);
        }
        else
#endif /* MACH_KDB */
        if ((code & T_PF_WRITE) == 0 &&
            result == KERN_PROTECTION_FAILURE)
        {
            /*
             * Must expand vm_fault by hand,
             * so that we can ask for read-only access
             * but enter a (kernel)writable mapping.
             */
            result = intel_read_fault(map,
                                      trunc_page((vm_offset_t)subcode));
        }

        if (result == KERN_SUCCESS) {
            /*
             * Certain faults require that we back up
             * the EIP.
             */
            register struct recovery *rp;

            for (rp = retry_table; rp < retry_table_end; rp++) {
                if (regs->eip == rp->fault_addr) {
                    regs->eip = rp->recover_addr;
                    break;
                }
            }
            return (TRUE);
        }

        /* fall through */

    case T_GENERAL_PROTECTION:

        /*
         * If there is a failure recovery address
         * for this fault, go there.
         */
        {
            register struct recovery *rp;

            for (rp = recover_table;
                 rp < recover_table_end;
                 rp++) {
                if (regs->eip == rp->fault_addr) {
                    regs->eip = rp->recover_addr;
                    return (TRUE);
                }
            }
        }

        /*
         * Check thread recovery address also -
         * v86 assist uses it.
         */
        if (thread->recover) {
            regs->eip = thread->recover;
            thread->recover = 0;
            return (TRUE);
        }

        /*
         * Unanticipated page-fault errors in kernel
         * should not happen.
         */
        /* fall through... */

    default:
        /*
         * ...and return failure, so that locore can call into
         * debugger.
         */
#if MACH_KDP
        kdp_i386_trap(type, regs, result, regs->cr2);
#endif
        return (FALSE);
    }
    return (TRUE);
}

/*
 * Called if both kernel_trap() and kdb_trap() fail.
 */
void
panic_trap(
    register struct i386_saved_state *regs)
{
    int code;
    register int type;

    type = regs->trapno;
    code = regs->err;

    printf("trap type %d, code = %x, pc = %x\n",
           type, code, regs->eip);
    panic("trap");
}


/*
 * Trap from user mode.
 */
void
user_trap(
    register struct i386_saved_state *regs)
{
    int exc;
    int code;
    int subcode;
    register int type;
    vm_map_t map;
    vm_prot_t prot;
    kern_return_t result;
    register thread_act_t thr_act = current_act();
    thread_t thread = (thr_act ? thr_act->thread : THREAD_NULL);
    boolean_t kernel_act = thr_act->kernel_loaded;
    etap_data_t probe_data;

    if (regs->efl & EFL_VM) {
        /*
         * If hardware assist can handle exception,
         * continue execution.
         */
        if (v86_assist(thread, regs))
            return;
    }

    type = regs->trapno;
    code = 0;
    subcode = 0;

    switch (type) {

    case T_DIVIDE_ERROR:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_DIV;
        break;

    case T_DEBUG:
        exc = EXC_BREAKPOINT;
        code = EXC_I386_SGL;
        break;

    case T_INT3:
        exc = EXC_BREAKPOINT;
        code = EXC_I386_BPT;
        break;

    case T_OVERFLOW:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_INTO;
        break;

    case T_OUT_OF_BOUNDS:
        exc = EXC_SOFTWARE;
        code = EXC_I386_BOUND;
        break;

    case T_INVALID_OPCODE:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOP;
        break;

    case T_NO_FPU:
    case 32:            /* XXX */
        fpnoextflt();
        return;

    case T_FPU_FAULT:
        fpextovrflt();
        return;

    case 10:            /* invalid TSS == iret with NT flag set */
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVTSSFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_STACK_FAULT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_GENERAL_PROTECTION:
        if (!(regs->efl & EFL_VM)) {
            if (check_io_fault(regs))
                return;
        }
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_GPFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_PAGE_FAULT:
        subcode = regs->cr2;
        prot = VM_PROT_READ|VM_PROT_WRITE;
        if (kernel_act == FALSE) {
            if (!(regs->err & T_PF_WRITE))
                prot = VM_PROT_READ;
            (void) user_page_fault_continue(vm_fault(thr_act->map,
                trunc_page((vm_offset_t)subcode),
                prot,
                FALSE,
                THREAD_ABORTSAFE, NULL, 0));
            /* NOTREACHED */
        }
        else {
            if (subcode > LINEAR_KERNEL_ADDRESS) {
                map = kernel_map;
                subcode -= LINEAR_KERNEL_ADDRESS;
            } else
                map = thr_act->map; /* fault against the activation's map */
            result = vm_fault(thr_act->map,
                              trunc_page((vm_offset_t)subcode),
                              prot,
                              FALSE,
                              (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);
            if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) {
                /*
                 * Must expand vm_fault by hand,
                 * so that we can ask for read-only access
                 * but enter a (kernel) writable mapping.
                 */
                result = intel_read_fault(thr_act->map,
                                          trunc_page((vm_offset_t)subcode));
            }
            user_page_fault_continue(result);
            /*NOTREACHED*/
        }
        break;

    case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        return;

    default:
#if MACH_KGDB
        Debugger("Unanticipated user trap");
        return;
#endif /* MACH_KGDB */
#if MACH_KDB
        if (kdb_trap(type, regs->err, regs))
            return;
#endif /* MACH_KDB */
        printf("user trap type %d, code = %x, pc = %x\n",
               type, regs->err, regs->eip);
        panic("user trap");
        return;
    }

#if MACH_KDB
    if (debug_all_traps_with_kdb &&
        kdb_trap(type, regs->err, regs))
        return;
#endif /* MACH_KDB */

#if ETAP_EVENT_MONITOR
    if (thread != THREAD_NULL) {
        ETAP_DATA_LOAD(probe_data[0], regs->trapno);
        ETAP_DATA_LOAD(probe_data[1],
                       thr_act->exc_actions[exc].port);
        ETAP_DATA_LOAD(probe_data[2],
                       thr_act->task->exc_actions[exc].port);
        ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
                        0,
                        thread,
                        &probe_data,
                        ETAP_DATA_ENTRY*3);
    }
#endif /* ETAP_EVENT_MONITOR */

    i386_exception(exc, code, subcode);
    /*NOTREACHED*/
}

/*
 * V86 mode assist for interrupt handling.
 */
boolean_t v86_assist_on = TRUE;
boolean_t v86_unsafe_ok = FALSE;
boolean_t v86_do_sti_cli = TRUE;
boolean_t v86_do_sti_immediate = FALSE;

#define V86_IRET_PENDING 0x4000

int cli_count = 0;
int sti_count = 0;

boolean_t
v86_assist(
    thread_t thread,
    register struct i386_saved_state *regs)
{
    register struct v86_assist_state *v86 = &thread->top_act->mact.pcb->ims.v86s;

/*
 * Build an 8086 address.  Use only when off is known to be 16 bits.
 */
#define Addr8086(seg,off)   ((((seg) & 0xffff) << 4) + (off))
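
/*
 * For example, Addr8086(0x1234, 0x0010) evaluates to 0x12350: the
 * classic real-mode segment * 16 + offset, with the segment masked
 * to 16 bits first.
 */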

#define EFL_V86_SAFE        (  EFL_OF | EFL_DF | EFL_TF \
                             | EFL_SF | EFL_ZF | EFL_AF \
                             | EFL_PF | EFL_CF )
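
/*
 * Only these arithmetic/status flags may be copied from a guest flag
 * image into the real EFLAGS; IF, IOPL, VM and the rest stay under
 * the monitor's control (the virtual interrupt flag is tracked in
 * v86->flags instead).
 */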
    struct iret_32 {
        int eip;
        int cs;
        int eflags;
    };
    struct iret_16 {
        unsigned short ip;
        unsigned short cs;
        unsigned short flags;
    };
    union iret_struct {
        struct iret_32 iret_32;
        struct iret_16 iret_16;
    };

    struct int_vec {
        unsigned short ip;
        unsigned short cs;
    };

    if (!v86_assist_on)
        return FALSE;

    /*
     * If delayed STI pending, enable interrupts.
     * Turn off tracing if on only to delay STI.
     */
    if (v86->flags & V86_IF_PENDING) {
        v86->flags &= ~V86_IF_PENDING;
        v86->flags |= EFL_IF;
        if ((v86->flags & EFL_TF) == 0)
            regs->efl &= ~EFL_TF;
    }

    if (regs->trapno == T_DEBUG) {

        if (v86->flags & EFL_TF) {
            /*
             * Trace flag was also set - it has priority
             */
            return FALSE;               /* handle as single-step */
        }
        /*
         * Fall through to check for interrupts.
         */
    }
    else if (regs->trapno == T_GENERAL_PROTECTION) {
        /*
         * General protection error - must be an 8086 instruction
         * to emulate.
         */
        register int eip;
        boolean_t addr_32 = FALSE;
        boolean_t data_32 = FALSE;
        int io_port;

        /*
         * Set up error handler for bad instruction/data
         * fetches.
         */
        __asm__("movl $(addr_error), %0" : "=m" (thread->recover));
                /* thread->recover is written, hence an output operand */

        eip = regs->eip;
        while (TRUE) {
            unsigned char opcode;

            if (eip > 0xFFFF) {
                thread->recover = 0;
                return FALSE;   /* GP fault: IP out of range */
            }

            opcode = *(unsigned char *)Addr8086(regs->cs,eip);
            eip++;
            switch (opcode) {
            case 0xf0:          /* lock */
            case 0xf2:          /* repne */
            case 0xf3:          /* repe */
            case 0x2e:          /* cs */
            case 0x36:          /* ss */
            case 0x3e:          /* ds */
            case 0x26:          /* es */
            case 0x64:          /* fs */
            case 0x65:          /* gs */
                /* ignore prefix */
                continue;

            case 0x66:          /* data size */
                data_32 = TRUE;
                continue;

            case 0x67:          /* address size */
                addr_32 = TRUE;
                continue;

            case 0xe4:          /* inb imm */
            case 0xe5:          /* inw imm */
            case 0xe6:          /* outb imm */
            case 0xe7:          /* outw imm */
                io_port = *(unsigned char *)Addr8086(regs->cs, eip);
                eip++;
                goto do_in_out;

            case 0xec:          /* inb dx */
            case 0xed:          /* inw dx */
            case 0xee:          /* outb dx */
            case 0xef:          /* outw dx */
            case 0x6c:          /* insb */
            case 0x6d:          /* insw */
            case 0x6e:          /* outsb */
            case 0x6f:          /* outsw */
                io_port = regs->edx & 0xffff;

            do_in_out:
                if (!data_32)
                    opcode |= 0x6600;   /* word IO */

                switch (emulate_io(regs, opcode, io_port)) {
                case EM_IO_DONE:
                    /* instruction executed */
                    break;
                case EM_IO_RETRY:
                    /* port mapped, retry instruction */
                    thread->recover = 0;
                    return TRUE;
                case EM_IO_ERROR:
                    /* port not mapped */
                    thread->recover = 0;
                    return FALSE;
                }
                break;

            case 0xfa:          /* cli */
                if (!v86_do_sti_cli) {
                    thread->recover = 0;
                    return (FALSE);
                }

                v86->flags &= ~EFL_IF;
                                /* disable simulated interrupts */
                cli_count++;
                break;

            case 0xfb:          /* sti */
                if (!v86_do_sti_cli) {
                    thread->recover = 0;
                    return (FALSE);
                }

                if ((v86->flags & EFL_IF) == 0) {
                    if (v86_do_sti_immediate) {
                        v86->flags |= EFL_IF;
                    } else {
                        v86->flags |= V86_IF_PENDING;
                        regs->efl |= EFL_TF;
                    }
                                /* single step to set IF next inst. */
                }
                sti_count++;
                break;

            case 0x9c:          /* pushf */
            {
                int flags;
                vm_offset_t sp;
                int size;

                flags = regs->efl;
                if ((v86->flags & EFL_IF) == 0)
                    flags &= ~EFL_IF;

                if ((v86->flags & EFL_TF) == 0)
                    flags &= ~EFL_TF;
                else flags |= EFL_TF;

                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;
                size = (data_32) ? 4 : 2;
                if (sp < size)
                    goto stack_error;
                sp -= size;
                if (copyout((char *)&flags,
                            (char *)Addr8086(regs->ss,sp),
                            size))
                    goto addr_error;
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;
                break;
            }

            case 0x9d:          /* popf */
            {
                vm_offset_t sp;
                int nflags;

                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;

                if (data_32) {
                    if (sp > 0xffff - sizeof(int))
                        goto stack_error;
                    nflags = *(int *)Addr8086(regs->ss,sp);
                    sp += sizeof(int);
                }
                else {
                    if (sp > 0xffff - sizeof(short))
                        goto stack_error;
                    nflags = *(unsigned short *)
                                Addr8086(regs->ss,sp);
                    sp += sizeof(short);
                }
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;

                if (v86->flags & V86_IRET_PENDING) {
                    v86->flags = nflags & (EFL_TF | EFL_IF);
                    v86->flags |= V86_IRET_PENDING;
                } else {
                    v86->flags = nflags & (EFL_TF | EFL_IF);
                }
                regs->efl = (regs->efl & ~EFL_V86_SAFE)
                            | (nflags & EFL_V86_SAFE);
                break;
            }
            case 0xcf:          /* iret */
            {
                vm_offset_t sp;
                int nflags;
                int size;
                union iret_struct iret_struct;

                v86->flags &= ~V86_IRET_PENDING;
                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;

                if (data_32) {
                    if (sp > 0xffff - sizeof(struct iret_32))
                        goto stack_error;
                    iret_struct.iret_32 =
                        *(struct iret_32 *) Addr8086(regs->ss,sp);
                    sp += sizeof(struct iret_32);
                }
                else {
                    if (sp > 0xffff - sizeof(struct iret_16))
                        goto stack_error;
                    iret_struct.iret_16 =
                        *(struct iret_16 *) Addr8086(regs->ss,sp);
                    sp += sizeof(struct iret_16);
                }
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;

                if (data_32) {
                    eip      = iret_struct.iret_32.eip;
                    regs->cs = iret_struct.iret_32.cs & 0xffff;
                    nflags   = iret_struct.iret_32.eflags;
                }
                else {
                    eip      = iret_struct.iret_16.ip;
                    regs->cs = iret_struct.iret_16.cs;
                    nflags   = iret_struct.iret_16.flags;
                }

                v86->flags = nflags & (EFL_TF | EFL_IF);
                regs->efl = (regs->efl & ~EFL_V86_SAFE)
                            | (nflags & EFL_V86_SAFE);
                break;
            }
            default:
                /*
                 * Instruction not emulated here.
                 */
                thread->recover = 0;
                return FALSE;
            }
            break;      /* exit from 'while TRUE' */
        }
        regs->eip = (regs->eip & 0xffff0000) | eip;
    }
    else {
        /*
         * Not a trap we handle.
         */
        thread->recover = 0;
        return FALSE;
    }

    if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING)==0)) {

        struct v86_interrupt_table *int_table;
        int int_count;
        int vec;
        int i;

        int_table = (struct v86_interrupt_table *) v86->int_table;
        int_count = v86->int_count;

        vec = 0;
        for (i = 0; i < int_count; int_table++, i++) {
            if (!int_table->mask && int_table->count > 0) {
                int_table->count--;
                vec = int_table->vec;
                break;
            }
        }
        if (vec != 0) {
            /*
             * Take this interrupt
             */
            vm_offset_t sp;
            struct iret_16 iret_16;
            struct int_vec int_vec;

            sp = regs->uesp & 0xffff;
            if (sp < sizeof(struct iret_16))
                goto stack_error;
            sp -= sizeof(struct iret_16);
            iret_16.ip = regs->eip;
            iret_16.cs = regs->cs;
            iret_16.flags = regs->efl & 0xFFFF;
            if ((v86->flags & EFL_TF) == 0)
                iret_16.flags &= ~EFL_TF;
            else iret_16.flags |= EFL_TF;

            (void) memcpy((char *) &int_vec,
                          (char *) (sizeof(struct int_vec) * vec),
                          sizeof (struct int_vec));
            if (copyout((char *)&iret_16,
                        (char *)Addr8086(regs->ss,sp),
                        sizeof(struct iret_16)))
                goto addr_error;
            regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
            regs->eip = int_vec.ip;
            regs->cs = int_vec.cs;
            regs->efl &= ~EFL_TF;
            v86->flags &= ~(EFL_IF | EFL_TF);
            v86->flags |= V86_IRET_PENDING;
        }
    }
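
    /*
     * The delivery above mimics the 8086 interrupt sequence: push a
     * 16-bit flags/cs/ip frame onto the guest stack, fetch the
     * handler's cs:ip from the vector table at linear address
     * vec * 4 (the memcpy above), and keep the virtual IF and TF
     * clear until the handler's iret.
     */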

    thread->recover = 0;
    return TRUE;

    /*
     * On address error, report a page fault.
     * XXX report GP fault - we don't save
     * the faulting address.
     */
  addr_error:
    __asm__("addr_error:;");
    thread->recover = 0;
    return FALSE;

    /*
     * On stack address error, return stack fault (12).
     */
  stack_error:
    thread->recover = 0;
    regs->trapno = T_STACK_FAULT;
    return FALSE;
}

/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void log_thread_action (thread_t, char *);

void
i386_astintr(int preemption)
{
    int mycpu;
    ast_t mask = AST_ALL;
    spl_t s;
    thread_t self = current_thread();

    s = splsched();         /* block interrupts to check reasons */
    mp_disable_preemption();
    mycpu = cpu_number();
    if (need_ast[mycpu] & AST_I386_FP) {
        /*
         * AST was for delayed floating-point exception -
         * FP interrupt occurred while in kernel.
         * Turn off this AST reason and handle the FPU error.
         */

        ast_off(AST_I386_FP);
        mp_enable_preemption();
        splx(s);

        fpexterrflt();
    }
    else {
        /*
         * Not an FPU trap.  Handle the AST.
         * Interrupts are still blocked.
         */

#ifdef XXX
        if (preemption) {

            /*
             * We don't want to process any AST if we were in
             * kernel-mode and the current thread is in any
             * funny state (waiting and/or suspended).
             */

            thread_lock (self);

            if (thread_not_preemptable(self) || self->preempt) {
                ast_off(AST_URGENT);
                thread_unlock (self);
                mp_enable_preemption();
                splx(s);
                return;
            }
            else mask = AST_PREEMPT;
            mp_enable_preemption();

/*
            self->preempt = TH_NOT_PREEMPTABLE;
*/

            thread_unlock (self);
        } else {
            mp_enable_preemption();
        }
#else
        mp_enable_preemption();
#endif

        ast_taken(mask, s
#if FAST_IDLE
                  ,NO_IDLE_THREAD
#endif /* FAST_IDLE */
                  );
/*
        self->preempt = TH_PREEMPTABLE;
*/
    }
}

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
    int exc,
    int code,
    int subcode)
{
    spl_t s;
    exception_data_type_t codes[EXCEPTION_CODE_MAX];

    /*
     * Turn off delayed FPU error handling.
     */
    s = splsched();
    mp_disable_preemption();
    ast_off(AST_I386_FP);
    mp_enable_preemption();
    splx(s);

    codes[0] = code;            /* new exception interface */
    codes[1] = subcode;
    exception(exc, codes, 2);
    /*NOTREACHED*/
}
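
/*
 * For example, an unresolved user page fault arrives here as
 * i386_exception(EXC_BAD_ACCESS, kr, regs->cr2): codes[0] carries the
 * kern_return_t from vm_fault() and codes[1] the faulting address,
 * both handed to exception() for delivery to the registered
 * exception port(s).
 */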

boolean_t
check_io_fault(
    struct i386_saved_state *regs)
{
    int eip, opcode, io_port;
    boolean_t data_16 = FALSE;

    /*
     * Get the instruction.
     */
    eip = regs->eip;

    for (;;) {
        opcode = inst_fetch(eip, regs->cs);
        eip++;
        switch (opcode) {
        case 0x66:      /* data-size prefix */
            data_16 = TRUE;
            continue;

        case 0xf3:      /* rep prefix */
        case 0x26:      /* es */
        case 0x2e:      /* cs */
        case 0x36:      /* ss */
        case 0x3e:      /* ds */
        case 0x64:      /* fs */
        case 0x65:      /* gs */
            continue;

        case 0xE4:      /* inb imm */
        case 0xE5:      /* inl imm */
        case 0xE6:      /* outb imm */
        case 0xE7:      /* outl imm */
            /* port is immediate byte */
            io_port = inst_fetch(eip, regs->cs);
            eip++;
            break;

        case 0xEC:      /* inb dx */
        case 0xED:      /* inl dx */
        case 0xEE:      /* outb dx */
        case 0xEF:      /* outl dx */
        case 0x6C:      /* insb */
        case 0x6D:      /* insl */
        case 0x6E:      /* outsb */
        case 0x6F:      /* outsl */
            /* port is in DX register */
            io_port = regs->edx & 0xFFFF;
            break;

        default:
            return FALSE;
        }
        break;
    }

    if (data_16)
        opcode |= 0x6600;       /* word IO */

    switch (emulate_io(regs, opcode, io_port)) {
    case EM_IO_DONE:
        /* instruction executed */
        regs->eip = eip;
        return TRUE;

    case EM_IO_RETRY:
        /* port mapped, retry instruction */
        return TRUE;

    case EM_IO_ERROR:
        /* port not mapped */
        return FALSE;
    }
    return FALSE;
}
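
/*
 * Note the asymmetry above: EM_IO_DONE advances regs->eip past the
 * emulated instruction, while EM_IO_RETRY leaves it in place so the
 * same IN/OUT re-executes now that the port has been mapped in.
 */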

void
kernel_preempt_check (void)
{
    mp_disable_preemption();
    if ((need_ast[cpu_number()] & AST_URGENT) &&
#if NCPUS > 1
        get_interrupt_level() == 1
#else /* NCPUS > 1 */
        get_interrupt_level() == 0
#endif /* NCPUS > 1 */
        ) {
        mp_enable_preemption_no_check();
        __asm__ volatile (" int $0xff");
    } else {
        mp_enable_preemption_no_check();
    }
}
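
/*
 * The "int $0xff" raises a software interrupt that re-enters the
 * trap path, where the urgent AST is actually taken.  The interrupt
 * level tested differs by configuration, presumably reflecting the
 * context this is reached from on multiprocessor versus uniprocessor
 * builds (inside an interrupt at level 1 versus at base level).
 */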

#if MACH_KDB

extern void db_i386_state(struct i386_saved_state *regs);

#include <ddb/db_output.h>

void
db_i386_state(
    struct i386_saved_state *regs)
{
    db_printf("eip   %8x\n", regs->eip);
    db_printf("trap  %8x\n", regs->trapno);
    db_printf("err   %8x\n", regs->err);
    db_printf("efl   %8x\n", regs->efl);
    db_printf("ebp   %8x\n", regs->ebp);
    db_printf("esp   %8x\n", regs->esp);
    db_printf("uesp  %8x\n", regs->uesp);
    db_printf("cs    %8x\n", regs->cs & 0xff);
    db_printf("ds    %8x\n", regs->ds & 0xff);
    db_printf("es    %8x\n", regs->es & 0xff);
    db_printf("fs    %8x\n", regs->fs & 0xff);
    db_printf("gs    %8x\n", regs->gs & 0xff);
    db_printf("ss    %8x\n", regs->ss & 0xff);
    db_printf("eax   %8x\n", regs->eax);
    db_printf("ebx   %8x\n", regs->ebx);
    db_printf("ecx   %8x\n", regs->ecx);
    db_printf("edx   %8x\n", regs->edx);
    db_printf("esi   %8x\n", regs->esi);
    db_printf("edi   %8x\n", regs->edi);
}

#endif /* MACH_KDB */