/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * Hardware trap/fault handler.
 */

#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif /* MACH_KGDB */

#include <i386/intel_read_fault.h>

#if MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif /* MACH_KDB */

#include <string.h>

#include <i386/io_emulate.h>

/*
 * Forward declarations
 */
extern void user_page_fault_continue(
    kern_return_t kr);

extern boolean_t v86_assist(
    thread_t thread,
    struct i386_saved_state *regs);

extern boolean_t check_io_fault(
    struct i386_saved_state *regs);

extern int inst_fetch(
    int eip,
    int cs);

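/*
 * Set the return value for a thread coming back from a Mach trap and
 * resume user mode.  On i386 the kern_return_t is delivered in EAX,
 * which is where the user-mode trap stub expects it.
 */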
void
thread_syscall_return(
    kern_return_t ret)
{
    register thread_t thr_act = current_thread();
    register struct i386_saved_state *regs = USER_REGS(thr_act);

    regs->eax = ret;
    thread_exception_return();
    /*NOTREACHED*/
}


#if MACH_KDB
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

void
thread_kdb_return(void)
{
    register thread_t thread = current_thread();
    register struct i386_saved_state *regs = USER_REGS(thread);

    if (kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
        assert(thread->mutex_count == 0);
#endif /* MACH_LDEBUG */
        thread_exception_return();
        /*NOTREACHED*/
    }
}

boolean_t let_ddb_vm_fault = FALSE;

#endif /* MACH_KDB */

void
user_page_fault_continue(
    kern_return_t kr)
{
    register thread_t thread = current_thread();
    register struct i386_saved_state *regs = USER_REGS(thread);

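    /*
     * KERN_SUCCESS means the page was brought in; KERN_ABORTED means
     * the fault was interrupted while THREAD_ABORTSAFE.  In either
     * case, returning to user mode retries the faulting instruction.
     */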
    if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if MACH_KDB
        if (!db_breakpoints_inserted) {
            db_set_breakpoints();
        }
        if (db_watchpoint_list &&
            db_watchpoints_inserted &&
            (regs->err & T_PF_WRITE) &&
            db_find_watchpoint(thread->map,
                               (vm_offset_t)regs->cr2,
                               regs))
            kdb_trap(T_WATCHPOINT, 0, regs);
#endif /* MACH_KDB */
        thread_exception_return();
        /*NOTREACHED*/
    }

#if MACH_KDB
    if (debug_all_traps_with_kdb &&
        kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
        assert(thread->mutex_count == 0);
#endif /* MACH_LDEBUG */
        thread_exception_return();
        /*NOTREACHED*/
    }
#endif /* MACH_KDB */

    i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
    /*NOTREACHED*/
}

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
    uint32_t fault_addr;
    uint32_t recover_addr;
};

extern struct recovery recover_table[];
extern struct recovery recover_table_end[];

/*
 * Recovery from a successful fault in copyout does not return
 * directly; it retries the PTE check, since the 386 ignores write
 * protection in kernel mode.
 */
extern struct recovery retry_table[];
extern struct recovery retry_table_end[];
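
/*
 * Each table entry pairs the address of an instruction that may fault
 * with the address to resume at.  A copy routine registers a window
 * roughly like this (illustrative pseudo-assembly only, not the
 * actual locore macros):
 *
 *      .long   1f, copy_fail       # <fault_addr, recover_addr> entry
 * 1:   movl    (%esi), %eax        # may fault on the user address
 *
 * kernel_trap() scans recover_table for the faulting EIP and, on a
 * match, rewrites EIP to the recovery address instead of panicking.
 */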

const char *trap_type[] = {TRAP_NAMES};
int TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);


/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
boolean_t
kernel_trap(
    register struct i386_saved_state *regs)
{
    int                 code;
    unsigned int        subcode;
    int                 interruptible = THREAD_UNINT;
    register int        type;
    vm_map_t            map;
    kern_return_t       result = KERN_FAILURE;
    register thread_t   thread;
#if MACH_KDB
    pt_entry_t          *pte;   /* for the watchpoint check below */
#endif /* MACH_KDB */

    type = regs->trapno;
    code = regs->err;
    thread = current_thread();

    switch (type) {
    case T_PREEMPT:
        ast_taken(AST_PREEMPTION, FALSE);
        return (TRUE);

    case T_NO_FPU:
        fpnoextflt();
        return (TRUE);

    case T_FPU_FAULT:
        fpextovrflt();
        return (TRUE);

    case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        return (TRUE);

    case T_PAGE_FAULT:
        /*
         * If the current map is a submap of the kernel map,
         * and the address is within that map, fault on that
         * map.  If the same check is done in vm_fault
         * (vm_map_lookup), we may deadlock on the kernel map
         * lock.
         */
#if MACH_KDB
        mp_disable_preemption();
        if (db_active
            && kdb_active[cpu_number()]
            && !let_ddb_vm_fault) {
            /*
             * Force kdb to handle this one.
             */
            mp_enable_preemption();
            return (FALSE);
        }
        mp_enable_preemption();
#endif /* MACH_KDB */
        subcode = regs->cr2;            /* get faulting address */

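        /*
         * Pick the map to fault against: addresses above the linear
         * kernel base always live in the kernel map; everything else
         * is resolved through the current thread's map.
         */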
        if (subcode > LINEAR_KERNEL_ADDRESS) {
            map = kernel_map;
        } else if (thread == THREAD_NULL) {
            map = kernel_map;
        } else {
            map = thread->map;
        }
#if MACH_KDB
        /*
         * Check for watchpoint on kernel static data.
         * vm_fault would fail in this case.
         */
        if (map == kernel_map &&
            db_watchpoint_list &&
            db_watchpoints_inserted &&
            (code & T_PF_WRITE) &&
            (vm_offset_t)subcode < vm_last_phys &&
            ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
             INTEL_PTE_WRITE) == 0) {
            *pte = *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE;    /* XXX need invltlb here? */
            result = KERN_SUCCESS;
        } else
#endif /* MACH_KDB */
        {
            /*
             * Since the 386 ignores write protection in
             * kernel mode, always try for write permission
             * first.  If that fails and the fault was a
             * read fault, retry with read permission.
             */
            if (map == kernel_map) {
                register struct recovery *rp;

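                /*
                 * A fault taken inside a registered copyin/copyout
                 * window may be aborted safely; the recovery stub
                 * backs the operation out.
                 */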
                interruptible = THREAD_UNINT;
                for (rp = recover_table; rp < recover_table_end; rp++) {
                    if (regs->eip == rp->fault_addr) {
                        interruptible = THREAD_ABORTSAFE;
                        break;
                    }
                }
            }
            result = vm_fault(map,
                              trunc_page((vm_offset_t)subcode),
                              VM_PROT_READ|VM_PROT_WRITE,
                              FALSE,
                              (map == kernel_map) ? interruptible : THREAD_ABORTSAFE, NULL, 0);
        }
#if MACH_KDB
        if (result == KERN_SUCCESS) {
            /* Look for watchpoints */
            if (db_watchpoint_list &&
                db_watchpoints_inserted &&
                (code & T_PF_WRITE) &&
                db_find_watchpoint(map,
                                   (vm_offset_t)subcode, regs))
                kdb_trap(T_WATCHPOINT, 0, regs);
        }
        else
#endif /* MACH_KDB */
        if ((code & T_PF_WRITE) == 0 &&
            result == KERN_PROTECTION_FAILURE) {
            /*
             * Must expand vm_fault by hand,
             * so that we can ask for read-only access
             * but enter a (kernel) writable mapping.
             */
            result = intel_read_fault(map,
                                      trunc_page((vm_offset_t)subcode));
        }

        if (result == KERN_SUCCESS) {
            /*
             * Certain faults require that we back up
             * the EIP.
             */
            register struct recovery *rp;

            for (rp = retry_table; rp < retry_table_end; rp++) {
                if (regs->eip == rp->fault_addr) {
                    regs->eip = rp->recover_addr;
                    break;
                }
            }
            return (TRUE);
        }

        /* fall through */

    case T_GENERAL_PROTECTION:
        /*
         * If there is a failure recovery address
         * for this fault, go there.
         */
        {
            register struct recovery *rp;

            for (rp = recover_table;
                 rp < recover_table_end;
                 rp++) {
                if (regs->eip == rp->fault_addr) {
                    regs->eip = rp->recover_addr;
                    return (TRUE);
                }
            }
        }

        /*
         * Check thread recovery address also -
         * v86 assist uses it.
         */
        if (thread->recover) {
            regs->eip = thread->recover;
            thread->recover = 0;
            return (TRUE);
        }

        /*
         * Unanticipated page-fault errors in kernel
         * should not happen.
         */
        /* fall through... */

    default:
        /*
         * Exception 15 is reserved but some chips may generate it
         * spuriously.  Seen at startup on AMD Athlon-64.
         */
        if (type == 15) {
            kprintf("kernel_trap() ignoring spurious trap 15\n");
            return (TRUE);
        }

        /*
         * ...and return failure, so that locore can call into
         * debugger.
         */
#if MACH_KDP
        kdp_i386_trap(type, regs, result, regs->cr2);
#endif
        return (FALSE);
    }
    return (TRUE);
}

/*
 * Called if both kernel_trap() and kdb_trap() fail.
 */
void
panic_trap(
    register struct i386_saved_state *regs)
{
    int code;
    register int type;

    type = regs->trapno;
    code = regs->err;

    printf("trap type %d, code = %x, pc = %x\n",
           type, code, regs->eip);
    panic("trap");
}


/*
 * Trap from user mode.
 */
void
user_trap(
    register struct i386_saved_state *regs)
{
    int exc;
    int code;
    unsigned int subcode;
    register int type;
    vm_map_t map;
    vm_prot_t prot;
    kern_return_t result;
    thread_t thread = current_thread();
    boolean_t kernel_act = FALSE;

    if (regs->efl & EFL_VM) {
        /*
         * If hardware assist can handle exception,
         * continue execution.
         */
        if (v86_assist(thread, regs))
            return;
    }

    type = regs->trapno;
    code = 0;
    subcode = 0;
    exc = 0;

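    /*
     * Translate the hardware trap into a Mach exception type (exc)
     * with a machine-dependent code/subcode; the exception is raised
     * via i386_exception() after the switch.
     */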
    switch (type) {

    case T_DIVIDE_ERROR:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_DIV;
        break;

    case T_DEBUG:
        exc = EXC_BREAKPOINT;
        code = EXC_I386_SGL;
        break;

    case T_INT3:
        exc = EXC_BREAKPOINT;
        code = EXC_I386_BPT;
        break;

    case T_OVERFLOW:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_INTO;
        break;

    case T_OUT_OF_BOUNDS:
        exc = EXC_SOFTWARE;
        code = EXC_I386_BOUND;
        break;

    case T_INVALID_OPCODE:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOP;
        break;

    case T_NO_FPU:
    case 32:                    /* XXX */
        fpnoextflt();
        return;

    case T_FPU_FAULT:
        fpextovrflt();
        return;

    case 10:                    /* invalid TSS == iret with NT flag set */
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVTSSFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_STACK_FAULT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_GENERAL_PROTECTION:
        if (!(regs->efl & EFL_VM)) {
            if (check_io_fault(regs))
                return;
        }
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_GPFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_PAGE_FAULT:
        subcode = regs->cr2;
        prot = VM_PROT_READ|VM_PROT_WRITE;
        if (kernel_act == FALSE) {
            if (!(regs->err & T_PF_WRITE))
                prot = VM_PROT_READ;
            (void) user_page_fault_continue(vm_fault(thread->map,
                    trunc_page((vm_offset_t)subcode),
                    prot,
                    FALSE,
                    THREAD_ABORTSAFE, NULL, 0));
            /* NOTREACHED */
        }
        else {
            if (subcode > LINEAR_KERNEL_ADDRESS) {
                map = kernel_map;
            } else {
                map = thread->map;
            }
            result = vm_fault(thread->map,
                    trunc_page((vm_offset_t)subcode),
                    prot,
                    FALSE,
                    (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);
            if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) {
                /*
                 * Must expand vm_fault by hand,
                 * so that we can ask for read-only access
                 * but enter a (kernel) writable mapping.
                 */
                result = intel_read_fault(thread->map,
                        trunc_page((vm_offset_t)subcode));
            }
            user_page_fault_continue(result);
            /*NOTREACHED*/
        }
        break;

    case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        return;

    default:
#if MACH_KGDB
        Debugger("Unanticipated user trap");
        return;
#endif /* MACH_KGDB */
#if MACH_KDB
        if (kdb_trap(type, regs->err, regs))
            return;
#endif /* MACH_KDB */
        printf("user trap type %d, code = %x, pc = %x\n",
               type, regs->err, regs->eip);
        panic("user trap");
        return;
    }

#if MACH_KDB
    if (debug_all_traps_with_kdb &&
        kdb_trap(type, regs->err, regs))
        return;
#endif /* MACH_KDB */

    i386_exception(exc, code, subcode);
    /*NOTREACHED*/
}

/*
 * V86 mode assist for interrupt handling.
 */
boolean_t v86_assist_on = TRUE;
boolean_t v86_unsafe_ok = FALSE;
boolean_t v86_do_sti_cli = TRUE;
boolean_t v86_do_sti_immediate = FALSE;

#define V86_IRET_PENDING 0x4000
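/*
 * V86_IRET_PENDING is kept in v86->flags alongside the virtualized
 * EFL_IF/EFL_TF bits.  It is set when a simulated interrupt frame is
 * pushed and cleared when the v86 task executes the matching iret,
 * so that nested interrupts are not delivered in between.
 */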

int cli_count = 0;
int sti_count = 0;

boolean_t
v86_assist(
    thread_t thread,
    register struct i386_saved_state *regs)
{
    register struct v86_assist_state *v86 = &thread->machine.pcb->ims.v86s;

/*
 * Build an 8086 address.  Use only when off is known to be 16 bits.
 */
#define Addr8086(seg,off)   ((((seg) & 0xffff) << 4) + (off))
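/*
 * Example: Addr8086(0x1234, 0x0010) = (0x1234 << 4) + 0x10
 *                                   = 0x12340 + 0x10 = 0x12350,
 * the standard real-mode segment:offset to linear translation.
 */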

#define EFL_V86_SAFE    ( EFL_OF | EFL_DF | EFL_TF \
                        | EFL_SF | EFL_ZF | EFL_AF \
                        | EFL_PF | EFL_CF )
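/*
 * The arithmetic and trace bits a v86 task may set directly via
 * popf/iret; the interrupt flag is never taken from the user image
 * but is virtualized through v86->flags instead.
 */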
    struct iret_32 {
        int eip;
        int cs;
        int eflags;
    };
    struct iret_16 {
        unsigned short ip;
        unsigned short cs;
        unsigned short flags;
    };
    union iret_struct {
        struct iret_32 iret_32;
        struct iret_16 iret_16;
    };

    struct int_vec {
        unsigned short ip;
        unsigned short cs;
    };

    if (!v86_assist_on)
        return FALSE;

    /*
     * If a delayed STI is pending, enable interrupts.
     * Turn off tracing if it was set only to delay the STI.
     */
    if (v86->flags & V86_IF_PENDING) {
        v86->flags &= ~V86_IF_PENDING;
        v86->flags |= EFL_IF;
        if ((v86->flags & EFL_TF) == 0)
            regs->efl &= ~EFL_TF;
    }

    if (regs->trapno == T_DEBUG) {
        if (v86->flags & EFL_TF) {
            /*
             * Trace flag was also set - it has priority
             */
            return FALSE;           /* handle as single-step */
        }
        /*
         * Fall through to check for interrupts.
         */
    }
    else if (regs->trapno == T_GENERAL_PROTECTION) {
        /*
         * General protection error - must be an 8086 instruction
         * to emulate.
         */
        register int eip;
        boolean_t addr_32 = FALSE;
        boolean_t data_32 = FALSE;
        int io_port;

        /*
         * Set up error handler for bad instruction/data
         * fetches.
         */
        __asm__("movl $(addr_error), %0" : "=m" (thread->recover));

        eip = regs->eip;
        while (TRUE) {
            unsigned char opcode;

            if (eip > 0xFFFF) {
                thread->recover = 0;
                return FALSE;       /* GP fault: IP out of range */
            }

            opcode = *(unsigned char *)Addr8086(regs->cs,eip);
            eip++;
            switch (opcode) {

            case 0xf0:              /* lock */
            case 0xf2:              /* repne */
            case 0xf3:              /* repe */
            case 0x2e:              /* cs */
            case 0x36:              /* ss */
            case 0x3e:              /* ds */
            case 0x26:              /* es */
            case 0x64:              /* fs */
            case 0x65:              /* gs */
                /* ignore prefix */
                continue;

            case 0x66:              /* data size */
                data_32 = TRUE;
                continue;

            case 0x67:              /* address size */
                addr_32 = TRUE;
                continue;

            case 0xe4:              /* inb imm */
            case 0xe5:              /* inw imm */
            case 0xe6:              /* outb imm */
            case 0xe7:              /* outw imm */
                io_port = *(unsigned char *)Addr8086(regs->cs, eip);
                eip++;
                goto do_in_out;

            case 0xec:              /* inb dx */
            case 0xed:              /* inw dx */
            case 0xee:              /* outb dx */
            case 0xef:              /* outw dx */
            case 0x6c:              /* insb */
            case 0x6d:              /* insw */
            case 0x6e:              /* outsb */
            case 0x6f:              /* outsw */
                io_port = regs->edx & 0xffff;

            do_in_out:
                if (!data_32)
                    opcode |= 0x6600;       /* word IO */

                switch (emulate_io(regs, opcode, io_port)) {
                case EM_IO_DONE:
                    /* instruction executed */
                    break;
                case EM_IO_RETRY:
                    /* port mapped, retry instruction */
                    thread->recover = 0;
                    return TRUE;
                case EM_IO_ERROR:
                    /* port not mapped */
                    thread->recover = 0;
                    return FALSE;
                }
                break;

            case 0xfa:              /* cli */
                if (!v86_do_sti_cli) {
                    thread->recover = 0;
                    return (FALSE);
                }

                v86->flags &= ~EFL_IF;
                /* disable simulated interrupts */
                cli_count++;
                break;

            case 0xfb:              /* sti */
                if (!v86_do_sti_cli) {
                    thread->recover = 0;
                    return (FALSE);
                }

                if ((v86->flags & EFL_IF) == 0) {
                    if (v86_do_sti_immediate) {
                        v86->flags |= EFL_IF;
                    } else {
                        v86->flags |= V86_IF_PENDING;
                        regs->efl |= EFL_TF;
                    }
                    /* single step to set IF next inst. */
                }
                sti_count++;
                break;

            case 0x9c:              /* pushf */
            {
                int flags;
                vm_offset_t sp;
                unsigned int size;

                flags = regs->efl;
                if ((v86->flags & EFL_IF) == 0)
                    flags &= ~EFL_IF;

                if ((v86->flags & EFL_TF) == 0)
                    flags &= ~EFL_TF;
                else
                    flags |= EFL_TF;

                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;
                size = (data_32) ? 4 : 2;
                if (sp < size)
                    goto stack_error;
                sp -= size;
                if (copyout((char *)&flags,
                            (user_addr_t)Addr8086(regs->ss,sp),
                            size))
                    goto addr_error;
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;
                break;
            }

            case 0x9d:              /* popf */
            {
                vm_offset_t sp;
                int nflags;

                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;

                if (data_32) {
                    if (sp > 0xffff - sizeof(int))
                        goto stack_error;
                    nflags = *(int *)Addr8086(regs->ss,sp);
                    sp += sizeof(int);
                }
                else {
                    if (sp > 0xffff - sizeof(short))
                        goto stack_error;
                    nflags = *(unsigned short *)
                             Addr8086(regs->ss,sp);
                    sp += sizeof(short);
                }
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;

                if (v86->flags & V86_IRET_PENDING) {
                    v86->flags = nflags & (EFL_TF | EFL_IF);
                    v86->flags |= V86_IRET_PENDING;
                } else {
                    v86->flags = nflags & (EFL_TF | EFL_IF);
                }
                regs->efl = (regs->efl & ~EFL_V86_SAFE)
                            | (nflags & EFL_V86_SAFE);
                break;
            }

            case 0xcf:              /* iret */
            {
                vm_offset_t sp;
                int nflags;
                union iret_struct iret_struct;

                v86->flags &= ~V86_IRET_PENDING;
                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;

                if (data_32) {
                    if (sp > 0xffff - sizeof(struct iret_32))
                        goto stack_error;
                    iret_struct.iret_32 =
                        *(struct iret_32 *) Addr8086(regs->ss,sp);
                    sp += sizeof(struct iret_32);
                }
                else {
                    if (sp > 0xffff - sizeof(struct iret_16))
                        goto stack_error;
                    iret_struct.iret_16 =
                        *(struct iret_16 *) Addr8086(regs->ss,sp);
                    sp += sizeof(struct iret_16);
                }
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;

                if (data_32) {
                    eip = iret_struct.iret_32.eip;
                    regs->cs = iret_struct.iret_32.cs & 0xffff;
                    nflags = iret_struct.iret_32.eflags;
                }
                else {
                    eip = iret_struct.iret_16.ip;
                    regs->cs = iret_struct.iret_16.cs;
                    nflags = iret_struct.iret_16.flags;
                }

                v86->flags = nflags & (EFL_TF | EFL_IF);
                regs->efl = (regs->efl & ~EFL_V86_SAFE)
                            | (nflags & EFL_V86_SAFE);
                break;
            }

            default:
                /*
                 * Instruction not emulated here.
                 */
                thread->recover = 0;
                return FALSE;
            }
            break;                  /* exit from 'while TRUE' */
        }
        regs->eip = (regs->eip & 0xffff0000) | eip;
    }
    else {
        /*
         * Not a trap we handle.
         */
        thread->recover = 0;
        return FALSE;
    }

    if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING) == 0)) {

        struct v86_interrupt_table *int_table;
        int int_count;
        int vec;
        int i;

        int_table = (struct v86_interrupt_table *) v86->int_table;
        int_count = v86->int_count;

        vec = 0;
        for (i = 0; i < int_count; int_table++, i++) {
            if (!int_table->mask && int_table->count > 0) {
                int_table->count--;
                vec = int_table->vec;
                break;
            }
        }
        if (vec != 0) {
            /*
             * Take this interrupt
             */
            vm_offset_t sp;
            struct iret_16 iret_16;
            struct int_vec int_vec;

            sp = regs->uesp & 0xffff;
            if (sp < sizeof(struct iret_16))
                goto stack_error;
            sp -= sizeof(struct iret_16);
            iret_16.ip = regs->eip;
            iret_16.cs = regs->cs;
            iret_16.flags = regs->efl & 0xFFFF;
            if ((v86->flags & EFL_TF) == 0)
                iret_16.flags &= ~EFL_TF;
            else
                iret_16.flags |= EFL_TF;

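            /*
             * Fetch the handler address from the real-mode interrupt
             * vector table at linear address 0: entry 'vec' is four
             * bytes, offset first, then segment.
             */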
            (void) memcpy((char *) &int_vec,
                          (char *) (sizeof(struct int_vec) * vec),
                          sizeof(struct int_vec));
            if (copyout((char *)&iret_16,
                        (user_addr_t)Addr8086(regs->ss,sp),
                        sizeof(struct iret_16)))
                goto addr_error;
            regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
            regs->eip = int_vec.ip;
            regs->cs = int_vec.cs;
            regs->efl &= ~EFL_TF;
            v86->flags &= ~(EFL_IF | EFL_TF);
            v86->flags |= V86_IRET_PENDING;
        }
    }

    thread->recover = 0;
    return TRUE;

    /*
     * On address error, report a page fault.
     * XXX report GP fault - we don't save
     * the faulting address.
     */
addr_error:
    __asm__("addr_error:;");
    thread->recover = 0;
    return FALSE;

    /*
     * On stack address error, return stack fault (12).
     */
stack_error:
    thread->recover = 0;
    regs->trapno = T_STACK_FAULT;
    return FALSE;
}

/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void log_thread_action(thread_t, char *);

void
i386_astintr(int preemption)
{
    ast_t *my_ast, mask = AST_ALL;
    spl_t s;

    s = splsched();             /* block interrupts to check reasons */
    mp_disable_preemption();
    my_ast = ast_pending();
    if (*my_ast & AST_I386_FP) {
        /*
         * AST was for delayed floating-point exception -
         * FP interrupt occurred while in kernel.
         * Turn off this AST reason and handle the FPU error.
         */
        ast_off(AST_I386_FP);
        mp_enable_preemption();
        splx(s);

        fpexterrflt();
    }
    else {
        /*
         * Not an FPU trap.  Handle the AST.
         * Interrupts are still blocked.
         */
#if 1
        if (preemption) {
            mask = AST_PREEMPTION;
            mp_enable_preemption();
        } else {
            mp_enable_preemption();
        }
#else
        mp_enable_preemption();
#endif

        ast_taken(mask, s);
    }
}

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
    int exc,
    int code,
    int subcode)
{
    spl_t s;
    exception_data_type_t codes[EXCEPTION_CODE_MAX];

    /*
     * Turn off delayed FPU error handling.
     */
    s = splsched();
    mp_disable_preemption();
    ast_off(AST_I386_FP);
    mp_enable_preemption();
    splx(s);

    codes[0] = code;            /* new exception interface */
    codes[1] = subcode;
    exception_triage(exc, codes, 2);
    /*NOTREACHED*/
}

boolean_t
check_io_fault(
    struct i386_saved_state *regs)
{
    int eip, opcode, io_port;
    boolean_t data_16 = FALSE;

    /*
     * Get the instruction.
     */
    eip = regs->eip;

    for (;;) {
        opcode = inst_fetch(eip, regs->cs);
        eip++;
        switch (opcode) {

        case 0x66:              /* data-size prefix */
            data_16 = TRUE;
            continue;

        case 0xf3:              /* rep prefix */
        case 0x26:              /* es */
        case 0x2e:              /* cs */
        case 0x36:              /* ss */
        case 0x3e:              /* ds */
        case 0x64:              /* fs */
        case 0x65:              /* gs */
            continue;

        case 0xE4:              /* inb imm */
        case 0xE5:              /* inl imm */
        case 0xE6:              /* outb imm */
        case 0xE7:              /* outl imm */
            /* port is immediate byte */
            io_port = inst_fetch(eip, regs->cs);
            eip++;
            break;

        case 0xEC:              /* inb dx */
        case 0xED:              /* inl dx */
        case 0xEE:              /* outb dx */
        case 0xEF:              /* outl dx */
        case 0x6C:              /* insb */
        case 0x6D:              /* insl */
        case 0x6E:              /* outsb */
        case 0x6F:              /* outsl */
            /* port is in DX register */
            io_port = regs->edx & 0xFFFF;
            break;

        default:
            return FALSE;
        }
        break;
    }

    if (data_16)
        opcode |= 0x6600;       /* word IO */

    switch (emulate_io(regs, opcode, io_port)) {
    case EM_IO_DONE:
        /* instruction executed */
        regs->eip = eip;
        return TRUE;

    case EM_IO_RETRY:
        /* port mapped, retry instruction */
        return TRUE;

    case EM_IO_ERROR:
        /* port not mapped */
        return FALSE;
    }
    return FALSE;
}

void
kernel_preempt_check(void)
{
    ast_t *myast;

    mp_disable_preemption();
    myast = ast_pending();
    if ((*myast & AST_URGENT) &&
        get_interrupt_level() == 1) {
        mp_enable_preemption_no_check();
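        /*
         * Self-induce the preemption trap: the trap number raised by
         * int $0xff is handled as T_PREEMPT by kernel_trap() above,
         * which answers it with ast_taken(AST_PREEMPTION, FALSE).
         */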
        __asm__ volatile (" int $0xff");
    } else {
        mp_enable_preemption_no_check();
    }
}

#if MACH_KDB

extern void db_i386_state(struct i386_saved_state *regs);

#include <ddb/db_output.h>

void
db_i386_state(
    struct i386_saved_state *regs)
{
    db_printf("eip  %8x\n", regs->eip);
    db_printf("trap %8x\n", regs->trapno);
    db_printf("err  %8x\n", regs->err);
    db_printf("efl  %8x\n", regs->efl);
    db_printf("ebp  %8x\n", regs->ebp);
    db_printf("esp  %8x\n", regs->esp);
    db_printf("uesp %8x\n", regs->uesp);
    db_printf("cs   %8x\n", regs->cs & 0xff);
    db_printf("ds   %8x\n", regs->ds & 0xff);
    db_printf("es   %8x\n", regs->es & 0xff);
    db_printf("fs   %8x\n", regs->fs & 0xff);
    db_printf("gs   %8x\n", regs->gs & 0xff);
    db_printf("ss   %8x\n", regs->ss & 0xff);
    db_printf("eax  %8x\n", regs->eax);
    db_printf("ebx  %8x\n", regs->ebx);
    db_printf("ecx  %8x\n", regs->ecx);
    db_printf("edx  %8x\n", regs->edx);
    db_printf("esi  %8x\n", regs->esi);
    db_printf("edi  %8x\n", regs->edi);
}

#endif /* MACH_KDB */