]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/trap.c
xnu-792.2.4.tar.gz
[apple/xnu.git] / osfmk / i386 / trap.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * Hardware trap/fault handler.
54 */
55
56 #include <mach_kdb.h>
57 #include <mach_kgdb.h>
58 #include <mach_kdp.h>
59 #include <mach_ldebug.h>
60
61 #include <types.h>
62 #include <i386/eflags.h>
63 #include <i386/trap.h>
64 #include <i386/pmap.h>
65 #include <i386/fpu.h>
66
67 #include <mach/exception.h>
68 #include <mach/kern_return.h>
69 #include <mach/vm_param.h>
70 #include <mach/i386/thread_status.h>
71
72 #include <vm/vm_kern.h>
73 #include <vm/vm_fault.h>
74
75 #include <kern/kern_types.h>
76 #include <kern/processor.h>
77 #include <kern/thread.h>
78 #include <kern/task.h>
79 #include <kern/sched.h>
80 #include <kern/sched_prim.h>
81 #include <kern/exception.h>
82 #include <kern/spl.h>
83 #include <kern/misc_protos.h>
84
85 #if MACH_KGDB
86 #include <kgdb/kgdb_defs.h>
87 #endif /* MACH_KGDB */
88
89 #include <i386/intel_read_fault.h>
90
91 #if MACH_KGDB
92 #include <kgdb/kgdb_defs.h>
93 #endif /* MACH_KGDB */
94
95 #if MACH_KDB
96 #include <ddb/db_watch.h>
97 #include <ddb/db_run.h>
98 #include <ddb/db_break.h>
99 #include <ddb/db_trap.h>
100 #endif /* MACH_KDB */
101
102 #include <string.h>
103
104 #include <i386/io_emulate.h>
105
/*
 * Forward declarations
 */

/* Resume a user thread after its page fault has been serviced (or
 * raise EXC_BAD_ACCESS if it could not be). Does not return. */
extern void		user_page_fault_continue(
				kern_return_t		kr);

/* Emulate privileged/flag instructions for a thread executing in
 * virtual-8086 mode; TRUE if the trap was fully handled. */
extern boolean_t	v86_assist(
				thread_t		thread,
				struct i386_saved_state	*regs);

/* Try to satisfy a general-protection fault by emulating the
 * faulting I/O instruction; TRUE if handled or retryable. */
extern boolean_t	check_io_fault(
				struct i386_saved_state	*regs);

/* Fetch one instruction byte at eip within segment cs. */
extern int		inst_fetch(
				int			eip,
				int			cs);
122
123 void
124 thread_syscall_return(
125 kern_return_t ret)
126 {
127 register thread_t thr_act = current_thread();
128 register struct i386_saved_state *regs = USER_REGS(thr_act);
129 regs->eax = ret;
130 thread_exception_return();
131 /*NOTREACHED*/
132 }
133
134
#if	MACH_KDB
/* When TRUE, every user trap is offered to kdb before being raised
 * as a Mach exception. */
boolean_t	debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

/*
 * thread_kdb_return:
 *
 * Hand the current thread's saved user register state to kdb; if kdb
 * handles the trap, resume user-mode execution directly (in which
 * case this function does not return).
 */
void
thread_kdb_return(void)
{
	register thread_t	thread = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thread);

	if (kdb_trap(regs->trapno, regs->err, regs)) {
#if	MACH_LDEBUG
		assert(thread->mutex_count == 0);
#endif	/* MACH_LDEBUG */
		thread_exception_return();
		/*NOTREACHED*/
	}
}
/* When TRUE, allow VM faults to be taken while ddb is active. */
boolean_t let_ddb_vm_fault = FALSE;

#endif	/* MACH_KDB */
158
/*
 * user_page_fault_continue:
 *
 * Continuation run after a user-mode page fault has been processed.
 * On success (or an aborted fault) resume user execution, after
 * letting the debugger inspect write watchpoints; otherwise raise
 * EXC_BAD_ACCESS with the fault result and faulting address.
 * Does not return.
 */
void
user_page_fault_continue(
	kern_return_t	kr)
{
	register thread_t	thread = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thread);

	if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if	MACH_KDB
		/* Re-insert breakpoints that may have been removed while
		 * the fault was serviced. */
		if (!db_breakpoints_inserted) {
			db_set_breakpoints();
		}
		/* A successful write fault may have hit a watchpoint. */
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (regs->err & T_PF_WRITE) &&
		    db_find_watchpoint(thread->map,
				       (vm_offset_t)regs->cr2,
				       regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
#endif	/* MACH_KDB */
		thread_exception_return();
		/*NOTREACHED*/
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(regs->trapno, regs->err, regs)) {
#if	MACH_LDEBUG
		assert(thread->mutex_count == 0);
#endif	/* MACH_LDEBUG */
		thread_exception_return();
		/*NOTREACHED*/
	}
#endif	/* MACH_KDB */

	/* Fault could not be resolved: raise the access exception. */
	i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
	/*NOTREACHED*/
}
197
/*
 * Fault recovery in copyin/copyout routines.
 * Each entry maps the address of a potentially-faulting instruction
 * to the address of its error-recovery code.
 */
struct recovery {
	uint32_t	fault_addr;
	uint32_t	recover_addr;
};

/* Assembler-built table bracketing the copyin/copyout fault sites. */
extern struct recovery	recover_table[];
extern struct recovery	recover_table_end[];

/*
 * Recovery from Successful fault in copyout does not
 * return directly - it retries the pte check, since
 * the 386 ignores write protection in kernel mode.
 */
extern struct recovery	retry_table[];
extern struct recovery	retry_table_end[];

/* Printable trap names, indexed by trap number, and their count. */
const char *	trap_type[] = {TRAP_NAMES};
int	TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
219
220
221 /*
222 * Trap from kernel mode. Only page-fault errors are recoverable,
223 * and then only in special circumstances. All other errors are
224 * fatal. Return value indicates if trap was handled.
225 */
/*
 * kernel_trap:
 *
 * Handle a trap taken while executing in kernel mode.  Only
 * page-fault errors are recoverable (copyin/copyout recovery sites,
 * faults on the current map), and general-protection faults with a
 * registered recovery address.  All other traps are fatal.
 *
 * Returns TRUE if the trap was handled; FALSE tells the caller
 * (locore) to enter the debugger / panic path.
 */
boolean_t
kernel_trap(
	register struct i386_saved_state	*regs)
{
	int	code;			/* hardware error code from the frame */
	unsigned int subcode;		/* faulting address, for page faults */
	int	interruptible = THREAD_UNINT;
	register int	type;
	vm_map_t	map;
	kern_return_t	result = KERN_FAILURE;
	register thread_t	thread;

	type = regs->trapno;
	code = regs->err;
	thread = current_thread();

	switch (type) {
	    case T_PREEMPT:
		/* Preemption trap posted by kernel_preempt_check(). */
		ast_taken(AST_PREEMPTION, FALSE);
		return (TRUE);

	    case T_NO_FPU:
		fpnoextflt();
		return (TRUE);

	    case T_FPU_FAULT:
		fpextovrflt();
		return (TRUE);

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return (TRUE);

	    case T_PAGE_FAULT:
		/*
		 * If the current map is a submap of the kernel map,
		 * and the address is within that map, fault on that
		 * map.  If the same check is done in vm_fault
		 * (vm_map_lookup), we may deadlock on the kernel map
		 * lock.
		 */
#if	MACH_KDB
		mp_disable_preemption();
		if (db_active
		    && kdb_active[cpu_number()]
		    && !let_ddb_vm_fault) {
			/*
			 * Force kdb to handle this one.
			 */
			mp_enable_preemption();
			return (FALSE);
		}
		mp_enable_preemption();
#endif	/* MACH_KDB */
		subcode = regs->cr2;	/* get faulting address */

		/* Select the map to fault against: the kernel map for
		 * kernel-space addresses or when no thread exists yet,
		 * otherwise the current thread's map. */
		if (subcode > LINEAR_KERNEL_ADDRESS) {
			map = kernel_map;
		} else if (thread == THREAD_NULL)
			map = kernel_map;
		else {
			map = thread->map;
		}
#if	MACH_KDB
		/*
		 * Check for watchpoint on kernel static data.
		 * vm_fault would fail in this case
		 *
		 * NOTE(review): `pte` is assigned below but no
		 * declaration of it is visible in this file; confirm it
		 * is declared elsewhere when MACH_KDB is enabled.
		 */
		if (map == kernel_map &&
		    db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (code & T_PF_WRITE) &&
		    (vm_offset_t)subcode < vm_last_phys &&
		    ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
		     INTEL_PTE_WRITE) == 0) {
			*pte = *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE;	/* XXX need invltlb here? */
			result = KERN_SUCCESS;
		} else
#endif	/* MACH_KDB */
		{
			/*
			 * Since the 386 ignores write protection in
			 * kernel mode, always try for write permission
			 * first.  If that fails and the fault was a
			 * read fault, retry with read permission.
			 */
			if (map == kernel_map) {
				register struct recovery *rp;

				/* Faults at a registered copyin/copyout
				 * site are safe to abort. */
				interruptible = THREAD_UNINT;
				for (rp = recover_table; rp < recover_table_end; rp++) {
					if (regs->eip == rp->fault_addr) {
						interruptible = THREAD_ABORTSAFE;
						break;
					}
				}
			}
			result = vm_fault(map,
					  trunc_page((vm_offset_t)subcode),
					  VM_PROT_READ|VM_PROT_WRITE,
					  FALSE,
					  (map == kernel_map) ? interruptible : THREAD_ABORTSAFE, NULL, 0);
		}
#if	MACH_KDB
		if (result == KERN_SUCCESS) {
			/* Look for watchpoints */
			if (db_watchpoint_list &&
			    db_watchpoints_inserted &&
			    (code & T_PF_WRITE) &&
			    db_find_watchpoint(map,
					       (vm_offset_t)subcode, regs))
				kdb_trap(T_WATCHPOINT, 0, regs);
		}
		else
#endif	/* MACH_KDB */
		if ((code & T_PF_WRITE) == 0 &&
		    result == KERN_PROTECTION_FAILURE)
		{
			/*
			 * Must expand vm_fault by hand,
			 * so that we can ask for read-only access
			 * but enter a (kernel)writable mapping.
			 */
			result = intel_read_fault(map,
						  trunc_page((vm_offset_t)subcode));
		}

		if (result == KERN_SUCCESS) {
			/*
			 * Certain faults require that we back up
			 * the EIP.
			 */
			register struct recovery *rp;

			for (rp = retry_table; rp < retry_table_end; rp++) {
				if (regs->eip == rp->fault_addr) {
					regs->eip = rp->recover_addr;
					break;
				}
			}
			return (TRUE);
		}

		/* fall through */

	    case T_GENERAL_PROTECTION:

		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		{
			register struct recovery *rp;

			for (rp = recover_table;
			     rp < recover_table_end;
			     rp++) {
				if (regs->eip == rp->fault_addr) {
					regs->eip = rp->recover_addr;
					return (TRUE);
				}
			}
		}

		/*
		 * Check thread recovery address also -
		 * v86 assist uses it.
		 */
		if (thread->recover) {
			regs->eip = thread->recover;
			thread->recover = 0;
			return (TRUE);
		}

		/*
		 * Unanticipated page-fault errors in kernel
		 * should not happen.
		 */
		/* fall through... */

	    default:
		/*
		 * Exception 15 is reserved but some chips may generate it
		 * spuriously. Seen at startup on AMD Athlon-64.
		 */
		if (type == 15) {
			kprintf("kernel_trap() ignoring spurious trap 15\n");
			return (TRUE);
		}

		/*
		 * ...and return failure, so that locore can call into
		 * debugger.
		 */
#if	MACH_KDP
		kdp_i386_trap(type, regs, result, regs->cr2);
#endif
		return (FALSE);
	}
	return (TRUE);
}
427
428 /*
429 * Called if both kernel_trap() and kdb_trap() fail.
430 */
431 void
432 panic_trap(
433 register struct i386_saved_state *regs)
434 {
435 int code;
436 register int type;
437
438 type = regs->trapno;
439 code = regs->err;
440
441 printf("trap type %d, code = %x, pc = %x\n",
442 type, code, regs->eip);
443 panic("trap");
444 }
445
446
447 /*
448 * Trap from user mode.
449 */
/*
 * user_trap:
 *
 * Handle a trap taken in user mode.  Traps that can be serviced
 * directly (FP faults, page faults, v86-mode emulation, I/O
 * emulation) are; the remainder are translated into a Mach exception
 * (exc/code/subcode) and raised via i386_exception(), which does not
 * return.
 */
void
user_trap(
	register struct i386_saved_state	*regs)
{
	int	exc;
	int	code;
	unsigned int subcode;
	register int	type;
	vm_map_t	map;
	vm_prot_t	prot;
	kern_return_t	result;
	thread_t	thread = current_thread();
	boolean_t	kernel_act = FALSE;	/* NOTE(review): never set TRUE here */

	if (regs->efl & EFL_VM) {
		/*
		 * If hardware assist can handle exception,
		 * continue execution.
		 */
		if (v86_assist(thread, regs))
			return;
	}

	type = regs->trapno;
	code = 0;
	subcode = 0;
	exc = 0;

	switch (type) {

	    case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	    case T_DEBUG:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_SGL;
		break;

	    case T_INT3:
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	    case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	    case T_OUT_OF_BOUNDS:
		exc = EXC_SOFTWARE;
		code = EXC_I386_BOUND;
		break;

	    case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	    case T_NO_FPU:
	    case 32:		/* XXX */
		fpnoextflt();
		return;

	    case T_FPU_FAULT:
		fpextovrflt();
		return;

	    case 10:		/* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_GENERAL_PROTECTION:
		if (!(regs->efl & EFL_VM)) {
			/* GP fault may be an emulatable I/O instruction. */
			if (check_io_fault(regs))
				return;
		}
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_PAGE_FAULT:
		subcode = regs->cr2;
		prot = VM_PROT_READ|VM_PROT_WRITE;
		if (kernel_act == FALSE) {
			/* Only request write access for write faults. */
			if (!(regs->err & T_PF_WRITE))
				prot = VM_PROT_READ;
			(void) user_page_fault_continue(vm_fault(thread->map,
					trunc_page((vm_offset_t)subcode),
					prot,
					FALSE,
					THREAD_ABORTSAFE, NULL, 0));
			/* NOTREACHED */
		}
		else {
			/*
			 * NOTE(review): dead code -- kernel_act is always
			 * FALSE above.  If ever revived, `map` is read
			 * uninitialized below whenever subcode <=
			 * LINEAR_KERNEL_ADDRESS.
			 */
			if (subcode > LINEAR_KERNEL_ADDRESS) {
				map = kernel_map;
			}
			result = vm_fault(thread->map,
					trunc_page((vm_offset_t)subcode),
					prot,
					FALSE,
					(map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);
			if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) {
				/*
				 * Must expand vm_fault by hand,
				 * so that we can ask for read-only access
				 * but enter a (kernel) writable mapping.
				 */
				result = intel_read_fault(thread->map,
						trunc_page((vm_offset_t)subcode));
			}
			user_page_fault_continue(result);
			/*NOTREACHED*/
		}
		break;

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    default:
#if	MACH_KGDB
		Debugger("Unanticipated user trap");
		return;
#endif	/* MACH_KGDB */
#if	MACH_KDB
		if (kdb_trap(type, regs->err, regs))
			return;
#endif	/* MACH_KDB */
		printf("user trap type %d, code = %x, pc = %x\n",
		       type, regs->err, regs->eip);
		panic("user trap");
		return;
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(type, regs->err, regs))
		return;
#endif	/* MACH_KDB */

	i386_exception(exc, code, subcode);
	/*NOTREACHED*/
}
611
612 /*
613 * V86 mode assist for interrupt handling.
614 */
/* Tuning switches examined by v86_assist() below. */
boolean_t	v86_assist_on = TRUE;		/* master enable for v86 emulation */
boolean_t	v86_unsafe_ok = FALSE;		/* NOTE(review): not referenced in this file */
boolean_t	v86_do_sti_cli = TRUE;		/* emulate cli/sti instructions */
boolean_t	v86_do_sti_immediate = FALSE;	/* sti sets IF now, not after next inst. */

/* Kept in v86s.flags alongside EFL_* bits: an emulated interrupt
 * frame has been pushed and its iret has not yet executed. */
#define	V86_IRET_PENDING 0x4000

/* Statistics: counts of emulated cli/sti instructions. */
int cli_count = 0;
int sti_count = 0;
624
/*
 * v86_assist:
 *
 * Attempt to handle a trap taken by a thread running in virtual-8086
 * mode: emulate I/O and flag-manipulation instructions (in/out,
 * cli/sti, pushf/popf, iret) and deliver pending simulated
 * interrupts.  IF and TF are simulated in v86->flags rather than in
 * the hardware eflags.
 *
 * Returns TRUE if the trap was fully handled (execution may resume),
 * FALSE if the caller must process it as an ordinary trap.
 */
boolean_t
v86_assist(
	thread_t				thread,
	register struct i386_saved_state	*regs)
{
	register struct v86_assist_state *v86 = &thread->machine.pcb->ims.v86s;

/*
 * Build an 8086 address. Use only when off is known to be 16 bits.
 */
#define	Addr8086(seg,off)	((((seg) & 0xffff) << 4) + (off))

#define	EFL_V86_SAFE		(  EFL_OF | EFL_DF | EFL_TF \
				 | EFL_SF | EFL_ZF | EFL_AF \
				 | EFL_PF | EFL_CF )

	/* 32- and 16-bit images of an iret stack frame. */
	struct iret_32 {
		int		eip;
		int		cs;
		int		eflags;
	};
	struct iret_16 {
		unsigned short ip;
		unsigned short cs;
		unsigned short flags;
	};
	union iret_struct {
		struct iret_32 iret_32;
		struct iret_16 iret_16;
	};

	/* Real-mode interrupt vector: ip/cs pair. */
	struct int_vec {
		unsigned short ip;
		unsigned short cs;
	};

	if (!v86_assist_on)
		return FALSE;

	/*
	 * If delayed STI pending, enable interrupts.
	 * Turn off tracing if on only to delay STI.
	 */
	if (v86->flags & V86_IF_PENDING) {
		v86->flags &= ~V86_IF_PENDING;
		v86->flags |= EFL_IF;
		if ((v86->flags & EFL_TF) == 0)
			regs->efl &= ~EFL_TF;
	}

	if (regs->trapno == T_DEBUG) {

		if (v86->flags & EFL_TF) {
			/*
			 * Trace flag was also set - it has priority
			 */
			return FALSE;		/* handle as single-step */
		}
		/*
		 * Fall through to check for interrupts.
		 */
	}
	else if (regs->trapno == T_GENERAL_PROTECTION) {
		/*
		 * General protection error - must be an 8086 instruction
		 * to emulate.
		 */
		register int	eip;
		boolean_t	addr_32 = FALSE;
		boolean_t	data_32 = FALSE;
		int		io_port;

		/*
		 * Set up error handler for bad instruction/data
		 * fetches.
		 *
		 * NOTE(review): this asm stores the addr_error label
		 * address through an input ("m") constraint; confirm the
		 * intended store occurs (an output constraint would be
		 * conventional).
		 */
		__asm__("movl $(addr_error), %0" : : "m" (thread->recover));

		eip = regs->eip;
		while (TRUE) {
			unsigned char	opcode;

			/* IP must stay within the 64K code segment. */
			if (eip > 0xFFFF) {
				thread->recover = 0;
				return FALSE;	/* GP fault: IP out of range */
			}

			opcode = *(unsigned char *)Addr8086(regs->cs,eip);
			eip++;
			switch (opcode) {
			    case 0xf0:		/* lock */
			    case 0xf2:		/* repne */
			    case 0xf3:		/* repe */
			    case 0x2e:		/* cs */
			    case 0x36:		/* ss */
			    case 0x3e:		/* ds */
			    case 0x26:		/* es */
			    case 0x64:		/* fs */
			    case 0x65:		/* gs */
				/* ignore prefix */
				continue;

			    case 0x66:		/* data size */
				data_32 = TRUE;
				continue;

			    case 0x67:		/* address size */
				addr_32 = TRUE;
				continue;

			    case 0xe4:		/* inb imm */
			    case 0xe5:		/* inw imm */
			    case 0xe6:		/* outb imm */
			    case 0xe7:		/* outw imm */
				/* port number is the immediate byte */
				io_port = *(unsigned char *)Addr8086(regs->cs, eip);
				eip++;
				goto do_in_out;

			    case 0xec:		/* inb dx */
			    case 0xed:		/* inw dx */
			    case 0xee:		/* outb dx */
			    case 0xef:		/* outw dx */
			    case 0x6c:		/* insb */
			    case 0x6d:		/* insw */
			    case 0x6e:		/* outsb */
			    case 0x6f:		/* outsw */
				/* port number is in DX */
				io_port = regs->edx & 0xffff;

			    do_in_out:
				if (!data_32)
					opcode |= 0x6600;	/* word IO */

				switch (emulate_io(regs, opcode, io_port)) {
				    case EM_IO_DONE:
					/* instruction executed */
					break;
				    case EM_IO_RETRY:
					/* port mapped, retry instruction */
					thread->recover = 0;
					return TRUE;
				    case EM_IO_ERROR:
					/* port not mapped */
					thread->recover = 0;
					return FALSE;
				}
				break;

			    case 0xfa:		/* cli */
				if (!v86_do_sti_cli) {
					thread->recover = 0;
					return (FALSE);
				}

				v86->flags &= ~EFL_IF;
						/* disable simulated interrupts */
				cli_count++;
				break;

			    case 0xfb:		/* sti */
				if (!v86_do_sti_cli) {
					thread->recover = 0;
					return (FALSE);
				}

				if ((v86->flags & EFL_IF) == 0) {
					if (v86_do_sti_immediate) {
						v86->flags |= EFL_IF;
					} else {
						/* Defer enabling IF until after
						 * the next instruction, via the
						 * trace trap. */
						v86->flags |= V86_IF_PENDING;
						regs->efl |= EFL_TF;
					}
						/* single step to set IF next inst. */
				}
				sti_count++;
				break;

			    case 0x9c:		/* pushf */
			    {
				int	flags;
				vm_offset_t sp;
				unsigned int size;

				/* Merge the simulated IF/TF into the pushed image. */
				flags = regs->efl;
				if ((v86->flags & EFL_IF) == 0)
				    flags &= ~EFL_IF;

				if ((v86->flags & EFL_TF) == 0)
				    flags &= ~EFL_TF;
				else flags |= EFL_TF;

				sp = regs->uesp;
				if (!addr_32)
				    sp &= 0xffff;
				else if (sp > 0xffff)
				    goto stack_error;
				size = (data_32) ? 4 : 2;
				if (sp < size)
				    goto stack_error;
				sp -= size;
				if (copyout((char *)&flags,
					    (user_addr_t)Addr8086(regs->ss,sp),
					    size))
				    goto addr_error;
				if (addr_32)
				    regs->uesp = sp;
				else
				    regs->uesp = (regs->uesp & 0xffff0000) | sp;
				break;
			    }

			    case 0x9d:		/* popf */
			    {
				vm_offset_t sp;
				int	nflags;

				sp = regs->uesp;
				if (!addr_32)
				    sp &= 0xffff;
				else if (sp > 0xffff)
				    goto stack_error;

				if (data_32) {
				    if (sp > 0xffff - sizeof(int))
					goto stack_error;
				    nflags = *(int *)Addr8086(regs->ss,sp);
				    sp += sizeof(int);
				}
				else {
				    if (sp > 0xffff - sizeof(short))
					goto stack_error;
				    nflags = *(unsigned short *)
						Addr8086(regs->ss,sp);
				    sp += sizeof(short);
				}
				if (addr_32)
				    regs->uesp = sp;
				else
				    regs->uesp = (regs->uesp & 0xffff0000) | sp;

				/* IF/TF stay simulated; only "safe" bits reach
				 * the real eflags. */
				if (v86->flags & V86_IRET_PENDING) {
					v86->flags = nflags & (EFL_TF | EFL_IF);
					v86->flags |= V86_IRET_PENDING;
				} else {
					v86->flags = nflags & (EFL_TF | EFL_IF);
				}
				regs->efl = (regs->efl & ~EFL_V86_SAFE)
					    | (nflags & EFL_V86_SAFE);
				break;
			    }
			    case 0xcf:		/* iret */
			    {
				vm_offset_t sp;
				int	nflags;
				union iret_struct iret_struct;

				v86->flags &= ~V86_IRET_PENDING;
				sp = regs->uesp;
				if (!addr_32)
				    sp &= 0xffff;
				else if (sp > 0xffff)
				    goto stack_error;

				if (data_32) {
				    if (sp > 0xffff - sizeof(struct iret_32))
					goto stack_error;
				    iret_struct.iret_32 =
					*(struct iret_32 *) Addr8086(regs->ss,sp);
				    sp += sizeof(struct iret_32);
				}
				else {
				    if (sp > 0xffff - sizeof(struct iret_16))
					goto stack_error;
				    iret_struct.iret_16 =
					*(struct iret_16 *) Addr8086(regs->ss,sp);
				    sp += sizeof(struct iret_16);
				}
				if (addr_32)
				    regs->uesp = sp;
				else
				    regs->uesp = (regs->uesp & 0xffff0000) | sp;

				if (data_32) {
				    eip       = iret_struct.iret_32.eip;
				    regs->cs  = iret_struct.iret_32.cs & 0xffff;
				    nflags    = iret_struct.iret_32.eflags;
				}
				else {
				    eip       = iret_struct.iret_16.ip;
				    regs->cs  = iret_struct.iret_16.cs;
				    nflags    = iret_struct.iret_16.flags;
				}

				v86->flags = nflags & (EFL_TF | EFL_IF);
				regs->efl = (regs->efl & ~EFL_V86_SAFE)
					    | (nflags & EFL_V86_SAFE);
				break;
			    }
			    default:
				/*
				 * Instruction not emulated here.
				 */
				thread->recover = 0;
				return FALSE;
			}
			break;	/* exit from 'while TRUE' */
		}
		/* Advance the saved IP past the emulated instruction. */
		regs->eip = (regs->eip & 0xffff0000) | eip;
	}
	else {
		/*
		 * Not a trap we handle.
		 */
		thread->recover = 0;
		return FALSE;
	}

	/* Deliver a pending simulated interrupt, if IF is enabled and
	 * no emulated iret is outstanding. */
	if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING)==0)) {

		struct v86_interrupt_table *int_table;
		int int_count;
		int vec;
		int i;

		int_table = (struct v86_interrupt_table *) v86->int_table;
		int_count = v86->int_count;

		vec = 0;
		for (i = 0; i < int_count; int_table++, i++) {
			if (!int_table->mask && int_table->count > 0) {
				int_table->count--;
				vec = int_table->vec;
				break;
			}
		}
		if (vec != 0) {
			/*
			 * Take this interrupt
			 */
			vm_offset_t	sp;
			struct iret_16 iret_16;
			struct int_vec int_vec;

			sp = regs->uesp & 0xffff;
			if (sp < sizeof(struct iret_16))
			    goto stack_error;
			sp -= sizeof(struct iret_16);
			iret_16.ip = regs->eip;
			iret_16.cs = regs->cs;
			iret_16.flags = regs->efl & 0xFFFF;
			if ((v86->flags & EFL_TF) == 0)
			    iret_16.flags &= ~EFL_TF;
			else iret_16.flags |= EFL_TF;

			/* Fetch the handler address from the real-mode
			 * interrupt vector table at linear address 0. */
			(void) memcpy((char *) &int_vec,
				      (char *) (sizeof(struct int_vec) * vec),
				      sizeof (struct int_vec));
			if (copyout((char *)&iret_16,
				    (user_addr_t)Addr8086(regs->ss,sp),
				    sizeof(struct iret_16)))
			    goto addr_error;
			regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
			regs->eip = int_vec.ip;
			regs->cs  = int_vec.cs;
			regs->efl &= ~EFL_TF;
			v86->flags &= ~(EFL_IF | EFL_TF);
			v86->flags |= V86_IRET_PENDING;
		}
	}

	thread->recover = 0;
	return TRUE;

	/*
	 * On address error, report a page fault.
	 * XXX report GP fault - we don`t save
	 * the faulting address.
	 */
    addr_error:
	__asm__("addr_error:;");
	thread->recover = 0;
	return FALSE;

	/*
	 * On stack address error, return stack fault (12).
	 */
    stack_error:
	thread->recover = 0;
	regs->trapno = T_STACK_FAULT;
	return FALSE;
}
1014
1015 /*
1016 * Handle AST traps for i386.
1017 * Check for delayed floating-point exception from
1018 * AT-bus machines.
1019 */
1020
extern void     log_thread_action (thread_t, char *);

/*
 * i386_astintr:
 *
 * Handle an AST trap.  First checks for a delayed floating-point
 * exception (an FP interrupt that occurred while in the kernel, on
 * AT-bus machines); otherwise takes the pending ASTs.  When
 * `preemption` is non-zero the AST mask is restricted to
 * AST_PREEMPTION.
 */
void
i386_astintr(int preemption)
{
	ast_t		*my_ast, mask = AST_ALL;
	spl_t		s;

	s = splsched();		/* block interrupts to check reasons */
	mp_disable_preemption();
	my_ast = ast_pending();
	if (*my_ast & AST_I386_FP) {
		/*
		 * AST was for delayed floating-point exception -
		 * FP interrupt occurred while in kernel.
		 * Turn off this AST reason and handle the FPU error.
		 */

		ast_off(AST_I386_FP);
		mp_enable_preemption();
		splx(s);

		fpexterrflt();
	}
	else {
		/*
		 * Not an FPU trap.  Handle the AST.
		 * Interrupts are still blocked.
		 */

#if 1
		if (preemption) {
			mask = AST_PREEMPTION;
			mp_enable_preemption();
		} else {
			mp_enable_preemption();
		}
#else
		mp_enable_preemption();
#endif

		/* NOTE(review): the saved spl value `s` is passed as
		 * ast_taken()'s second parameter -- confirm this matches
		 * the parameter ast_taken() expects. */
		ast_taken(mask, s);

	}
}
1066
1067 /*
1068 * Handle exceptions for i386.
1069 *
1070 * If we are an AT bus machine, we must turn off the AST for a
1071 * delayed floating-point exception.
1072 *
1073 * If we are providing floating-point emulation, we may have
1074 * to retrieve the real register values from the floating point
1075 * emulator.
1076 */
/*
 * i386_exception:
 *
 * Raise a Mach exception for the current thread.  Cancels any delayed
 * FPU-error AST first, then packs (code, subcode) into the exception
 * code array and calls exception_triage(), which does not return.
 */
void
i386_exception(
	int	exc,
	int	code,
	int	subcode)
{
	spl_t	s;
	exception_data_type_t   codes[EXCEPTION_CODE_MAX];

	/*
	 * Turn off delayed FPU error handling.
	 */
	s = splsched();
	mp_disable_preemption();
	ast_off(AST_I386_FP);
	mp_enable_preemption();
	splx(s);

	codes[0] = code;		/* new exception interface */
	codes[1] = subcode;
	exception_triage(exc, codes, 2);
	/*NOTREACHED*/
}
1100
/*
 * check_io_fault:
 *
 * Attempt to recover from a general-protection fault caused by an I/O
 * instruction: decode the instruction at the saved eip and hand it to
 * emulate_io().  Returns TRUE if the instruction was executed or may
 * be retried, FALSE if the fault must be reported.
 */
boolean_t
check_io_fault(
	struct i386_saved_state	*regs)
{
	int		eip, opcode, io_port;
	boolean_t	data_16 = FALSE;

	/*
	 * Get the instruction.
	 */
	eip = regs->eip;

	for (;;) {
	    opcode = inst_fetch(eip, regs->cs);
	    eip++;
	    switch (opcode) {
		case 0x66:	/* data-size prefix */
		    data_16 = TRUE;
		    continue;

		case 0xf3:	/* rep prefix */
		case 0x26:	/* es */
		case 0x2e:	/* cs */
		case 0x36:	/* ss */
		case 0x3e:	/* ds */
		case 0x64:	/* fs */
		case 0x65:	/* gs */
		    /* prefixes that do not affect decoding here */
		    continue;

		case 0xE4:	/* inb imm */
		case 0xE5:	/* inl imm */
		case 0xE6:	/* outb imm */
		case 0xE7:	/* outl imm */
		    /* port is immediate byte */
		    io_port = inst_fetch(eip, regs->cs);
		    eip++;
		    break;

		case 0xEC:	/* inb dx */
		case 0xED:	/* inl dx */
		case 0xEE:	/* outb dx */
		case 0xEF:	/* outl dx */
		case 0x6C:	/* insb */
		case 0x6D:	/* insl */
		case 0x6E:	/* outsb */
		case 0x6F:	/* outsl */
		    /* port is in DX register */
		    io_port = regs->edx & 0xFFFF;
		    break;

		default:
		    /* not an I/O instruction: cannot recover */
		    return FALSE;
	    }
	    break;
	}

	if (data_16)
	    opcode |= 0x6600;		/* word IO */

	switch (emulate_io(regs, opcode, io_port)) {
	    case EM_IO_DONE:
		/* instruction executed; skip past it on resume */
		regs->eip = eip;
		return TRUE;

	    case EM_IO_RETRY:
		/* port mapped, retry instruction */
		return TRUE;

	    case EM_IO_ERROR:
		/* port not mapped */
		return FALSE;
	}
	return FALSE;
}
1176
1177 void
1178 kernel_preempt_check (void)
1179 {
1180 ast_t *myast;
1181
1182 mp_disable_preemption();
1183 myast = ast_pending();
1184 if ((*myast & AST_URGENT) &&
1185 get_interrupt_level() == 1
1186 ) {
1187 mp_enable_preemption_no_check();
1188 __asm__ volatile (" int $0xff");
1189 } else {
1190 mp_enable_preemption_no_check();
1191 }
1192 }
1193
1194 #if MACH_KDB
1195
1196 extern void db_i386_state(struct i386_saved_state *regs);
1197
1198 #include <ddb/db_output.h>
1199
1200 void
1201 db_i386_state(
1202 struct i386_saved_state *regs)
1203 {
1204 db_printf("eip %8x\n", regs->eip);
1205 db_printf("trap %8x\n", regs->trapno);
1206 db_printf("err %8x\n", regs->err);
1207 db_printf("efl %8x\n", regs->efl);
1208 db_printf("ebp %8x\n", regs->ebp);
1209 db_printf("esp %8x\n", regs->esp);
1210 db_printf("uesp %8x\n", regs->uesp);
1211 db_printf("cs %8x\n", regs->cs & 0xff);
1212 db_printf("ds %8x\n", regs->ds & 0xff);
1213 db_printf("es %8x\n", regs->es & 0xff);
1214 db_printf("fs %8x\n", regs->fs & 0xff);
1215 db_printf("gs %8x\n", regs->gs & 0xff);
1216 db_printf("ss %8x\n", regs->ss & 0xff);
1217 db_printf("eax %8x\n", regs->eax);
1218 db_printf("ebx %8x\n", regs->ebx);
1219 db_printf("ecx %8x\n", regs->ecx);
1220 db_printf("edx %8x\n", regs->edx);
1221 db_printf("esi %8x\n", regs->esi);
1222 db_printf("edi %8x\n", regs->edi);
1223 }
1224
1225 #endif /* MACH_KDB */