/* apple/xnu: osfmk/i386/trap.c */
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * Hardware trap/fault handler.
 */

#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif /* MACH_KGDB */

#include <i386/intel_read_fault.h>

#if MACH_KDB
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif /* MACH_KDB */

#include <string.h>

#include <i386/io_emulate.h>

/*
 * Forward declarations
 */
extern void user_page_fault_continue(
    kern_return_t kr);

extern boolean_t v86_assist(
    thread_t thread,
    struct i386_saved_state *regs);

extern boolean_t check_io_fault(
    struct i386_saved_state *regs);

extern int inst_fetch(
    int eip,
    int cs);

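/*
 * Return from a Mach syscall on behalf of the current thread: the
 * kernel's return code is stored in the saved EAX, where user mode
 * expects to find the syscall result, before resuming the thread.
 */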
void
thread_syscall_return(
    kern_return_t ret)
{
    register thread_t thr_act = current_thread();
    register struct i386_saved_state *regs = USER_REGS(thr_act);
    regs->eax = ret;
    thread_exception_return();
    /*NOTREACHED*/
}

#if MACH_KDB
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

void
thread_kdb_return(void)
{
    register thread_t thread = current_thread();
    register struct i386_saved_state *regs = USER_REGS(thread);

    if (kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
        assert(thread->mutex_count == 0);
#endif /* MACH_LDEBUG */
        thread_exception_return();
        /*NOTREACHED*/
    }
}
boolean_t let_ddb_vm_fault = FALSE;

#endif /* MACH_KDB */

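/*
 * Completion routine for a user-mode page fault: on success (or an
 * aborted, restartable fault) resume the thread; otherwise raise
 * EXC_BAD_ACCESS with the faulting address taken from CR2.
 */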
void
user_page_fault_continue(
    kern_return_t kr)
{
    register thread_t thread = current_thread();
    register struct i386_saved_state *regs = USER_REGS(thread);

    if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if MACH_KDB
        if (!db_breakpoints_inserted) {
            db_set_breakpoints();
        }
        if (db_watchpoint_list &&
            db_watchpoints_inserted &&
            (regs->err & T_PF_WRITE) &&
            db_find_watchpoint(thread->map,
                               (vm_offset_t)regs->cr2,
                               regs))
            kdb_trap(T_WATCHPOINT, 0, regs);
#endif /* MACH_KDB */
        thread_exception_return();
        /*NOTREACHED*/
    }

#if MACH_KDB
    if (debug_all_traps_with_kdb &&
        kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
        assert(thread->mutex_count == 0);
#endif /* MACH_LDEBUG */
        thread_exception_return();
        /*NOTREACHED*/
    }
#endif /* MACH_KDB */

    i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
    /*NOTREACHED*/
}

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
    uint32_t fault_addr;
    uint32_t recover_addr;
};
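/*
 * Each entry pairs the address of a potentially faulting instruction
 * in the copyin/copyout path with the address at which execution
 * should resume if that instruction does fault.
 */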

extern struct recovery recover_table[];
extern struct recovery recover_table_end[];

/*
 * Recovery from a successful fault in copyout does not
 * return directly - it retries the pte check, since
 * the 386 ignores write protection in kernel mode.
 */
extern struct recovery retry_table[];
extern struct recovery retry_table_end[];

const char *trap_type[] = {TRAP_NAMES};
int TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);


/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if the trap was handled.
 */
boolean_t
kernel_trap(
    register struct i386_saved_state *regs)
{
    int code;
    unsigned int subcode;
    int interruptible = THREAD_UNINT;
    register int type;
    vm_map_t map;
    kern_return_t result = KERN_FAILURE;
    register thread_t thread;
#if MACH_KDB
    pt_entry_t *pte;
#endif /* MACH_KDB */

    type = regs->trapno;
    code = regs->err;
    thread = current_thread();

    switch (type) {
    case T_PREEMPT:
        ast_taken(AST_PREEMPTION, FALSE);
        return (TRUE);

    case T_NO_FPU:
        fpnoextflt();
        return (TRUE);

    case T_FPU_FAULT:
        fpextovrflt();
        return (TRUE);

    case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        return (TRUE);

    case T_PAGE_FAULT:
        /*
         * If the current map is a submap of the kernel map,
         * and the address is within that map, fault on that
         * map.  If the same check is done in vm_fault
         * (vm_map_lookup), we may deadlock on the kernel map
         * lock.
         */
#if MACH_KDB
        mp_disable_preemption();
        if (db_active
            && kdb_active[cpu_number()]
            && !let_ddb_vm_fault) {
            /*
             * Force kdb to handle this one.
             */
            mp_enable_preemption();
            return (FALSE);
        }
        mp_enable_preemption();
#endif /* MACH_KDB */
        subcode = regs->cr2;            /* get faulting address */

        if (subcode > LINEAR_KERNEL_ADDRESS) {
            map = kernel_map;
        } else if (thread == THREAD_NULL) {
            map = kernel_map;
        } else {
            map = thread->map;
        }
#if MACH_KDB
        /*
         * Check for a watchpoint on kernel static data.
         * vm_fault would fail in this case.
         */
        if (map == kernel_map &&
            db_watchpoint_list &&
            db_watchpoints_inserted &&
            (code & T_PF_WRITE) &&
            (vm_offset_t)subcode < vm_last_phys &&
            ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
             INTEL_PTE_WRITE) == 0) {
            *pte = *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE;    /* XXX need invltlb here? */
            result = KERN_SUCCESS;
        } else
#endif /* MACH_KDB */
        {
            /*
             * Since the 386 ignores write protection in
             * kernel mode, always try for write permission
             * first.  If that fails and the fault was a
             * read fault, retry with read permission.
             */
            if (map == kernel_map) {
                register struct recovery *rp;

                interruptible = THREAD_UNINT;
                for (rp = recover_table; rp < recover_table_end; rp++) {
                    if (regs->eip == rp->fault_addr) {
                        interruptible = THREAD_ABORTSAFE;
                        break;
                    }
                }
            }
            result = vm_fault(map,
                              trunc_page((vm_offset_t)subcode),
                              VM_PROT_READ|VM_PROT_WRITE,
                              FALSE,
                              (map == kernel_map) ? interruptible : THREAD_ABORTSAFE, NULL, 0);
        }
#if MACH_KDB
        if (result == KERN_SUCCESS) {
            /* Look for watchpoints */
            if (db_watchpoint_list &&
                db_watchpoints_inserted &&
                (code & T_PF_WRITE) &&
                db_find_watchpoint(map,
                                   (vm_offset_t)subcode, regs))
                kdb_trap(T_WATCHPOINT, 0, regs);
        }
        else
#endif /* MACH_KDB */
        if ((code & T_PF_WRITE) == 0 &&
            result == KERN_PROTECTION_FAILURE) {
            /*
             * Must expand vm_fault by hand,
             * so that we can ask for read-only access
             * but enter a (kernel) writable mapping.
             */
            result = intel_read_fault(map,
                                      trunc_page((vm_offset_t)subcode));
        }

        if (result == KERN_SUCCESS) {
            /*
             * Certain faults require that we back up
             * the EIP.
             */
            register struct recovery *rp;

            for (rp = retry_table; rp < retry_table_end; rp++) {
                if (regs->eip == rp->fault_addr) {
                    regs->eip = rp->recover_addr;
                    break;
                }
            }
            return (TRUE);
        }

        /* fall through */

    case T_GENERAL_PROTECTION:

        /*
         * If there is a failure recovery address
         * for this fault, go there.
         */
        {
            register struct recovery *rp;

            for (rp = recover_table;
                 rp < recover_table_end;
                 rp++) {
                if (regs->eip == rp->fault_addr) {
                    regs->eip = rp->recover_addr;
                    return (TRUE);
                }
            }
        }

        /*
         * Check the thread recovery address also -
         * the v86 assist uses it.
         */
        if (thread->recover) {
            regs->eip = thread->recover;
            thread->recover = 0;
            return (TRUE);
        }

        /*
         * Unanticipated page-fault errors in kernel
         * should not happen.
         */
        /* fall through... */

    default:
        /*
         * Exception 15 is reserved but some chips may generate it
         * spuriously.  Seen at startup on AMD Athlon-64.
         */
        if (type == 15) {
            kprintf("kernel_trap() ignoring spurious trap 15\n");
            return (TRUE);
        }

        /*
         * ...and return failure, so that locore can call into
         * the debugger.
         */
#if MACH_KDP
        kdp_i386_trap(type, regs, result, regs->cr2);
#endif
        return (FALSE);
    }
    return (TRUE);
}

/*
 * Called if both kernel_trap() and kdb_trap() fail.
 */
void
panic_trap(
    register struct i386_saved_state *regs)
{
    int code;
    register int type;

    type = regs->trapno;
    code = regs->err;

    printf("trap type %d, code = %x, pc = %x\n",
           type, code, regs->eip);
    panic("trap");
}

/*
 * Trap from user mode.
 */
void
user_trap(
    register struct i386_saved_state *regs)
{
    int exc;
    int code;
    unsigned int subcode;
    register int type;
    vm_map_t map;
    vm_prot_t prot;
    kern_return_t result;
    thread_t thread = current_thread();
    boolean_t kernel_act = FALSE;

    if (regs->efl & EFL_VM) {
        /*
         * If the hardware assist can handle the exception,
         * continue execution.
         */
        if (v86_assist(thread, regs))
            return;
    }

    type = regs->trapno;
    code = 0;
    subcode = 0;
    exc = 0;

    switch (type) {

    case T_DIVIDE_ERROR:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_DIV;
        break;

    case T_DEBUG:
        exc = EXC_BREAKPOINT;
        code = EXC_I386_SGL;
        break;

    case T_INT3:
        exc = EXC_BREAKPOINT;
        code = EXC_I386_BPT;
        break;

    case T_OVERFLOW:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_INTO;
        break;

    case T_OUT_OF_BOUNDS:
        exc = EXC_SOFTWARE;
        code = EXC_I386_BOUND;
        break;

    case T_INVALID_OPCODE:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOP;
        break;

    case T_NO_FPU:
    case 32:                    /* XXX */
        fpnoextflt();
        return;

    case T_FPU_FAULT:
        fpextovrflt();
        return;

    case T_INVALID_TSS:         /* invalid TSS == iret with NT flag set */
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVTSSFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_STACK_FAULT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = regs->err & 0xffff;
        break;

    case T_GENERAL_PROTECTION:
        if (!(regs->efl & EFL_VM)) {
            if (check_io_fault(regs))
                return;
        }
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_GPFLT;
        subcode = regs->err & 0xffff;
        break;

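    /*
     * Page fault from user mode: fault on the thread's map with the
     * protection implied by the error code, then resume the thread
     * (or raise an exception) via user_page_fault_continue().
     */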
    case T_PAGE_FAULT:
        subcode = regs->cr2;
        prot = VM_PROT_READ|VM_PROT_WRITE;
        if (kernel_act == FALSE) {
            if (!(regs->err & T_PF_WRITE))
                prot = VM_PROT_READ;
            (void) user_page_fault_continue(vm_fault(thread->map,
                trunc_page((vm_offset_t)subcode),
                prot,
                FALSE,
                THREAD_ABORTSAFE, NULL, 0));
            /* NOTREACHED */
        }
        else {
            if (subcode > LINEAR_KERNEL_ADDRESS) {
                map = kernel_map;
            } else {
                map = thread->map;
            }
            result = vm_fault(thread->map,
                trunc_page((vm_offset_t)subcode),
                prot,
                FALSE,
                (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);
            if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) {
                /*
                 * Must expand vm_fault by hand,
                 * so that we can ask for read-only access
                 * but enter a (kernel) writable mapping.
                 */
                result = intel_read_fault(thread->map,
                    trunc_page((vm_offset_t)subcode));
            }
            user_page_fault_continue(result);
            /*NOTREACHED*/
        }
        break;

    case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        return;

    default:
#if MACH_KGDB
        Debugger("Unanticipated user trap");
        return;
#endif /* MACH_KGDB */
#if MACH_KDB
        if (kdb_trap(type, regs->err, regs))
            return;
#endif /* MACH_KDB */
        printf("user trap type %d, code = %x, pc = %x\n",
               type, regs->err, regs->eip);
        panic("user trap");
        return;
    }

#if MACH_KDB
    if (debug_all_traps_with_kdb &&
        kdb_trap(type, regs->err, regs))
        return;
#endif /* MACH_KDB */

    i386_exception(exc, code, subcode);
    /*NOTREACHED*/
}

/*
 * V86 mode assist for interrupt handling.
 */
boolean_t v86_assist_on = TRUE;
boolean_t v86_unsafe_ok = FALSE;
boolean_t v86_do_sti_cli = TRUE;
boolean_t v86_do_sti_immediate = FALSE;

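/*
 * Software-only flag kept in the thread's v86 flags word (not in the
 * hardware EFLAGS): set while a simulated interrupt frame is on the
 * 8086 stack and the matching iret has not yet executed.
 */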
#define V86_IRET_PENDING 0x4000

int cli_count = 0;
int sti_count = 0;

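/*
 * Emulate the privileged 8086 instructions (cli/sti, pushf/popf, iret
 * and port I/O) that fault in v86 mode, then deliver any pending
 * simulated interrupt.  Returns TRUE if the fault was fully handled.
 */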
boolean_t
v86_assist(
    thread_t thread,
    register struct i386_saved_state *regs)
{
    register struct v86_assist_state *v86 = &thread->machine.pcb->ims.v86s;

/*
 * Build an 8086 address.  Use only when off is known to be 16 bits.
 */
#define Addr8086(seg,off)   ((((seg) & 0xffff) << 4) + (off))

#define EFL_V86_SAFE        (  EFL_OF | EFL_DF | EFL_TF \
                             | EFL_SF | EFL_ZF | EFL_AF \
                             | EFL_PF | EFL_CF )
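    /*
     * EFL_V86_SAFE names the EFLAGS bits a v86 task may change
     * directly via popf/iret; IF and TF are virtualized in
     * v86->flags instead.
     */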
    struct iret_32 {
        int eip;
        int cs;
        int eflags;
    };
    struct iret_16 {
        unsigned short ip;
        unsigned short cs;
        unsigned short flags;
    };
    union iret_struct {
        struct iret_32 iret_32;
        struct iret_16 iret_16;
    };

    struct int_vec {
        unsigned short ip;
        unsigned short cs;
    };

    if (!v86_assist_on)
        return FALSE;

    /*
     * If a delayed STI is pending, enable interrupts.
     * Turn off tracing if it was on only to delay the STI.
     */
    if (v86->flags & V86_IF_PENDING) {
        v86->flags &= ~V86_IF_PENDING;
        v86->flags |= EFL_IF;
        if ((v86->flags & EFL_TF) == 0)
            regs->efl &= ~EFL_TF;
    }

    if (regs->trapno == T_DEBUG) {

        if (v86->flags & EFL_TF) {
            /*
             * Trace flag was also set - it has priority
             */
            return FALSE;               /* handle as single-step */
        }
        /*
         * Fall through to check for interrupts.
         */
    }
    else if (regs->trapno == T_GENERAL_PROTECTION) {
        /*
         * General protection error - must be an 8086 instruction
         * to emulate.
         */
        register int eip;
        boolean_t addr_32 = FALSE;
        boolean_t data_32 = FALSE;
        int io_port;

        /*
         * Set up the error handler for bad instruction/data
         * fetches.
         */
        __asm__ volatile("movl $(addr_error), %0" : "=m" (thread->recover));

        eip = regs->eip;
        while (TRUE) {
            unsigned char opcode;

            if (eip > 0xFFFF) {
                thread->recover = 0;
                return FALSE;           /* GP fault: IP out of range */
            }

            opcode = *(unsigned char *)Addr8086(regs->cs,eip);
            eip++;
            switch (opcode) {
            case 0xf0:                  /* lock */
            case 0xf2:                  /* repne */
            case 0xf3:                  /* repe */
            case 0x2e:                  /* cs */
            case 0x36:                  /* ss */
            case 0x3e:                  /* ds */
            case 0x26:                  /* es */
            case 0x64:                  /* fs */
            case 0x65:                  /* gs */
                /* ignore prefix */
                continue;

            case 0x66:                  /* data size */
                data_32 = TRUE;
                continue;

            case 0x67:                  /* address size */
                addr_32 = TRUE;
                continue;

            case 0xe4:                  /* inb imm */
            case 0xe5:                  /* inw imm */
            case 0xe6:                  /* outb imm */
            case 0xe7:                  /* outw imm */
                io_port = *(unsigned char *)Addr8086(regs->cs, eip);
                eip++;
                goto do_in_out;

            case 0xec:                  /* inb dx */
            case 0xed:                  /* inw dx */
            case 0xee:                  /* outb dx */
            case 0xef:                  /* outw dx */
            case 0x6c:                  /* insb */
            case 0x6d:                  /* insw */
            case 0x6e:                  /* outsb */
            case 0x6f:                  /* outsw */
                io_port = regs->edx & 0xffff;

            do_in_out:
                if (!data_32)
                    opcode |= 0x6600;   /* word IO */

                switch (emulate_io(regs, opcode, io_port)) {
                case EM_IO_DONE:
                    /* instruction executed */
                    break;
                case EM_IO_RETRY:
                    /* port mapped, retry instruction */
                    thread->recover = 0;
                    return TRUE;
                case EM_IO_ERROR:
                    /* port not mapped */
                    thread->recover = 0;
                    return FALSE;
                }
                break;

            case 0xfa:                  /* cli */
                if (!v86_do_sti_cli) {
                    thread->recover = 0;
                    return (FALSE);
                }

                v86->flags &= ~EFL_IF;
                /* disable simulated interrupts */
                cli_count++;
                break;

            case 0xfb:                  /* sti */
                if (!v86_do_sti_cli) {
                    thread->recover = 0;
                    return (FALSE);
                }

                if ((v86->flags & EFL_IF) == 0) {
                    if (v86_do_sti_immediate) {
                        v86->flags |= EFL_IF;
                    } else {
                        v86->flags |= V86_IF_PENDING;
                        regs->efl |= EFL_TF;
                    }
                    /* single step to set IF next inst. */
                }
                sti_count++;
                break;

            case 0x9c:                  /* pushf */
            {
                int flags;
                vm_offset_t sp;
                unsigned int size;

                flags = regs->efl;
                if ((v86->flags & EFL_IF) == 0)
                    flags &= ~EFL_IF;

                if ((v86->flags & EFL_TF) == 0)
                    flags &= ~EFL_TF;
                else
                    flags |= EFL_TF;

                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;
                size = (data_32) ? 4 : 2;
                if (sp < size)
                    goto stack_error;
                sp -= size;
                if (copyout((char *)&flags,
                            (user_addr_t)Addr8086(regs->ss,sp),
                            size))
                    goto addr_error;
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;
                break;
            }

            case 0x9d:                  /* popf */
            {
                vm_offset_t sp;
                int nflags;

                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;

                if (data_32) {
                    if (sp > 0xffff - sizeof(int))
                        goto stack_error;
                    nflags = *(int *)Addr8086(regs->ss,sp);
                    sp += sizeof(int);
                }
                else {
                    if (sp > 0xffff - sizeof(short))
                        goto stack_error;
                    nflags = *(unsigned short *)
                        Addr8086(regs->ss,sp);
                    sp += sizeof(short);
                }
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;

                /* take IF and TF from the popped flags;
                   any pending-iret state is preserved */
                v86->flags = (v86->flags & V86_IRET_PENDING)
                           | (nflags & (EFL_TF | EFL_IF));
                regs->efl = (regs->efl & ~EFL_V86_SAFE)
                          | (nflags & EFL_V86_SAFE);
                break;
            }
            case 0xcf:                  /* iret */
            {
                vm_offset_t sp;
                int nflags;
                union iret_struct iret_struct;

                v86->flags &= ~V86_IRET_PENDING;
                sp = regs->uesp;
                if (!addr_32)
                    sp &= 0xffff;
                else if (sp > 0xffff)
                    goto stack_error;

                if (data_32) {
                    if (sp > 0xffff - sizeof(struct iret_32))
                        goto stack_error;
                    iret_struct.iret_32 =
                        *(struct iret_32 *) Addr8086(regs->ss,sp);
                    sp += sizeof(struct iret_32);
                }
                else {
                    if (sp > 0xffff - sizeof(struct iret_16))
                        goto stack_error;
                    iret_struct.iret_16 =
                        *(struct iret_16 *) Addr8086(regs->ss,sp);
                    sp += sizeof(struct iret_16);
                }
                if (addr_32)
                    regs->uesp = sp;
                else
                    regs->uesp = (regs->uesp & 0xffff0000) | sp;

                if (data_32) {
                    eip = iret_struct.iret_32.eip;
                    regs->cs = iret_struct.iret_32.cs & 0xffff;
                    nflags = iret_struct.iret_32.eflags;
                }
                else {
                    eip = iret_struct.iret_16.ip;
                    regs->cs = iret_struct.iret_16.cs;
                    nflags = iret_struct.iret_16.flags;
                }

                v86->flags = nflags & (EFL_TF | EFL_IF);
                regs->efl = (regs->efl & ~EFL_V86_SAFE)
                          | (nflags & EFL_V86_SAFE);
                break;
            }
            default:
                /*
                 * Instruction not emulated here.
                 */
                thread->recover = 0;
                return FALSE;
            }
            break;                      /* exit from 'while TRUE' */
        }
        regs->eip = (regs->eip & 0xffff0000) | eip;
    }
    else {
        /*
         * Not a trap we handle.
         */
        thread->recover = 0;
        return FALSE;
    }

    if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING) == 0)) {

        struct v86_interrupt_table *int_table;
        int int_count;
        int vec;
        int i;

        int_table = (struct v86_interrupt_table *) v86->int_table;
        int_count = v86->int_count;

        vec = 0;
        for (i = 0; i < int_count; int_table++, i++) {
            if (!int_table->mask && int_table->count > 0) {
                int_table->count--;
                vec = int_table->vec;
                break;
            }
        }
        if (vec != 0) {
            /*
             * Take this interrupt
             */
            vm_offset_t sp;
            struct iret_16 iret_16;
            struct int_vec int_vec;

            sp = regs->uesp & 0xffff;
            if (sp < sizeof(struct iret_16))
                goto stack_error;
            sp -= sizeof(struct iret_16);
            iret_16.ip = regs->eip;
            iret_16.cs = regs->cs;
            iret_16.flags = regs->efl & 0xFFFF;
            if ((v86->flags & EFL_TF) == 0)
                iret_16.flags &= ~EFL_TF;
            else
                iret_16.flags |= EFL_TF;

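            /*
             * Fetch the handler's CS:IP from the 8086 interrupt
             * vector table: vector n occupies the four bytes at
             * linear address n * sizeof(struct int_vec).
             */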
            (void) memcpy((char *) &int_vec,
                          (char *) (sizeof(struct int_vec) * vec),
                          sizeof(struct int_vec));
            if (copyout((char *)&iret_16,
                        (user_addr_t)Addr8086(regs->ss,sp),
                        sizeof(struct iret_16)))
                goto addr_error;
            regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
            regs->eip = int_vec.ip;
            regs->cs = int_vec.cs;
            regs->efl &= ~EFL_TF;
            v86->flags &= ~(EFL_IF | EFL_TF);
            v86->flags |= V86_IRET_PENDING;
        }
    }

    thread->recover = 0;
    return TRUE;

    /*
     * On an address error, report a page fault.
     * XXX report GP fault - we don't save
     * the faulting address.
     */
  addr_error:
    __asm__("addr_error:;");
    thread->recover = 0;
    return FALSE;

    /*
     * On a stack address error, return a stack fault (12).
     */
  stack_error:
    thread->recover = 0;
    regs->trapno = T_STACK_FAULT;
    return FALSE;
}

/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void log_thread_action(thread_t, char *);

void
i386_astintr(int preemption)
{
    ast_t *my_ast, mask = AST_ALL;
    spl_t s;

    s = splsched();             /* block interrupts to check reasons */
    mp_disable_preemption();
    my_ast = ast_pending();
    if (*my_ast & AST_I386_FP) {
        /*
         * AST was for delayed floating-point exception -
         * FP interrupt occurred while in kernel.
         * Turn off this AST reason and handle the FPU error.
         */

        ast_off(AST_I386_FP);
        mp_enable_preemption();
        splx(s);

        fpexterrflt();
    }
    else {
        /*
         * Not an FPU trap.  Handle the AST.
         * Interrupts are still blocked.
         */

        if (preemption)
            mask = AST_PREEMPTION;
        mp_enable_preemption();

        ast_taken(mask, s);
    }
}

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
    int exc,
    int code,
    int subcode)
{
    spl_t s;
    exception_data_type_t codes[EXCEPTION_CODE_MAX];

    /*
     * Turn off delayed FPU error handling.
     */
    s = splsched();
    mp_disable_preemption();
    ast_off(AST_I386_FP);
    mp_enable_preemption();
    splx(s);

    codes[0] = code;            /* new exception interface */
    codes[1] = subcode;
    exception_triage(exc, codes, 2);
    /*NOTREACHED*/
}

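/*
 * A general-protection fault from user mode may be an I/O instruction
 * executed without I/O permission.  Decode the instruction and, if it
 * is an in/out/ins/outs, hand it to the I/O emulator; returns TRUE if
 * the instruction was handled.
 */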
boolean_t
check_io_fault(
    struct i386_saved_state *regs)
{
    int eip, opcode, io_port;
    boolean_t data_16 = FALSE;

    /*
     * Get the instruction.
     */
    eip = regs->eip;

    for (;;) {
        opcode = inst_fetch(eip, regs->cs);
        eip++;
        switch (opcode) {
        case 0x66:              /* data-size prefix */
            data_16 = TRUE;
            continue;

        case 0xf3:              /* rep prefix */
        case 0x26:              /* es */
        case 0x2e:              /* cs */
        case 0x36:              /* ss */
        case 0x3e:              /* ds */
        case 0x64:              /* fs */
        case 0x65:              /* gs */
            continue;

        case 0xE4:              /* inb imm */
        case 0xE5:              /* inl imm */
        case 0xE6:              /* outb imm */
        case 0xE7:              /* outl imm */
            /* port is immediate byte */
            io_port = inst_fetch(eip, regs->cs);
            eip++;
            break;

        case 0xEC:              /* inb dx */
        case 0xED:              /* inl dx */
        case 0xEE:              /* outb dx */
        case 0xEF:              /* outl dx */
        case 0x6C:              /* insb */
        case 0x6D:              /* insl */
        case 0x6E:              /* outsb */
        case 0x6F:              /* outsl */
            /* port is in DX register */
            io_port = regs->edx & 0xFFFF;
            break;

        default:
            return FALSE;
        }
        break;
    }

    if (data_16)
        opcode |= 0x6600;       /* word IO */

    switch (emulate_io(regs, opcode, io_port)) {
    case EM_IO_DONE:
        /* instruction executed */
        regs->eip = eip;
        return TRUE;

    case EM_IO_RETRY:
        /* port mapped, retry instruction */
        return TRUE;

    case EM_IO_ERROR:
        /* port not mapped */
        return FALSE;
    }
    return FALSE;
}

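/*
 * If an urgent AST is pending and we are about to return to the
 * outermost interrupt level, take the AST immediately by raising the
 * self-interrupt vector (0xff).
 */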
void
kernel_preempt_check(void)
{
    ast_t *myast;

    mp_disable_preemption();
    myast = ast_pending();
    if ((*myast & AST_URGENT) &&
        get_interrupt_level() == 1) {
        mp_enable_preemption_no_check();
        __asm__ volatile ("int $0xff");
    } else {
        mp_enable_preemption_no_check();
    }
}

#if MACH_KDB

extern void db_i386_state(struct i386_saved_state *regs);

#include <ddb/db_output.h>

void
db_i386_state(
    struct i386_saved_state *regs)
{
    db_printf("eip  %8x\n", regs->eip);
    db_printf("trap %8x\n", regs->trapno);
    db_printf("err  %8x\n", regs->err);
    db_printf("efl  %8x\n", regs->efl);
    db_printf("ebp  %8x\n", regs->ebp);
    db_printf("esp  %8x\n", regs->esp);
    db_printf("uesp %8x\n", regs->uesp);
    db_printf("cs   %8x\n", regs->cs & 0xffff);
    db_printf("ds   %8x\n", regs->ds & 0xffff);
    db_printf("es   %8x\n", regs->es & 0xffff);
    db_printf("fs   %8x\n", regs->fs & 0xffff);
    db_printf("gs   %8x\n", regs->gs & 0xffff);
    db_printf("ss   %8x\n", regs->ss & 0xffff);
    db_printf("eax  %8x\n", regs->eax);
    db_printf("ebx  %8x\n", regs->ebx);
    db_printf("ecx  %8x\n", regs->ecx);
    db_printf("edx  %8x\n", regs->edx);
    db_printf("esi  %8x\n", regs->esi);
    db_printf("edi  %8x\n", regs->edi);
}

#endif /* MACH_KDB */