]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/trap.c
xnu-792.6.56.tar.gz
[apple/xnu.git] / osfmk / i386 / trap.c
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
1c79356b
A
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
ff6e181a
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
1c79356b 12 *
ff6e181a
A
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
ff6e181a
A
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
1c79356b
A
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/*
24 * @OSF_COPYRIGHT@
25 */
26/*
27 * Mach Operating System
28 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
29 * All Rights Reserved.
30 *
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
36 *
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 *
41 * Carnegie Mellon requests users of this software to return to
42 *
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
47 *
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
50 */
51/*
52 */
53/*
54 * Hardware trap/fault handler.
55 */
56
1c79356b
A
57#include <mach_kdb.h>
58#include <mach_kgdb.h>
59#include <mach_kdp.h>
60#include <mach_ldebug.h>
61
62#include <types.h>
63#include <i386/eflags.h>
64#include <i386/trap.h>
65#include <i386/pmap.h>
66#include <i386/fpu.h>
67
68#include <mach/exception.h>
69#include <mach/kern_return.h>
70#include <mach/vm_param.h>
71#include <mach/i386/thread_status.h>
72
73#include <vm/vm_kern.h>
74#include <vm/vm_fault.h>
75
1c79356b 76#include <kern/kern_types.h>
91447636 77#include <kern/processor.h>
1c79356b
A
78#include <kern/thread.h>
79#include <kern/task.h>
80#include <kern/sched.h>
81#include <kern/sched_prim.h>
82#include <kern/exception.h>
83#include <kern/spl.h>
84#include <kern/misc_protos.h>
85
86#if MACH_KGDB
87#include <kgdb/kgdb_defs.h>
88#endif /* MACH_KGDB */
89
90#include <i386/intel_read_fault.h>
91
92#if MACH_KGDB
93#include <kgdb/kgdb_defs.h>
94#endif /* MACH_KGDB */
95
96#if MACH_KDB
97#include <ddb/db_watch.h>
98#include <ddb/db_run.h>
99#include <ddb/db_break.h>
100#include <ddb/db_trap.h>
101#endif /* MACH_KDB */
102
103#include <string.h>
104
105#include <i386/io_emulate.h>
106
107/*
108 * Forward declarations
109 */
110extern void user_page_fault_continue(
111 kern_return_t kr);
112
113extern boolean_t v86_assist(
114 thread_t thread,
115 struct i386_saved_state *regs);
116
117extern boolean_t check_io_fault(
118 struct i386_saved_state *regs);
119
120extern int inst_fetch(
121 int eip,
122 int cs);
123
124void
125thread_syscall_return(
126 kern_return_t ret)
127{
91447636 128 register thread_t thr_act = current_thread();
1c79356b
A
129 register struct i386_saved_state *regs = USER_REGS(thr_act);
130 regs->eax = ret;
131 thread_exception_return();
132 /*NOTREACHED*/
133}
134
135
136#if MACH_KDB
137boolean_t debug_all_traps_with_kdb = FALSE;
138extern struct db_watchpoint *db_watchpoint_list;
139extern boolean_t db_watchpoints_inserted;
140extern boolean_t db_breakpoints_inserted;
141
/*
 * thread_kdb_return:
 *
 *	Enter the kernel debugger (kdb) on behalf of the current thread,
 *	using the thread's saved user register state.  If kdb_trap()
 *	handles the trap, resume user mode via thread_exception_return();
 *	otherwise simply return to the caller.
 */
void
thread_kdb_return(void)
{
	register thread_t thread = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thread);

	if (kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
		/* Lock debugging: thread must not hold mutexes on the way out. */
		assert(thread->mutex_count == 0);
#endif	/* MACH_LDEBUG */
		thread_exception_return();
		/*NOTREACHED*/
	}
}
156boolean_t let_ddb_vm_fault = FALSE;
157
1c79356b
A
158#endif /* MACH_KDB */
159
/*
 * user_page_fault_continue:
 *
 *	Continuation run after a user-mode page fault has been serviced
 *	by vm_fault().  On success (or an aborted/interrupted fault) the
 *	thread resumes user mode; otherwise an EXC_BAD_ACCESS Mach
 *	exception is raised with the fault result and faulting address.
 *	Never returns.
 */
void
user_page_fault_continue(
	kern_return_t kr)
{
	register thread_t thread = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thread);

	if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if MACH_KDB
		/* Re-arm debugger breakpoints/watchpoints before resuming. */
		if (!db_breakpoints_inserted) {
			db_set_breakpoints();
		}
		/* A write fault on a watched page drops into kdb. */
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (regs->err & T_PF_WRITE) &&
		    db_find_watchpoint(thread->map,
				       (vm_offset_t)regs->cr2,
				       regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
#endif	/* MACH_KDB */
		thread_exception_return();
		/*NOTREACHED*/
	}

#if MACH_KDB
	/* Optionally route every failed fault into kdb for inspection. */
	if (debug_all_traps_with_kdb &&
	    kdb_trap(regs->trapno, regs->err, regs)) {
#if MACH_LDEBUG
		assert(thread->mutex_count == 0);
#endif	/* MACH_LDEBUG */
		thread_exception_return();
		/*NOTREACHED*/
	}
#endif	/* MACH_KDB */

	/* Fault could not be resolved: raise a bad-access exception.
	 * codes[0] = VM result, codes[1] = faulting linear address (%cr2). */
	i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
	/*NOTREACHED*/
}
198
199/*
200 * Fault recovery in copyin/copyout routines.
201 */
202struct recovery {
91447636
A
203 uint32_t fault_addr;
204 uint32_t recover_addr;
1c79356b
A
205};
206
207extern struct recovery recover_table[];
208extern struct recovery recover_table_end[];
209
210/*
211 * Recovery from Successful fault in copyout does not
212 * return directly - it retries the pte check, since
213 * the 386 ignores write protection in kernel mode.
214 */
215extern struct recovery retry_table[];
216extern struct recovery retry_table_end[];
217
91447636 218const char * trap_type[] = {TRAP_NAMES};
1c79356b
A
219int TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
220
91447636 221
1c79356b
A
222/*
223 * Trap from kernel mode. Only page-fault errors are recoverable,
224 * and then only in special circumstances. All other errors are
225 * fatal. Return value indicates if trap was handled.
226 */
boolean_t
kernel_trap(
	register struct i386_saved_state *regs)	/* saved state at trap time */
{
	int code;			/* hardware error code */
	unsigned int subcode;		/* faulting address for page faults */
	int interruptible = THREAD_UNINT;
	register int type;		/* trap number */
	vm_map_t map;
	kern_return_t result = KERN_FAILURE;
	register thread_t thread;

	type = regs->trapno;
	code = regs->err;
	thread = current_thread();

	switch (type) {
	case T_PREEMPT:
		/* Preemption request: take the AST now. */
		ast_taken(AST_PREEMPTION, FALSE);
		return (TRUE);

	case T_NO_FPU:
		/* FPU not present/owned: fault it in. */
		fpnoextflt();
		return (TRUE);

	case T_FPU_FAULT:
		fpextovrflt();
		return (TRUE);

	case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return (TRUE);

	case T_PAGE_FAULT:
		/*
		 * If the current map is a submap of the kernel map,
		 * and the address is within that map, fault on that
		 * map.  If the same check is done in vm_fault
		 * (vm_map_lookup), we may deadlock on the kernel map
		 * lock.
		 */
#if MACH_KDB
		mp_disable_preemption();
		if (db_active
		    && kdb_active[cpu_number()]
		    && !let_ddb_vm_fault) {
			/*
			 * Force kdb to handle this one.
			 */
			mp_enable_preemption();
			return (FALSE);
		}
		mp_enable_preemption();
#endif	/* MACH_KDB */
		subcode = regs->cr2;	/* get faulting address */

		/* Faults above the linear kernel base, or with no thread
		 * context, are serviced against the kernel map. */
		if (subcode > LINEAR_KERNEL_ADDRESS) {
			map = kernel_map;
		} else if (thread == THREAD_NULL)
			map = kernel_map;
		else {
			map = thread->map;
		}
#if MACH_KDB
		/*
		 * Check for watchpoint on kernel static data.
		 * vm_fault would fail in this case
		 *
		 * NOTE(review): `pte` is not declared anywhere in this
		 * excerpt — presumably its declaration was lost in
		 * extraction; confirm against the full source.
		 */
		if (map == kernel_map &&
		    db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (code & T_PF_WRITE) &&
		    (vm_offset_t)subcode < vm_last_phys &&
		    ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
		     INTEL_PTE_WRITE) == 0) {
			*pte = *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE;	/* XXX need invltlb here? */
			result = KERN_SUCCESS;
		} else
#endif	/* MACH_KDB */
		{
			/*
			 * Since the 386 ignores write protection in
			 * kernel mode, always try for write permission
			 * first.  If that fails and the fault was a
			 * read fault, retry with read permission.
			 */
			if (map == kernel_map) {
				register struct recovery *rp;

				/* Kernel-map faults are interruptible only
				 * at eips registered in recover_table
				 * (copyin/copyout fault sites). */
				interruptible = THREAD_UNINT;
				for (rp = recover_table; rp < recover_table_end; rp++) {
					if (regs->eip == rp->fault_addr) {
						interruptible = THREAD_ABORTSAFE;
						break;
					}
				}
			}
			result = vm_fault(map,
					  trunc_page((vm_offset_t)subcode),
					  VM_PROT_READ|VM_PROT_WRITE,
					  FALSE,
					  (map == kernel_map) ? interruptible : THREAD_ABORTSAFE, NULL, 0);
		}
#if MACH_KDB
		if (result == KERN_SUCCESS) {
			/* Look for watchpoints */
			if (db_watchpoint_list &&
			    db_watchpoints_inserted &&
			    (code & T_PF_WRITE) &&
			    db_find_watchpoint(map,
					       (vm_offset_t)subcode, regs))
				kdb_trap(T_WATCHPOINT, 0, regs);
		}
		else
#endif	/* MACH_KDB */
		if ((code & T_PF_WRITE) == 0 &&
		    result == KERN_PROTECTION_FAILURE)
		{
			/*
			 * Must expand vm_fault by hand,
			 * so that we can ask for read-only access
			 * but enter a (kernel)writable mapping.
			 */
			result = intel_read_fault(map,
						  trunc_page((vm_offset_t)subcode));
		}

		if (result == KERN_SUCCESS) {
			/*
			 * Certain faults require that we back up
			 * the EIP.
			 */
			register struct recovery *rp;

			for (rp = retry_table; rp < retry_table_end; rp++) {
				if (regs->eip == rp->fault_addr) {
					regs->eip = rp->recover_addr;
					break;
				}
			}
			return (TRUE);
		}

		/* fall through */

	case T_GENERAL_PROTECTION:

		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		{
			register struct recovery *rp;

			for (rp = recover_table;
			     rp < recover_table_end;
			     rp++) {
				if (regs->eip == rp->fault_addr) {
					regs->eip = rp->recover_addr;
					return (TRUE);
				}
			}
		}

		/*
		 * Check thread recovery address also -
		 * v86 assist uses it.
		 */
		if (thread->recover) {
			regs->eip = thread->recover;
			thread->recover = 0;
			return (TRUE);
		}

		/*
		 * Unanticipated page-fault errors in kernel
		 * should not happen.
		 */
		/* fall through... */

	default:
		/*
		 * Exception 15 is reserved but some chips may generate it
		 * spuriously. Seen at startup on AMD Athlon-64.
		 */
		if (type == 15) {
			kprintf("kernel_trap() ignoring spurious trap 15\n");
			return (TRUE);
		}

		/*
		 * ...and return failure, so that locore can call into
		 * debugger.
		 */
#if MACH_KDP
		kdp_i386_trap(type, regs, result, regs->cr2);
#endif
		return (FALSE);
	}
	return (TRUE);
}
428
429/*
430 * Called if both kernel_trap() and kdb_trap() fail.
431 */
432void
433panic_trap(
434 register struct i386_saved_state *regs)
435{
436 int code;
437 register int type;
438
439 type = regs->trapno;
440 code = regs->err;
441
442 printf("trap type %d, code = %x, pc = %x\n",
443 type, code, regs->eip);
444 panic("trap");
445}
446
447
448/*
449 * Trap from user mode.
450 */
void
user_trap(
	register struct i386_saved_state *regs)	/* saved user state at trap */
{
	int exc;			/* Mach exception type to raise */
	int code;			/* machine-dependent exception code */
	unsigned int subcode;		/* extra detail (selector/fault addr) */
	register int type;		/* hardware trap number */
	vm_map_t map;
	vm_prot_t prot;
	kern_return_t result;
	thread_t thread = current_thread();
	/* NOTE(review): kernel_act is always FALSE here, so the `else`
	 * arm of the T_PAGE_FAULT case below appears unreachable —
	 * confirm against callers/history before relying on it. */
	boolean_t kernel_act = FALSE;

	if (regs->efl & EFL_VM) {
		/*
		 * If hardware assist can handle exception,
		 * continue execution.
		 */
		if (v86_assist(thread, regs))
			return;
	}

	type = regs->trapno;
	code = 0;
	subcode = 0;
	exc = 0;

	switch (type) {

	case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	case T_DEBUG:
		/* single-step / debug register trap */
		exc = EXC_BREAKPOINT;
		code = EXC_I386_SGL;
		break;

	case T_INT3:
		/* int3 breakpoint instruction */
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	case T_OUT_OF_BOUNDS:
		exc = EXC_SOFTWARE;
		code = EXC_I386_BOUND;
		break;

	case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	case T_NO_FPU:
	case 32:		/* XXX */
		fpnoextflt();
		return;

	case T_FPU_FAULT:
		fpextovrflt();
		return;

	case 10:		/* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = regs->err & 0xffff;	/* selector from error code */
		break;

	case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = regs->err & 0xffff;
		break;

	case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = regs->err & 0xffff;
		break;

	case T_GENERAL_PROTECTION:
		if (!(regs->efl & EFL_VM)) {
			/* GP fault may be an emulatable I/O instruction. */
			if (check_io_fault(regs))
				return;
		}
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = regs->err & 0xffff;
		break;

	case T_PAGE_FAULT:
		subcode = regs->cr2;	/* faulting linear address */
		prot = VM_PROT_READ|VM_PROT_WRITE;
		if (kernel_act == FALSE) {
			/* Read faults only ask for read permission. */
			if (!(regs->err & T_PF_WRITE))
				prot = VM_PROT_READ;
			(void) user_page_fault_continue(vm_fault(thread->map,
				trunc_page((vm_offset_t)subcode),
				prot,
				FALSE,
				THREAD_ABORTSAFE, NULL, 0));
			/* NOTREACHED */
		}
		else {
			/* NOTE(review): `map` is only assigned when
			 * subcode > LINEAR_KERNEL_ADDRESS, yet is read
			 * below — uninitialized otherwise.  Branch looks
			 * dead (kernel_act is always FALSE); confirm. */
			if (subcode > LINEAR_KERNEL_ADDRESS) {
				map = kernel_map;
			}
			result = vm_fault(thread->map,
				trunc_page((vm_offset_t)subcode),
				prot,
				FALSE,
				(map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0);
			if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) {
				/*
				 * Must expand vm_fault by hand,
				 * so that we can ask for read-only access
				 * but enter a (kernel) writable mapping.
				 */
				result = intel_read_fault(thread->map,
					trunc_page((vm_offset_t)subcode));
			}
			user_page_fault_continue(result);
			/*NOTREACHED*/
		}
		break;

	case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	default:
#if MACH_KGDB
		Debugger("Unanticipated user trap");
		return;
#endif	/* MACH_KGDB */
#if MACH_KDB
		if (kdb_trap(type, regs->err, regs))
			return;
#endif	/* MACH_KDB */
		printf("user trap type %d, code = %x, pc = %x\n",
		       type, regs->err, regs->eip);
		panic("user trap");
		return;
	}

#if MACH_KDB
	/* Optionally give kdb first crack at every user trap. */
	if (debug_all_traps_with_kdb &&
	    kdb_trap(type, regs->err, regs))
		return;
#endif	/* MACH_KDB */

	/* Raise the Mach exception selected above.  Does not return. */
	i386_exception(exc, code, subcode);
	/*NOTREACHED*/
}
612
613/*
614 * V86 mode assist for interrupt handling.
615 */
616boolean_t v86_assist_on = TRUE;
617boolean_t v86_unsafe_ok = FALSE;
618boolean_t v86_do_sti_cli = TRUE;
619boolean_t v86_do_sti_immediate = FALSE;
620
621#define V86_IRET_PENDING 0x4000
622
623int cli_count = 0;
624int sti_count = 0;
625
/*
 * v86_assist:
 *
 *	Software assist for traps taken in virtual-8086 mode.  Emulates
 *	the sensitive instructions (cli/sti/pushf/popf/iret and I/O) that
 *	fault with a general protection error, maintains a virtual
 *	interrupt flag in the per-thread v86 state, and delivers pending
 *	simulated interrupts.  Returns TRUE if the trap was fully handled
 *	(caller resumes the thread), FALSE if normal trap processing
 *	should continue.
 */
boolean_t
v86_assist(
	thread_t thread,
	register struct i386_saved_state *regs)
{
	register struct v86_assist_state *v86 = &thread->machine.pcb->ims.v86s;

/*
 * Build an 8086 address. Use only when off is known to be 16 bits.
 */
#define	Addr8086(seg,off)	((((seg) & 0xffff) << 4) + (off))

	/* Flag bits user code may set freely via popf/iret. */
#define	EFL_V86_SAFE		(  EFL_OF | EFL_DF | EFL_TF \
				 | EFL_SF | EFL_ZF | EFL_AF \
				 | EFL_PF | EFL_CF )
	struct iret_32 {
		int		eip;
		int		cs;
		int		eflags;
	};
	struct iret_16 {
		unsigned short	ip;
		unsigned short	cs;
		unsigned short	flags;
	};
	union iret_struct {
		struct iret_32	iret_32;
		struct iret_16	iret_16;
	};

	/* Real-mode interrupt vector: 16-bit ip:cs pair. */
	struct int_vec {
		unsigned short	ip;
		unsigned short	cs;
	};

	if (!v86_assist_on)
		return FALSE;

	/*
	 * If delayed STI pending, enable interrupts.
	 * Turn off tracing if on only to delay STI.
	 */
	if (v86->flags & V86_IF_PENDING) {
		v86->flags &= ~V86_IF_PENDING;
		v86->flags |= EFL_IF;
		if ((v86->flags & EFL_TF) == 0)
			regs->efl &= ~EFL_TF;
	}

	if (regs->trapno == T_DEBUG) {

		if (v86->flags & EFL_TF) {
			/*
			 * Trace flag was also set - it has priority
			 */
			return FALSE;			/* handle as single-step */
		}
		/*
		 * Fall through to check for interrupts.
		 */
	}
	else if (regs->trapno == T_GENERAL_PROTECTION) {
		/*
		 * General protection error - must be an 8086 instruction
		 * to emulate.
		 */
		register int	eip;
		boolean_t	addr_32 = FALSE;	/* 0x67 prefix seen */
		boolean_t	data_32 = FALSE;	/* 0x66 prefix seen */
		int		io_port;

		/*
		 * Set up error handler for bad instruction/data
		 * fetches.
		 * NOTE(review): the recovery address is installed via an
		 * "m" constraint with no outputs — looks inverted versus
		 * a normal output operand; confirm against toolchain.
		 */
		__asm__("movl $(addr_error), %0" : : "m" (thread->recover));

		eip = regs->eip;
		while (TRUE) {
			unsigned char	opcode;

			if (eip > 0xFFFF) {
				thread->recover = 0;
				return FALSE;	/* GP fault: IP out of range */
			}

			/* Fetch next opcode byte from the 8086 cs:eip. */
			opcode = *(unsigned char *)Addr8086(regs->cs,eip);
			eip++;
			switch (opcode) {
			case 0xf0:		/* lock */
			case 0xf2:		/* repne */
			case 0xf3:		/* repe */
			case 0x2e:		/* cs */
			case 0x36:		/* ss */
			case 0x3e:		/* ds */
			case 0x26:		/* es */
			case 0x64:		/* fs */
			case 0x65:		/* gs */
				/* ignore prefix */
				continue;

			case 0x66:		/* data size */
				data_32 = TRUE;
				continue;

			case 0x67:		/* address size */
				addr_32 = TRUE;
				continue;

			case 0xe4:		/* inb imm */
			case 0xe5:		/* inw imm */
			case 0xe6:		/* outb imm */
			case 0xe7:		/* outw imm */
				/* port number is an immediate byte */
				io_port = *(unsigned char *)Addr8086(regs->cs, eip);
				eip++;
				goto do_in_out;

			case 0xec:		/* inb dx */
			case 0xed:		/* inw dx */
			case 0xee:		/* outb dx */
			case 0xef:		/* outw dx */
			case 0x6c:		/* insb */
			case 0x6d:		/* insw */
			case 0x6e:		/* outsb */
			case 0x6f:		/* outsw */
				io_port = regs->edx & 0xffff;

			do_in_out:
				if (!data_32)
					opcode |= 0x6600;	/* word IO */

				switch (emulate_io(regs, opcode, io_port)) {
				case EM_IO_DONE:
					/* instruction executed */
					break;
				case EM_IO_RETRY:
					/* port mapped, retry instruction */
					thread->recover = 0;
					return TRUE;
				case EM_IO_ERROR:
					/* port not mapped */
					thread->recover = 0;
					return FALSE;
				}
				break;

			case 0xfa:		/* cli */
				if (!v86_do_sti_cli) {
					thread->recover = 0;
					return (FALSE);
				}

				v86->flags &= ~EFL_IF;
				/* disable simulated interrupts */
				cli_count++;
				break;

			case 0xfb:		/* sti */
				if (!v86_do_sti_cli) {
					thread->recover = 0;
					return (FALSE);
				}

				if ((v86->flags & EFL_IF) == 0) {
					if (v86_do_sti_immediate) {
						v86->flags |= EFL_IF;
					} else {
						/* Defer the IF set one
						 * instruction, via a
						 * single-step trap. */
						v86->flags |= V86_IF_PENDING;
						regs->efl |= EFL_TF;
					}
					/* single step to set IF next inst. */
				}
				sti_count++;
				break;

			case 0x9c:		/* pushf */
			    {
				int	flags;
				vm_offset_t sp;
				unsigned int size;

				/* Present the VIRTUAL IF/TF to the pushed image. */
				flags = regs->efl;
				if ((v86->flags & EFL_IF) == 0)
					flags &= ~EFL_IF;

				if ((v86->flags & EFL_TF) == 0)
					flags &= ~EFL_TF;
				else	flags |= EFL_TF;

				sp = regs->uesp;
				if (!addr_32)
					sp &= 0xffff;
				else if (sp > 0xffff)
					goto stack_error;
				size = (data_32) ? 4 : 2;
				if (sp < size)
					goto stack_error;
				sp -= size;
				if (copyout((char *)&flags,
					    (user_addr_t)Addr8086(regs->ss,sp),
					    size))
					goto addr_error;
				if (addr_32)
					regs->uesp = sp;
				else
					regs->uesp = (regs->uesp & 0xffff0000) | sp;
				break;
			    }

			case 0x9d:		/* popf */
			    {
				vm_offset_t sp;
				int	nflags;

				sp = regs->uesp;
				if (!addr_32)
					sp &= 0xffff;
				else if (sp > 0xffff)
					goto stack_error;

				if (data_32) {
					if (sp > 0xffff - sizeof(int))
						goto stack_error;
					nflags = *(int *)Addr8086(regs->ss,sp);
					sp += sizeof(int);
				}
				else {
					if (sp > 0xffff - sizeof(short))
						goto stack_error;
					nflags = *(unsigned short *)
							Addr8086(regs->ss,sp);
					sp += sizeof(short);
				}
				if (addr_32)
					regs->uesp = sp;
				else
					regs->uesp = (regs->uesp & 0xffff0000) | sp;

				/* Capture popped IF/TF into virtual flags,
				 * preserving an in-progress IRET. */
				if (v86->flags & V86_IRET_PENDING) {
					v86->flags = nflags & (EFL_TF | EFL_IF);
					v86->flags |= V86_IRET_PENDING;
				} else {
					v86->flags = nflags & (EFL_TF | EFL_IF);
				}
				/* Only "safe" bits reach the real EFLAGS. */
				regs->efl = (regs->efl & ~EFL_V86_SAFE)
					  | (nflags & EFL_V86_SAFE);
				break;
			    }
			case 0xcf:		/* iret */
			    {
				vm_offset_t sp;
				int	nflags;
				union iret_struct iret_struct;

				v86->flags &= ~V86_IRET_PENDING;
				sp = regs->uesp;
				if (!addr_32)
					sp &= 0xffff;
				else if (sp > 0xffff)
					goto stack_error;

				if (data_32) {
					if (sp > 0xffff - sizeof(struct iret_32))
						goto stack_error;
					iret_struct.iret_32 =
						*(struct iret_32 *) Addr8086(regs->ss,sp);
					sp += sizeof(struct iret_32);
				}
				else {
					if (sp > 0xffff - sizeof(struct iret_16))
						goto stack_error;
					iret_struct.iret_16 =
						*(struct iret_16 *) Addr8086(regs->ss,sp);
					sp += sizeof(struct iret_16);
				}
				if (addr_32)
					regs->uesp = sp;
				else
					regs->uesp = (regs->uesp & 0xffff0000) | sp;

				if (data_32) {
					eip	      = iret_struct.iret_32.eip;
					regs->cs      = iret_struct.iret_32.cs & 0xffff;
					nflags	      = iret_struct.iret_32.eflags;
				}
				else {
					eip	      = iret_struct.iret_16.ip;
					regs->cs      = iret_struct.iret_16.cs;
					nflags	      = iret_struct.iret_16.flags;
				}

				v86->flags = nflags & (EFL_TF | EFL_IF);
				regs->efl = (regs->efl & ~EFL_V86_SAFE)
					  | (nflags & EFL_V86_SAFE);
				break;
			    }
			default:
				/*
				 * Instruction not emulated here.
				 */
				thread->recover = 0;
				return FALSE;
			}
			break;	/* exit from 'while TRUE' */
		}
		/* Commit the advanced 16-bit instruction pointer. */
		regs->eip = (regs->eip & 0xffff0000) | eip;
	}
	else {
		/*
		 * Not a trap we handle.
		 */
		thread->recover = 0;
		return FALSE;
	}

	/* With virtual interrupts enabled and no IRET pending, deliver
	 * the next queued simulated interrupt, if any. */
	if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING)==0)) {

		struct v86_interrupt_table *int_table;
		int int_count;
		int vec;
		int i;

		int_table = (struct v86_interrupt_table *) v86->int_table;
		int_count = v86->int_count;

		vec = 0;
		for (i = 0; i < int_count; int_table++, i++) {
			if (!int_table->mask && int_table->count > 0) {
				int_table->count--;
				vec = int_table->vec;
				break;
			}
		}
		if (vec != 0) {
			/*
			 * Take this interrupt
			 */
			vm_offset_t	sp;
			struct iret_16	iret_16;
			struct int_vec	int_vec;

			sp = regs->uesp & 0xffff;
			if (sp < sizeof(struct iret_16))
				goto stack_error;
			sp -= sizeof(struct iret_16);
			/* Build the 16-bit interrupt frame (ip, cs, flags). */
			iret_16.ip = regs->eip;
			iret_16.cs = regs->cs;
			iret_16.flags = regs->efl & 0xFFFF;
			if ((v86->flags & EFL_TF) == 0)
				iret_16.flags &= ~EFL_TF;
			else	iret_16.flags |= EFL_TF;

			/* Read the handler vector from linear address
			 * vec * sizeof(struct int_vec) (real-mode IVT). */
			(void) memcpy((char *) &int_vec,
				      (char *) (sizeof(struct int_vec) * vec),
				      sizeof (struct int_vec));
			if (copyout((char *)&iret_16,
				    (user_addr_t)Addr8086(regs->ss,sp),
				    sizeof(struct iret_16)))
				goto addr_error;
			regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
			regs->eip = int_vec.ip;
			regs->cs = int_vec.cs;
			regs->efl &= ~EFL_TF;
			v86->flags &= ~(EFL_IF | EFL_TF);
			v86->flags |= V86_IRET_PENDING;
		}
	}

	thread->recover = 0;
	return TRUE;

	/*
	 * On address error, report a page fault.
	 * XXX report GP fault - we don`t save
	 * the faulting address.
	 */
    addr_error:
	__asm__("addr_error:;");
	thread->recover = 0;
	return FALSE;

	/*
	 * On stack address error, return stack fault (12).
	 */
    stack_error:
	thread->recover = 0;
	regs->trapno = T_STACK_FAULT;
	return FALSE;
}
1015
1016/*
1017 * Handle AST traps for i386.
1018 * Check for delayed floating-point exception from
1019 * AT-bus machines.
1020 */
1021
1022extern void log_thread_action (thread_t, char *);
1023
/*
 * i386_astintr:
 *
 *	Handle an AST interrupt.  If the pending AST is a delayed
 *	floating-point exception (AST_I386_FP), clear it and service the
 *	FPU error; otherwise take the pending ASTs (restricted to
 *	AST_PREEMPTION when called on the preemption path).
 */
void
i386_astintr(int preemption)
{
	ast_t *my_ast, mask = AST_ALL;
	spl_t s;

	s = splsched();		/* block interrupts to check reasons */
	mp_disable_preemption();
	my_ast = ast_pending();
	if (*my_ast & AST_I386_FP) {
		/*
		 * AST was for delayed floating-point exception -
		 * FP interrupt occurred while in kernel.
		 * Turn off this AST reason and handle the FPU error.
		 */

		ast_off(AST_I386_FP);
		mp_enable_preemption();
		splx(s);

		fpexterrflt();
	}
	else {
		/*
		 * Not an FPU trap.  Handle the AST.
		 * Interrupts are still blocked.
		 */

#if 1
		if (preemption) {
			/* Preemption path: only take the preemption AST. */
			mask = AST_PREEMPTION;
			mp_enable_preemption();
		} else {
			mp_enable_preemption();
		}
#else
		mp_enable_preemption();
#endif

		/* ast_taken() receives the saved spl to restore. */
		ast_taken(mask, s);

	}
}
1067
1068/*
1069 * Handle exceptions for i386.
1070 *
1071 * If we are an AT bus machine, we must turn off the AST for a
1072 * delayed floating-point exception.
1073 *
1074 * If we are providing floating-point emulation, we may have
1075 * to retrieve the real register values from the floating point
1076 * emulator.
1077 */
1078void
1079i386_exception(
1080 int exc,
1081 int code,
1082 int subcode)
1083{
1084 spl_t s;
1085 exception_data_type_t codes[EXCEPTION_CODE_MAX];
1086
1087 /*
1088 * Turn off delayed FPU error handling.
1089 */
1090 s = splsched();
1091 mp_disable_preemption();
1092 ast_off(AST_I386_FP);
1093 mp_enable_preemption();
1094 splx(s);
1095
1096 codes[0] = code; /* new exception interface */
1097 codes[1] = subcode;
91447636 1098 exception_triage(exc, codes, 2);
1c79356b
A
1099 /*NOTREACHED*/
1100}
1101
1102boolean_t
1103check_io_fault(
1104 struct i386_saved_state *regs)
1105{
1106 int eip, opcode, io_port;
1107 boolean_t data_16 = FALSE;
1108
1109 /*
1110 * Get the instruction.
1111 */
1112 eip = regs->eip;
1113
1114 for (;;) {
1115 opcode = inst_fetch(eip, regs->cs);
1116 eip++;
1117 switch (opcode) {
1118 case 0x66: /* data-size prefix */
1119 data_16 = TRUE;
1120 continue;
1121
1122 case 0xf3: /* rep prefix */
1123 case 0x26: /* es */
1124 case 0x2e: /* cs */
1125 case 0x36: /* ss */
1126 case 0x3e: /* ds */
1127 case 0x64: /* fs */
1128 case 0x65: /* gs */
1129 continue;
1130
1131 case 0xE4: /* inb imm */
1132 case 0xE5: /* inl imm */
1133 case 0xE6: /* outb imm */
1134 case 0xE7: /* outl imm */
1135 /* port is immediate byte */
1136 io_port = inst_fetch(eip, regs->cs);
1137 eip++;
1138 break;
1139
1140 case 0xEC: /* inb dx */
1141 case 0xED: /* inl dx */
1142 case 0xEE: /* outb dx */
1143 case 0xEF: /* outl dx */
1144 case 0x6C: /* insb */
1145 case 0x6D: /* insl */
1146 case 0x6E: /* outsb */
1147 case 0x6F: /* outsl */
1148 /* port is in DX register */
1149 io_port = regs->edx & 0xFFFF;
1150 break;
1151
1152 default:
1153 return FALSE;
1154 }
1155 break;
1156 }
1157
1158 if (data_16)
1159 opcode |= 0x6600; /* word IO */
1160
1161 switch (emulate_io(regs, opcode, io_port)) {
1162 case EM_IO_DONE:
1163 /* instruction executed */
1164 regs->eip = eip;
1165 return TRUE;
1166
1167 case EM_IO_RETRY:
1168 /* port mapped, retry instruction */
1169 return TRUE;
1170
1171 case EM_IO_ERROR:
1172 /* port not mapped */
1173 return FALSE;
1174 }
1175 return FALSE;
1176}
1177
1178void
1179kernel_preempt_check (void)
1180{
91447636
A
1181 ast_t *myast;
1182
1c79356b 1183 mp_disable_preemption();
91447636
A
1184 myast = ast_pending();
1185 if ((*myast & AST_URGENT) &&
1c79356b 1186 get_interrupt_level() == 1
1c79356b
A
1187 ) {
1188 mp_enable_preemption_no_check();
1189 __asm__ volatile (" int $0xff");
1190 } else {
1191 mp_enable_preemption_no_check();
1192 }
1193}
1194
1195#if MACH_KDB
1196
1197extern void db_i386_state(struct i386_saved_state *regs);
1198
1199#include <ddb/db_output.h>
1200
1201void
1202db_i386_state(
1203 struct i386_saved_state *regs)
1204{
1205 db_printf("eip %8x\n", regs->eip);
1206 db_printf("trap %8x\n", regs->trapno);
1207 db_printf("err %8x\n", regs->err);
1208 db_printf("efl %8x\n", regs->efl);
1209 db_printf("ebp %8x\n", regs->ebp);
1210 db_printf("esp %8x\n", regs->esp);
1211 db_printf("uesp %8x\n", regs->uesp);
1212 db_printf("cs %8x\n", regs->cs & 0xff);
1213 db_printf("ds %8x\n", regs->ds & 0xff);
1214 db_printf("es %8x\n", regs->es & 0xff);
1215 db_printf("fs %8x\n", regs->fs & 0xff);
1216 db_printf("gs %8x\n", regs->gs & 0xff);
1217 db_printf("ss %8x\n", regs->ss & 0xff);
1218 db_printf("eax %8x\n", regs->eax);
1219 db_printf("ebx %8x\n", regs->ebx);
1220 db_printf("ecx %8x\n", regs->ecx);
1221 db_printf("edx %8x\n", regs->edx);
1222 db_printf("esi %8x\n", regs->esi);
1223 db_printf("edi %8x\n", regs->edi);
1224}
1225
1226#endif /* MACH_KDB */