diff --git a/osfmk/i386/trap.c b/osfmk/i386/trap.c
index 29ead88d9f5aa55d2611ce762a480d117c6bbc6c..ee4a70b3f3a00a080e889ebb32c9558de129c010 100644
@@ -1,10 +1,8 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
- * 
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -56,8 +54,6 @@
  * Hardware trap/fault handler.
  */
 
-#include <cpus.h>
-#include <fast_idle.h>
 #include <mach_kdb.h>
 #include <mach_kgdb.h>
 #include <mach_kdp.h>
@@ -77,9 +73,8 @@
 #include <vm/vm_kern.h>
 #include <vm/vm_fault.h>
 
-#include <kern/etap_macros.h>
 #include <kern/kern_types.h>
-#include <kern/ast.h>
+#include <kern/processor.h>
 #include <kern/thread.h>
 #include <kern/task.h>
 #include <kern/sched.h>
@@ -130,7 +125,7 @@ void
 thread_syscall_return(
         kern_return_t ret)
 {
-        register thread_act_t   thr_act = current_act();
+        register thread_t   thr_act = current_thread();
         register struct i386_saved_state *regs = USER_REGS(thr_act);
         regs->eax = ret;
         thread_exception_return();
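This hunk shows the i386 syscall-return convention: the kernel writes the result into the saved user-mode %eax and unwinds straight back to user space. A minimal sketch of that state fixup, with a simplified stand-in for the register area (the real struct i386_saved_state has many more fields):

    #include <stdint.h>

    /* Simplified stand-in for the saved user register area. */
    struct saved_state {
            uint32_t eax;   /* user %eax: becomes the syscall return value */
    };

    static void syscall_return_sketch(struct saved_state *regs, int ret)
    {
            regs->eax = (uint32_t)ret;  /* value user code sees on resume */
            /* the real code then calls thread_exception_return(),
             * which never returns */
    }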
@@ -147,34 +142,27 @@ extern boolean_t db_breakpoints_inserted;
 void
 thread_kdb_return(void)
 {
-       register thread_act_t   thr_act = current_act();
-       register thread_t       cur_thr = current_thread();
-       register struct i386_saved_state *regs = USER_REGS(thr_act);
+       register thread_t       thread = current_thread();
+       register struct i386_saved_state *regs = USER_REGS(thread);
 
        if (kdb_trap(regs->trapno, regs->err, regs)) {
 #if            MACH_LDEBUG
-               assert(cur_thr->mutex_count == 0); 
+               assert(thread->mutex_count == 0); 
 #endif         /* MACH_LDEBUG */
-               check_simple_locks();
                thread_exception_return();
                /*NOTREACHED*/
        }
 }
 boolean_t let_ddb_vm_fault = FALSE;
 
-#if    NCPUS > 1
-extern int kdb_active[NCPUS];
-#endif /* NCPUS > 1 */
-
 #endif /* MACH_KDB */
 
 void
 user_page_fault_continue(
        kern_return_t   kr)
 {
-       register thread_act_t   thr_act = current_act();
-       register thread_t       cur_thr = current_thread();
-       register struct i386_saved_state *regs = USER_REGS(thr_act);
+       register thread_t       thread = current_thread();
+       register struct i386_saved_state *regs = USER_REGS(thread);
 
        if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
 #if    MACH_KDB
@@ -184,7 +172,7 @@ user_page_fault_continue(
                if (db_watchpoint_list &&
                    db_watchpoints_inserted &&
                    (regs->err & T_PF_WRITE) &&
-                   db_find_watchpoint(thr_act->map,
+                   db_find_watchpoint(thread->map,
                                       (vm_offset_t)regs->cr2,
                                       regs))
                        kdb_trap(T_WATCHPOINT, 0, regs);
@@ -197,9 +185,8 @@ user_page_fault_continue(
        if (debug_all_traps_with_kdb &&
            kdb_trap(regs->trapno, regs->err, regs)) {
 #if            MACH_LDEBUG
-               assert(cur_thr->mutex_count == 0);
+               assert(thread->mutex_count == 0);
 #endif         /* MACH_LDEBUG */
-               check_simple_locks();
                thread_exception_return();
                /*NOTREACHED*/
        }
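The continuation above has a simple shape: a successful (or aborted) fault just resumes the user thread, anything else falls through to Mach exception delivery. A hedged outline, not the real code (the KERN_* values are Mach's, 0 and 14):

    /* Outline of user_page_fault_continue()'s decision. */
    static void fault_continue_sketch(int kr)
    {
            if (kr == 0 /* KERN_SUCCESS */ || kr == 14 /* KERN_ABORTED */) {
                    /* thread_exception_return(): resume user code */
                    return;
            }
            /* otherwise: i386_exception(EXC_BAD_ACCESS, kr, fault_addr); */
    }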
@@ -213,8 +200,8 @@ user_page_fault_continue(
  * Fault recovery in copyin/copyout routines.
  */
 struct recovery {
-       int     fault_addr;
-       int     recover_addr;
+       uint32_t        fault_addr;
+       uint32_t        recover_addr;
 };
 
 extern struct recovery recover_table[];
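Each recovery entry pairs a faulting instruction address with a recovery address; on a kernel-mode fault the trap handler scans the table and, on a hit, redirects the saved EIP so copyin/copyout fail gracefully instead of panicking. A sketch of that scan, assuming the table layout above (the loop body is illustrative, not the exact xnu code):

    #include <stdint.h>

    struct recovery {
            uint32_t fault_addr;
            uint32_t recover_addr;
    };

    extern struct recovery recover_table[], recover_table_end[];

    /* Illustrative: redirect a faulting EIP to its registered recovery
     * stub; returns 1 on a hit. */
    static int try_recover(uint32_t *eip)
    {
            struct recovery *rp;

            for (rp = recover_table; rp < recover_table_end; rp++) {
                    if (*eip == rp->fault_addr) {
                            *eip = rp->recover_addr;
                            return 1;
                    }
            }
            return 0;
    }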
@@ -228,9 +215,10 @@ extern struct recovery     recover_table_end[];
 extern struct recovery retry_table[];
 extern struct recovery retry_table_end[];
 
-char * trap_type[] = {TRAP_NAMES};
+const char *           trap_type[] = {TRAP_NAMES};
 int    TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
 
+
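Deriving TRAP_TYPES from the table size lets name lookups be bounds-checked. Something like the following (trap_name is a hypothetical helper, not in this file):

    /* Hypothetical helper: map a trap number to a printable name. */
    static const char *trap_name(unsigned int type)
    {
            extern const char *trap_type[];
            extern int TRAP_TYPES;

            return (type < (unsigned int)TRAP_TYPES) ? trap_type[type]
                                                     : "(unknown)";
    }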
 /*
  * Trap from kernel mode.  Only page-fault errors are recoverable,
  * and then only in special circumstances.  All other errors are
@@ -240,35 +228,21 @@ boolean_t
 kernel_trap(
        register struct i386_saved_state        *regs)
 {
-       int     exc;
-       int     code;
-       int     subcode;
-       int     interruptible;
-       register int    type;
-       vm_map_t        map;
-       kern_return_t   result;
+       int                     code;
+       unsigned int            subcode;
+       int                     interruptible = THREAD_UNINT;
+       register int            type;
+       vm_map_t                map;
+       kern_return_t           result = KERN_FAILURE;
        register thread_t       thread;
-       thread_act_t            thr_act;
-       etap_data_t             probe_data;
-       pt_entry_t              *pte;
-       extern vm_offset_t      vm_last_phys;
 
        type = regs->trapno;
        code = regs->err;
        thread = current_thread();
-       thr_act = current_act();
-
-       ETAP_DATA_LOAD(probe_data[0], regs->trapno);
-       ETAP_DATA_LOAD(probe_data[1], MACH_PORT_NULL);
-       ETAP_DATA_LOAD(probe_data[2], MACH_PORT_NULL);
-       ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
-                       0,
-                       thread,
-                       &probe_data,
-                       ETAP_DATA_ENTRY*3);
 
        switch (type) {
            case T_PREEMPT:
+               ast_taken(AST_PREEMPTION, FALSE);
                return (TRUE);
 
            case T_NO_FPU:
@@ -294,9 +268,7 @@ kernel_trap(
 #if    MACH_KDB
                mp_disable_preemption();
                if (db_active
-#if    NCPUS > 1
                    && kdb_active[cpu_number()]
-#endif /* NCPUS > 1 */
                    && !let_ddb_vm_fault) {
                        /*
                         * Force kdb to handle this one.
@@ -310,13 +282,11 @@ kernel_trap(
 
                if (subcode > LINEAR_KERNEL_ADDRESS) {
                    map = kernel_map;
-                   subcode -= LINEAR_KERNEL_ADDRESS;
-               } else if (thr_act == THR_ACT_NULL || thread == THREAD_NULL)
+               } else if (thread == THREAD_NULL)
                    map = kernel_map;
                else {
-                   map = thr_act->map;
+                   map = thread->map;
                }
-
 #if    MACH_KDB
                /*
                 * Check for watchpoint on kernel static data.
@@ -329,9 +299,7 @@ kernel_trap(
                    (vm_offset_t)subcode < vm_last_phys &&
                    ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) &
                     INTEL_PTE_WRITE) == 0) {
-                       *pte = INTEL_PTE_VALID | INTEL_PTE_WRITE |
-                              pa_to_pte(trunc_page((vm_offset_t)subcode) -
-                                        VM_MIN_KERNEL_ADDRESS);
+                 *pte = *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE; /* XXX need invltlb here? */
                        result = KERN_SUCCESS;
                } else
 #endif /* MACH_KDB */
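The rewritten watchpoint path ORs the write bit into the existing PTE instead of rebuilding it from the physical address, and the XXX comment flags the follow-on issue: a PTE edit is not visible until the stale TLB entry is flushed. On i486 and later that is a single invlpg; a hedged sketch (xnu has its own flush wrappers):

    /* Sketch: after editing a PTE in place, flush the stale TLB entry
     * for that one page (a full CR3 reload also works). */
    static inline void flush_tlb_one(void *va)
    {
            __asm__ volatile ("invlpg (%0)" :: "r" (va) : "memory");
    }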
@@ -353,7 +321,6 @@ kernel_trap(
                                        }
                                }
                        }
-
                        result = vm_fault(map,
                                          trunc_page((vm_offset_t)subcode),
                                          VM_PROT_READ|VM_PROT_WRITE,
@@ -438,6 +405,15 @@ kernel_trap(
                /* fall through... */
 
            default:
+               /*
+                * Exception 15 is reserved but some chips may generate it
+                * spuriously. Seen at startup on AMD Athlon-64.
+                */
+               if (type == 15) {
+                       kprintf("kernel_trap() ignoring spurious trap 15\n"); 
+                       return (TRUE);
+               }
+
                /*
                 * ...and return failure, so that locore can call into
                 * debugger.
@@ -478,15 +454,13 @@ user_trap(
 {
        int             exc;
        int             code;
-       int             subcode;
+       unsigned int    subcode;
        register int    type;
        vm_map_t        map;
        vm_prot_t       prot;
        kern_return_t   result;
-       register thread_act_t thr_act = current_act();
-       thread_t thread = (thr_act ? thr_act->thread : THREAD_NULL);
-       boolean_t       kernel_act = thr_act->kernel_loaded;
-       etap_data_t     probe_data;
+       thread_t        thread = current_thread();
+       boolean_t       kernel_act = FALSE;
 
        if (regs->efl & EFL_VM) {
            /*
@@ -500,6 +474,7 @@ user_trap(
        type = regs->trapno;
        code = 0;
        subcode = 0;
+       exc = 0;
 
        switch (type) {
 
@@ -576,7 +551,7 @@ user_trap(
                if (kernel_act == FALSE) {
                        if (!(regs->err & T_PF_WRITE))
                                prot = VM_PROT_READ;
-                       (void) user_page_fault_continue(vm_fault(thr_act->map,
+                       (void) user_page_fault_continue(vm_fault(thread->map,
                                trunc_page((vm_offset_t)subcode),
                                prot,
                                FALSE,
@@ -586,9 +561,8 @@ user_trap(
                else {
                        if (subcode > LINEAR_KERNEL_ADDRESS) {
                                map = kernel_map;
-                               subcode -= LINEAR_KERNEL_ADDRESS;
                        }
-                       result = vm_fault(thr_act->map,
+                       result = vm_fault(thread->map,
                                trunc_page((vm_offset_t)subcode),
                                prot,
                                FALSE,
@@ -599,7 +573,7 @@ user_trap(
                                 * so that we can ask for read-only access
                                 * but enter a (kernel) writable mapping.
                                 */
-                               result = intel_read_fault(thr_act->map,
+                               result = intel_read_fault(thread->map,
                                        trunc_page((vm_offset_t)subcode));
                        }
                        user_page_fault_continue(result);
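This branch encodes a 386 quirk: CPL 0 ignores page-level write protection, so the kernel asks for read/write access up front and, if the page is genuinely read-only, retries read-only via intel_read_fault while still entering a kernel-writable mapping. The shape of that retry, with hypothetical wrappers standing in for the real vm_fault()/intel_read_fault() calls:

    typedef int kr_t;
    #define KR_SUCCESS            0
    #define KR_PROTECTION_FAILURE 2   /* Mach's KERN_PROTECTION_FAILURE */

    extern kr_t fault_page(void *map, unsigned long va, int want_write);
    extern kr_t fault_page_readonly(void *map, unsigned long va);

    static kr_t copy_fault_sketch(void *map, unsigned long va)
    {
            kr_t kr = fault_page(map, va, 1);  /* try read/write first */

            if (kr == KR_PROTECTION_FAILURE)
                    kr = fault_page_readonly(map, va); /* page is RO; the
                                                        * 386 kernel can
                                                        * write anyway */
            return kr;
    }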
@@ -632,21 +606,6 @@ user_trap(
                return;
 #endif /* MACH_KDB */
 
-#if    ETAP_EVENT_MONITOR
-       if (thread != THREAD_NULL) {
-               ETAP_DATA_LOAD(probe_data[0], regs->trapno);
-               ETAP_DATA_LOAD(probe_data[1],
-                              thr_act->exc_actions[exc].port);
-               ETAP_DATA_LOAD(probe_data[2],
-                              thr_act->task->exc_actions[exc].port);
-               ETAP_PROBE_DATA(ETAP_P_EXCEPTION,
-                               0,
-                               thread,         
-                               &probe_data,
-                               ETAP_DATA_ENTRY*3);
-       }
-#endif /* ETAP_EVENT_MONITOR */
-
        i386_exception(exc, code, subcode);
        /*NOTREACHED*/
 }
@@ -669,7 +628,7 @@ v86_assist(
        thread_t                                thread,
        register struct i386_saved_state        *regs)
 {
-       register struct v86_assist_state *v86 = &thread->top_act->mact.pcb->ims.v86s;
+       register struct v86_assist_state *v86 = &thread->machine.pcb->ims.v86s;
 
 /*
  * Build an 8086 address.  Use only when off is known to be 16 bits.
@@ -841,9 +800,9 @@ v86_assist(
 
                    case 0x9c:          /* pushf */
                    {
-                       int     flags;
-                       vm_offset_t sp;
-                       int     size;
+                       int             flags;
+                       vm_offset_t     sp;
+                       unsigned int    size;
 
                        flags = regs->efl;
                        if ((v86->flags & EFL_IF) == 0)
@@ -863,7 +822,7 @@ v86_assist(
                            goto stack_error;
                        sp -= size;
                        if (copyout((char *)&flags,
-                                   (char *)Addr8086(regs->ss,sp),
+                                   (user_addr_t)Addr8086(regs->ss,sp),
                                    size))
                            goto addr_error;
                        if (addr_32)
@@ -916,7 +875,6 @@ v86_assist(
                    {
                        vm_offset_t sp;
                        int     nflags;
-                       int     size;
                        union iret_struct iret_struct;
 
                        v86->flags &= ~V86_IRET_PENDING;
@@ -970,7 +928,7 @@ v86_assist(
                }
                break;  /* exit from 'while TRUE' */
            }
-           regs->eip = (regs->eip & 0xffff0000 | eip);
+           regs->eip = (regs->eip & 0xffff0000) | eip;
        }
        else {
            /*
@@ -1021,7 +979,7 @@ v86_assist(
                              (char *) (sizeof(struct int_vec) * vec),
                              sizeof (struct int_vec));
                if (copyout((char *)&iret_16,
-                           (char *)Addr8086(regs->ss,sp),
+                           (user_addr_t)Addr8086(regs->ss,sp),
                            sizeof(struct iret_16)))
                    goto addr_error;
                regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
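Note that the eip change in the iret hunk above only adds clarifying parentheses: in C, & binds tighter than |, so the old expression already parsed as (regs->eip & 0xffff0000) | eip; the parentheses silence a compiler warning. The surrounding v86 code builds 8086 addresses the classic way, segment << 4 plus a 16-bit offset, and wraps stack-pointer updates to 16 bits as in the uesp line above. The same arithmetic spelled out (Addr8086 itself is a macro in this file; these helpers are illustrative):

    #include <stdint.h>

    /* 8086 real-mode address: 16-byte-paragraph segment base plus
     * a 16-bit offset. */
    static inline uint32_t addr8086(uint16_t seg, uint16_t off)
    {
            return ((uint32_t)seg << 4) + off;
    }

    /* Update only the low 16 bits of the saved stack pointer, as the
     * regs->uesp assignment above does. */
    static inline uint32_t update_sp16(uint32_t uesp, uint16_t sp)
    {
            return (uesp & 0xFFFF0000u) | sp;
    }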
@@ -1066,18 +1024,16 @@ extern void     log_thread_action (thread_t, char *);
 void
 i386_astintr(int preemption)
 {
-       int             mycpu;
-       ast_t           mask = AST_ALL;
+       ast_t           *my_ast, mask = AST_ALL;
        spl_t           s;
-       thread_t        self = current_thread();
 
        s = splsched();         /* block interrupts to check reasons */
        mp_disable_preemption();
-       mycpu = cpu_number();
-       if (need_ast[mycpu] & AST_I386_FP) {
+       my_ast = ast_pending();
+       if (*my_ast & AST_I386_FP) {
            /*
             * AST was for delayed floating-point exception -
-            * FP interrupt occured while in kernel.
+            * FP interrupt occurred while in kernel.
             * Turn off this AST reason and handle the FPU error.
             */
 
@@ -1093,32 +1049,10 @@ i386_astintr(int preemption)
             * Interrupts are still blocked.
             */
 
-#ifdef XXX
+#if 1
            if (preemption) {
-
-           /*
-            * We don't want to process any AST if we were in
-            * kernel-mode and the current thread is in any
-            * funny state (waiting and/or suspended).
-            */
-
-               thread_lock (self);
-
-               if (thread_not_preemptable(self) || self->preempt) {
-                       ast_off(AST_URGENT);
-                       thread_unlock (self);
-                       mp_enable_preemption();
-                       splx(s);
-                       return;
-               }
-               else mask = AST_PREEMPT;
+               mask = AST_PREEMPTION;
                mp_enable_preemption();
-
-/*
-               self->preempt = TH_NOT_PREEMPTABLE;
-*/
-
-               thread_unlock (self);
            } else {
                mp_enable_preemption();
            }
@@ -1126,14 +1060,8 @@ i386_astintr(int preemption)
        mp_enable_preemption();
 #endif
 
-           ast_taken(mask, s
-#if    FAST_IDLE
-                     ,NO_IDLE_THREAD
-#endif /* FAST_IDLE */
-                     );
-/*
-           self->preempt = TH_PREEMPTABLE;
-*/
+       ast_taken(mask, s);
+
        }
 }
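Both i386_astintr() hunks are part of the same cleanup: the per-CPU AST word is now reached through ast_pending() instead of indexing need_ast[] by CPU number, and the old ETAP/fast-idle scaffolding collapses to choosing a mask for ast_taken(). The surviving logic, condensed (the AST bit values here are illustrative, not xnu's):

    typedef unsigned int ast_t;
    #define AST_ALL         (~0u)       /* process every pending reason */
    #define AST_PREEMPTION  0x30u       /* illustrative bit value */
    #define AST_I386_FP     0x1000u     /* illustrative bit value */

    /* Condensed shape of i386_astintr() after the cleanup. */
    static void astintr_sketch(ast_t *my_ast, int preemption)
    {
            if (*my_ast & AST_I386_FP) {
                    *my_ast &= ~AST_I386_FP;   /* i.e. ast_off(AST_I386_FP) */
                    /* fpexterrflt(): handle the delayed FPU error */
            } else {
                    ast_t mask = preemption ? AST_PREEMPTION : AST_ALL;
                    /* ast_taken(mask, s): run the AST handler for just
                     * the selected reasons */
                    (void)mask;
            }
    }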
 
@@ -1167,7 +1095,7 @@ i386_exception(
 
        codes[0] = code;                /* new exception interface */
        codes[1] = subcode;
-       exception(exc, codes, 2);
+       exception_triage(exc, codes, 2);
        /*NOTREACHED*/
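i386_exception() packs the machine-specific details into the two-element Mach exception code array before handing off, now via exception_triage() rather than exception(). The packing mirrors the lines above:

    /* Sketch of the code-array packing in i386_exception();
     * exception_triage() is the real xnu entry point. */
    typedef int exception_type_t;

    static void deliver_exception_sketch(exception_type_t exc,
                                         int code, int subcode)
    {
            int codes[2];

            codes[0] = code;     /* e.g. kern_return_t from vm_fault */
            codes[1] = subcode;  /* e.g. faulting address from %cr2 */
            /* exception_triage(exc, codes, 2);  never returns */
    }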
 }
 
@@ -1250,13 +1178,12 @@ check_io_fault(
 void
 kernel_preempt_check (void)
 {
+       ast_t           *myast;
+
        mp_disable_preemption();
-        if ((need_ast[cpu_number()] & AST_URGENT) &&
-#if    NCPUS > 1
+       myast = ast_pending();
+        if ((*myast & AST_URGENT) &&
            get_interrupt_level() == 1
-#else  /* NCPUS > 1 */
-           get_interrupt_level() == 0
-#endif /* NCPUS > 1 */
            ) {
                mp_enable_preemption_no_check();
                 __asm__ volatile ("     int     $0xff");
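kernel_preempt_check() now also reads the per-CPU AST word via ast_pending(), and when an urgent AST is pending at the first interrupt level it triggers preemption by raising a software interrupt on itself. The self-interrupt idiom in isolation (vector 0xff is what this file uses; the wiring to the preemption handler is assumed from the code above):

    /* Re-enter the trap machinery as if a preemption interrupt had
     * arrived, by issuing a software interrupt to vector 0xff. */
    static inline void trigger_preempt_vector(void)
    {
            __asm__ volatile ("int $0xff");
    }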