apple/xnu.git blobdiff: osfmk/i386/locore.s (xnu-1504.7.4)

diff --git a/osfmk/i386/locore.s b/osfmk/i386/locore.s
index 61243db06f62d5a7c7f395477bef0354b4a974e6..b58b7ece7cc834dc7bdbd42fc6060a01e829b735 100644
--- a/osfmk/i386/locore.s
+++ b/osfmk/i386/locore.s
@@ -1,31 +1,29 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * This file contains Original Code and/or Modifications of Original Code 
- * as defined in and that are subject to the Apple Public Source License 
- * Version 2.0 (the 'License'). You may not use this file except in 
- * compliance with the License.  The rights granted to you under the 
- * License may not be used to create, or enable the creation or 
- * redistribution of, unlawful or unlicensed copies of an Apple operating 
- * system, or to circumvent, violate, or enable the circumvention or 
- * violation of, any terms of an Apple operating system software license 
- * agreement.
- *
- * Please obtain a copy of the License at 
- * http://www.opensource.apple.com/apsl/ and read it before using this 
- * file.
- *
- * The Original Code and all software distributed under the License are 
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
- * Please see the License for the specific language governing rights and 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
  * limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ * 
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
 #include <i386/asm.h>
 #include <i386/cpuid.h>
 #include <i386/eflags.h>
+#include <i386/lapic.h>
+#include <i386/rtclock.h>
 #include <i386/proc_reg.h>
 #include <i386/trap.h>
 #include <assym.s>
 #include <mach/exception_types.h>
+#include <config_dtrace.h>
 
 #define _ARCH_I386_ASM_HELP_H_          /* Prevent inclusion of user header */
 #include <mach/i386/syscall_sw.h>
 
 #include <i386/mp.h>
 
+
+#define CLI cli
+#define STI sti
+       
 /*
  * PTmap is recursive pagemap at top of virtual address space.
  * Within PTmap, the page directory can be found (third indirection).
        call    EXT(fn)                 ;\
        movl    %edi, %esp
 
-#define CCALL3(fn, arg1, arg2, arg3)   \
+/*
+ * CCALL5 is used to call functions taking 3 arguments where
+ * arg2 (a3:a2) and arg3 (a5:a4) are 64-bit values.
+ */
+#define CCALL5(fn, a1, a2, a3, a4, a5) \
        movl    %esp, %edi              ;\
-       subl    $12, %esp               ;\
+       subl    $20, %esp               ;\
        andl    $0xFFFFFFF0, %esp       ;\
-       movl    arg3, 8(%esp)           ;\
-       movl    arg2, 4(%esp)           ;\
-       movl    arg1, 0(%esp)           ;\
+       movl    a5, 16(%esp)            ;\
+       movl    a4, 12(%esp)            ;\
+       movl    a3,  8(%esp)            ;\
+       movl    a2,  4(%esp)            ;\
+       movl    a1,  0(%esp)            ;\
        call    EXT(fn)                 ;\
        movl    %edi, %esp
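
As an aside on the convention these macros encode: the five 32-bit slots
pushed by CCALL5 are exactly how the i386 cdecl ABI passes one 32-bit and
two 64-bit arguments, each 64-bit value occupying two consecutive slots
with the low half first. A minimal C sketch of a callee with this shape
(the function and values are hypothetical; only the slot layout comes
from the macro):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical callee with the CCALL5 shape: one 32-bit argument and
 * two 64-bit arguments. On i386 cdecl each uint64_t occupies two
 * consecutive 4-byte stack slots, low half first, so the five slots
 * map to fn(a1, a3:a2, a5:a4). */
static void fn(uint32_t a1, uint64_t arg2, uint64_t arg3)
{
    printf("a1=%u arg2=%llx arg3=%llx\n", (unsigned)a1,
           (unsigned long long)arg2, (unsigned long long)arg3);
}

int main(void)
{
    /* Corresponds to CCALL5(fn, $1, $2, $1, $3, $0):
     * slots a1=1, a2=2 (low), a3=1 (high), a4=3 (low), a5=0 (high). */
    fn(1, 0x100000002ULL, 3);
    return 0;
}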
 
@@ -184,7 +195,16 @@ LEXT(recover_table)                ;\
        .align  2                       ;\
        .globl  EXT(recover_table_end)  ;\
 LEXT(recover_table_end)                        ;\
-       .text
+       .long 0  /* workaround see comment below */ ;\
+       .text ;
+
+/* TODO FIXME
+ * The .long 0 works around a linker bug (insert radar# here):
+ * recover_table_end has zero size and butts up against saved_esp in
+ * acpi_wakeup.s. recover_table_end is in __RECOVER,__vectors and
+ * saved_esp is in __SLEEP,__data; because they are adjacent, the
+ * linker combines them and incorrectly relocates everything that
+ * references recover_table_end to point into the SLEEP section.
+ */
 
 /*
  * Allocate recovery and table.
@@ -225,99 +245,128 @@ Entry(timer_grab)
  */
 
 /*
- * Low 32-bits of nanotime returned in %eax.
+ * Nanotime returned in %edx:%eax.
  * Computed from tsc based on the scale factor
  * and an implicit 32 bit shift.
  *
- * Uses %esi, %edi, %ebx, %ecx and %edx.
+ * Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
  */
-#define RNT_INFO               _rtc_nanotime_info
-#define NANOTIME32                                                                                       \
-0:     movl    RNT_INFO+RNT_TSC_BASE,%esi                                               ;\
-       movl    RNT_INFO+RNT_TSC_BASE+4,%edi                                     ;\
-       rdtsc                                                                                                    ;\
-       subl    %esi,%eax                                       /* tsc - tsc_base */ ;\
-       sbbl    %edi,%edx                                                                                ;\
-       movl    RNT_INFO+RNT_SCALE,%ecx                                                  ;\
-       movl    %edx,%ebx                                       /* delta * scale */  ;\
-       mull    %ecx                                                                                     ;\
-       movl    %ebx,%eax                                                                                ;\
-       movl    %edx,%ebx                                                                                ;\
-       mull    %ecx                                                                                     ;\
-       addl    %ebx,%eax                                                                                ;\
-       addl    RNT_INFO+RNT_NS_BASE,%eax       /* add ns_base */        ;\
-       cmpl    RNT_INFO+RNT_TSC_BASE,%esi                                               ;\
-       jne             0b                                                                                               ;\
-       cmpl    RNT_INFO+RNT_TSC_BASE+4,%edi                                     ;\
-       jne             0b
+#define NANOTIME                                                       \
+       mov     %gs:CPU_NANOTIME,%edi                                   ; \
+       RTC_NANOTIME_READ_FAST()
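
The removed NANOTIME32 body spells out the conversion that
RTC_NANOTIME_READ_FAST() presumably performs in full 64-bit form:
ns = ns_base + ((tsc - tsc_base) * scale >> 32), with a retry loop
guarding against a concurrent update of the rtc_nanotime info. A C
sketch of that computation, under those assumptions (field names follow
the RNT_* offsets; unsigned __int128 is a GCC/Clang extension):

#include <stdint.h>

struct rtc_nanotime_sk {
    volatile uint64_t tsc_base;   /* RNT_TSC_BASE */
    volatile uint32_t scale;      /* RNT_SCALE */
    volatile uint64_t ns_base;    /* RNT_NS_BASE */
};

static uint64_t nanotime_sketch(const struct rtc_nanotime_sk *info,
                                uint64_t (*rdtsc)(void))
{
    uint64_t base, ns;

    do {
        base = info->tsc_base;
        /* 64x32-bit multiply, keeping bits 32..95 of the product */
        ns = (uint64_t)(((unsigned __int128)(rdtsc() - base)
                         * info->scale) >> 32);
        ns += info->ns_base;
    } while (info->tsc_base != base);   /* retry if rebased meanwhile */

    return ns;
}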
+
 
 /*
- * Add 32-bit ns delta in register dreg to timer pointed to by register treg.
+ * Add 64-bit delta in register dreg : areg to timer pointed to by register treg.
  */
-#define TIMER_UPDATE(treg,dreg)                                                      \
-       addl    TIMER_LOW(treg),dreg            /* add delta low bits     */ ;\
-       adcl    $0,TIMER_HIGHCHK(treg)          /* add carry check bits   */ ;\
-       movl    dreg,TIMER_LOW(treg)            /* store updated low bit  */ ;\
-       movl    TIMER_HIGHCHK(treg),dreg        /* copy high check bits   */ ;\
-       movl    dreg,TIMER_HIGH(treg)           /*   to high bits         */
+#define TIMER_UPDATE(treg,dreg,areg,offset)                                                                    \
+       addl    (TIMER_LOW+(offset))(treg),areg         /* add low bits */                      ;\
+       adcl    dreg,(TIMER_HIGH+(offset))(treg)        /* add carry high bits */       ;\
+       movl    areg,(TIMER_LOW+(offset))(treg)         /* store updated low bit */     ;\
+       movl    (TIMER_HIGH+(offset))(treg),dreg        /* copy high bits */            ;\
+       movl    dreg,(TIMER_HIGHCHK+(offset))(treg)     /* to high check */
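
In C terms, TIMER_UPDATE is a 64-bit add spelled as addl/adcl plus a
republication of the high word into TIMER_HIGHCHK. A plausible reading,
given timer_grab above, is that a 32-bit reader retries until the high
and check words agree and so never observes a torn 64-bit value; a
sketch under that assumption:

#include <stdint.h>

struct timer_sk {
    volatile uint32_t low;       /* TIMER_LOW */
    volatile uint32_t high;      /* TIMER_HIGH */
    volatile uint32_t high_chk;  /* TIMER_HIGHCHK */
};

/* Writer: the addl/adcl pair, with high_chk republished last as in
 * the macro. */
static void timer_update_sketch(struct timer_sk *t, uint64_t delta)
{
    uint64_t sum = (((uint64_t)t->high << 32) | t->low) + delta;

    t->high     = (uint32_t)(sum >> 32);
    t->low      = (uint32_t)sum;
    t->high_chk = t->high;
}

/* Reader (cf. timer_grab): retry while an update is in flight. */
static uint64_t timer_grab_sketch(const struct timer_sk *t)
{
    uint32_t high, low;

    do {
        high = t->high;
        low  = t->low;
    } while (high != t->high_chk);

    return ((uint64_t)high << 32) | low;
}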
 
 /*
  * Add time delta to old timer and start new.
  */
-#define TIMER_EVENT(old,new)                                                  \
-       NANOTIME32                              /* eax low bits nanosecs  */ ;\
-       movl    %gs:CPU_PROCESSOR,%ecx          /* get current processor  */ ;\
-       movl    CURRENT_TIMER(%ecx),%ecx        /* get current timer      */ ;\
-       movl    %eax,%edx                       /* save timestamp in %edx */ ;\
-       subl    TIMER_TSTAMP(%ecx),%eax         /* compute elapsed time   */ ;\
-       TIMER_UPDATE(%ecx,%eax)                 /* update timer struct    */ ;\
-       addl    $(new##_TIMER-old##_TIMER),%ecx /* point to new timer     */ ;\
-       movl    %edx,TIMER_TSTAMP(%ecx)         /* set timestamp          */ ;\
-       movl    %gs:CPU_PROCESSOR,%edx          /* get current processor  */ ;\
-       movl    %ecx,CURRENT_TIMER(%edx)        /* set current timer      */
-
+#define TIMER_EVENT(old,new)                                                                                     \
+       NANOTIME                                                        /* edx:eax nanosecs */                  ; \
+       movl    %eax,%esi                                       /* save timestamp */                    ; \
+       movl    %edx,%edi                                       /* save timestamp */                    ; \
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx  /* get current thread */                    ; \
+       subl    (old##_TIMER)+TIMER_TSTAMP(%ecx),%eax      /* compute elapsed time */     ; \
+       sbbl    (old##_TIMER)+TIMER_TSTAMP+4(%ecx),%edx  /* compute elapsed time */       ; \
+       TIMER_UPDATE(%ecx,%edx,%eax,old##_TIMER)                        /* update timer */                        ; \
+       movl    %esi,(new##_TIMER)+TIMER_TSTAMP(%ecx)      /* set timestamp */                   ; \
+       movl    %edi,(new##_TIMER)+TIMER_TSTAMP+4(%ecx)  /* set timestamp */                     ; \
+       leal    (new##_TIMER)(%ecx), %ecx   /* compute new timer pointer */ ; \
+       movl    %gs:CPU_PROCESSOR,%ebx          /* get current processor */             ; \
+       movl    %ecx,THREAD_TIMER(%ebx)         /* set current timer */                 ; \
+       movl    %esi,%eax                                       /* restore timestamp */                 ; \
+       movl    %edi,%edx                                       /* restore timestamp */                 ; \
+       subl    (old##_STATE)+TIMER_TSTAMP(%ebx),%eax      /* compute elapsed time */     ; \
+       sbbl    (old##_STATE)+TIMER_TSTAMP+4(%ebx),%edx  /* compute elapsed time */       ; \
+       TIMER_UPDATE(%ebx,%edx,%eax,old##_STATE)                        /* update timer */                        ; \
+       leal    (new##_STATE)(%ebx),%ecx        /* compute new state pointer */ ; \
+       movl    %ecx,CURRENT_STATE(%ebx)        /* set current state */                 ; \
+       movl    %esi,TIMER_TSTAMP(%ecx)         /* set timestamp */                             ; \
+       movl    %edi,TIMER_TSTAMP+4(%ecx)       /* set timestamp */
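
A C rendering of TIMER_EVENT(USER,SYSTEM), as instantiated by
TIME_TRAP_UENTRY, may help: bill the elapsed time to the old thread
timer and the old processor state, then timestamp and select the new
ones. Structure and field names below are illustrative, and the plain
+= stands in for TIMER_UPDATE:

#include <stdint.h>

struct timer_sk     { uint64_t total, tstamp; };

struct thread_sk    { struct timer_sk user_timer, system_timer; };

struct processor_sk {
    struct timer_sk  user_state, system_state;
    struct timer_sk *thread_timer;    /* THREAD_TIMER */
    struct timer_sk *current_state;   /* CURRENT_STATE */
};

static void timer_event_user_to_system(struct thread_sk *th,
                                       struct processor_sk *pr,
                                       uint64_t now)
{
    th->user_timer.total    += now - th->user_timer.tstamp;
    th->system_timer.tstamp  = now;
    pr->thread_timer         = &th->system_timer;

    pr->user_state.total    += now - pr->user_state.tstamp;
    pr->system_state.tstamp  = now;
    pr->current_state        = &pr->system_state;
}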
 
 /*
  * Update time on user trap entry.
- * Uses %eax,%ecx,%edx,%esi.
+ * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
  */
-#define        TIME_TRAP_UENTRY        TIMER_EVENT(USER,SYSTEM)
+#define        TIME_TRAP_UENTRY                        TIMER_EVENT(USER,SYSTEM)
 
 /*
  * update time on user trap exit.
- * Uses %eax,%ecx,%edx,%esi.
+ * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
  */
-#define        TIME_TRAP_UEXIT         TIMER_EVENT(SYSTEM,USER)
+#define        TIME_TRAP_UEXIT                         TIMER_EVENT(SYSTEM,USER)
 
 /*
  * update time on interrupt entry.
- * Uses %eax,%ecx,%edx,%esi.
- */
-#define        TIME_INT_ENTRY \
-       NANOTIME32                              /* eax low bits nanosecs  */ ;\
-       movl    %gs:CPU_PROCESSOR,%ecx          /* get current processor  */ ;\
-       movl    CURRENT_TIMER(%ecx),%ecx        /* get current timer      */ ;\
-       movl    %eax,%edx                       /* save timestamp in %edx */ ;\
-       subl    TIMER_TSTAMP(%ecx),%eax         /* compute elapsed time   */ ;\
-       TIMER_UPDATE(%ecx,%eax)                 /* update timer struct    */ ;\
-       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */ ;\
-       addl    $(SYSTEM_TIMER),%ecx            /* point to sys timer     */ ;\
-       movl    %edx,TIMER_TSTAMP(%ecx)         /* set timestamp          */
+ * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
+ * Saves processor state info on stack.
+ */
+#define        TIME_INT_ENTRY                                                                                                    \
+       NANOTIME                                                        /* edx:eax nanosecs */                  ; \
+       movl    %eax,%gs:CPU_INT_EVENT_TIME             /* save in cpu data */          ; \
+       movl    %edx,%gs:CPU_INT_EVENT_TIME+4   /* save in cpu data */          ; \
+       movl    %eax,%esi                                       /* save timestamp */                    ; \
+       movl    %edx,%edi                                       /* save timestamp */                    ; \
+       movl    %gs:CPU_PROCESSOR,%ebx          /* get current processor */             ; \
+       movl    THREAD_TIMER(%ebx),%ecx         /* get current timer */                 ; \
+       subl    TIMER_TSTAMP(%ecx),%eax         /* compute elapsed time */              ; \
+       sbbl    TIMER_TSTAMP+4(%ecx),%edx       /* compute elapsed time */              ; \
+       TIMER_UPDATE(%ecx,%edx,%eax,0)          /* update timer */                              ; \
+       movl    KERNEL_TIMER(%ebx),%ecx         /* point to kernel timer */             ; \
+       movl    %esi,TIMER_TSTAMP(%ecx)         /* set timestamp */                             ; \
+       movl    %edi,TIMER_TSTAMP+4(%ecx)       /* set timestamp */                             ; \
+       movl    %esi,%eax                                       /* restore timestamp */                 ; \
+       movl    %edi,%edx                                       /* restore timestamp */                 ; \
+       movl    CURRENT_STATE(%ebx),%ecx        /* get current state */                 ; \
+       pushl   %ecx                                            /* save state */                                ; \
+       subl    TIMER_TSTAMP(%ecx),%eax         /* compute elapsed time */              ; \
+       sbbl    TIMER_TSTAMP+4(%ecx),%edx       /* compute elapsed time */              ; \
+       TIMER_UPDATE(%ecx,%edx,%eax,0)          /* update timer */                              ; \
+       leal    IDLE_STATE(%ebx),%eax           /* get idle state */                    ; \
+       cmpl    %eax,%ecx                                       /* compare current state */             ; \
+       je              0f                                                      /* skip if equal */                             ; \
+       leal    SYSTEM_STATE(%ebx),%ecx         /* get system state */                  ; \
+       movl    %ecx,CURRENT_STATE(%ebx)        /* set current state */                 ; \
+0:     movl    %esi,TIMER_TSTAMP(%ecx)         /* set timestamp */                             ; \
+       movl    %edi,TIMER_TSTAMP+4(%ecx)       /* set timestamp */
 
 /*
  * update time on interrupt exit.
- * Uses %eax, %ecx, %edx, %esi.
- */
-#define        TIME_INT_EXIT \
-       NANOTIME32                              /* eax low bits nanosecs  */ ;\
-       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */ ;\
-       addl    $(SYSTEM_TIMER),%ecx            /* point to sys timer     */ ;\
-       movl    %eax,%edx                       /* save timestamp in %edx */ ;\
-       subl    TIMER_TSTAMP(%ecx),%eax         /* compute elapsed time   */ ;\
-       TIMER_UPDATE(%ecx,%eax)                 /* update timer struct    */ ;\
-       movl    %gs:CPU_PROCESSOR,%ecx          /* get current processor  */ ;\
-       movl    CURRENT_TIMER(%ecx),%ecx        /* interrupted timer      */ ;\
-       movl    %edx,TIMER_TSTAMP(%ecx)         /* set timestamp          */
+ * Uses %eax,%ebx,%ecx,%edx,%esi,%edi.
+ * Restores processor state info from stack.
+ */
+#define        TIME_INT_EXIT                                                                                                     \
+       NANOTIME                                                        /* edx:eax nanosecs */                  ; \
+       movl    %eax,%gs:CPU_INT_EVENT_TIME             /* save in cpu data */          ; \
+       movl    %edx,%gs:CPU_INT_EVENT_TIME+4   /* save in cpu data */          ; \
+       movl    %eax,%esi                                       /* save timestamp */                    ; \
+       movl    %edx,%edi                                       /* save timestamp */                    ; \
+       movl    %gs:CPU_PROCESSOR,%ebx          /* get current processor */             ; \
+       movl    KERNEL_TIMER(%ebx),%ecx         /* point to kernel timer */             ; \
+       subl    TIMER_TSTAMP(%ecx),%eax         /* compute elapsed time */              ; \
+       sbbl    TIMER_TSTAMP+4(%ecx),%edx       /* compute elapsed time */              ; \
+       TIMER_UPDATE(%ecx,%edx,%eax,0)          /* update timer */                              ; \
+       movl    THREAD_TIMER(%ebx),%ecx         /* interrupted timer */                 ; \
+       movl    %esi,TIMER_TSTAMP(%ecx)         /* set timestamp */                             ; \
+       movl    %edi,TIMER_TSTAMP+4(%ecx)       /* set timestamp */                             ; \
+       movl    %esi,%eax                                       /* restore timestamp */                 ; \
+       movl    %edi,%edx                                       /* restore timestamp */                 ; \
+       movl    CURRENT_STATE(%ebx),%ecx        /* get current state */                 ; \
+       subl    TIMER_TSTAMP(%ecx),%eax         /* compute elapsed time */              ; \
+       sbbl    TIMER_TSTAMP+4(%ecx),%edx       /* compute elapsed time */              ; \
+       TIMER_UPDATE(%ecx,%edx,%eax,0)          /* update timer */                              ; \
+       popl    %ecx                                            /* restore state */                             ; \
+       movl    %ecx,CURRENT_STATE(%ebx)        /* set current state */                 ; \
+       movl    %esi,TIMER_TSTAMP(%ecx)         /* set timestamp */                             ; \
+       movl    %edi,TIMER_TSTAMP+4(%ecx)       /* set timestamp */
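
Entry and exit form a bracket: entry bills the interrupted thread timer
and processor state, parks the interrupted state pointer on the stack,
and promotes a non-idle CPU to SYSTEM_STATE; exit bills the kernel
timer and restores what was pushed. A paired C sketch (names are
illustrative; bill() stands in for TIMER_UPDATE of the elapsed delta):

#include <stdint.h>

struct timer_sk { uint64_t total, tstamp; };

struct processor_sk {
    struct timer_sk *thread_timer, *current_state;
    struct timer_sk  kernel_timer, system_state, idle_state;
};

static void bill(struct timer_sk *t, uint64_t now)
{
    t->total += now - t->tstamp;
}

static struct timer_sk *time_int_entry_sk(struct processor_sk *pr,
                                          uint64_t now)
{
    struct timer_sk *saved = pr->current_state;

    bill(pr->thread_timer, now);      /* charge the interrupted timer */
    pr->kernel_timer.tstamp = now;    /* start timing the interrupt */

    bill(saved, now);                 /* charge the interrupted state */
    if (saved != &pr->idle_state)     /* an idle CPU stays in idle */
        pr->current_state = &pr->system_state;
    pr->current_state->tstamp = now;
    return saved;                     /* "pushl %ecx" in the asm */
}

static void time_int_exit_sk(struct processor_sk *pr,
                             struct timer_sk *saved, uint64_t now)
{
    bill(&pr->kernel_timer, now);     /* charge the interrupt itself */
    pr->thread_timer->tstamp = now;   /* resume the interrupted timer */

    bill(pr->current_state, now);
    pr->current_state = saved;        /* "popl %ecx" in the asm */
    pr->current_state->tstamp = now;
}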
 
 #endif /* STAT_TIME */
 
@@ -415,9 +464,9 @@ Entry(db_task_gen_prot)
 Entry(db_task_start)
        movl    %esp,%edx
        subl    $(ISS32_SIZE),%edx
-       movl    %edx,%esp               /* allocate i386_saved_state on stack */
-       movl    %eax,R_ERR(%esp)
-       movl    %ebx,R_TRAPNO(%esp)
+       movl    %edx,%esp               /* allocate x86_saved_state on stack */
+       movl    %eax,R32_ERR(%esp)
+       movl    %ebx,R32_TRAPNO(%esp)
        pushl   %edx
        CPU_NUMBER(%edx)
        movl    CX(EXT(master_dbtss),%edx),%edx
@@ -436,15 +485,23 @@ Entry(db_task_start)
 /*
  *     Called as a function, makes the current thread
  *     return from the kernel as if from an exception.
+ *     We consult DTrace to determine whether this is a
+ *     newly created thread for which a probe must be fired.
  */
 
        .globl  EXT(thread_exception_return)
        .globl  EXT(thread_bootstrap_return)
-LEXT(thread_exception_return)
 LEXT(thread_bootstrap_return)
-       cli
+#if CONFIG_DTRACE
+       call EXT(dtrace_thread_bootstrap)
+#endif
+
+LEXT(thread_exception_return)
+       CLI
        movl    %gs:CPU_KERNEL_STACK,%ecx
+
        movl    (%ecx),%esp                     /* switch back to PCB stack */
+       xorl    %ecx,%ecx               /* don't check if we're in the PFZ */
        jmp     EXT(return_from_trap)
 
 Entry(call_continuation)
@@ -462,10 +519,10 @@ Entry(call_continuation)
        pushl   %eax
        call    EXT(thread_terminate)
        
-
+       
        
 /*******************************************************************************************************
- *     
+ *
  * All 64 bit task 'exceptions' enter lo_alltraps:
  *     esp     -> x86_saved_state_t
  * 
@@ -473,23 +530,38 @@ Entry(call_continuation)
  *     cr3      -> kernel directory
  *     esp      -> low based stack
  *     gs       -> CPU_DATA_GS
- *     cs       -> KERNEL_CS
+ *     cs       -> KERNEL32_CS
  *     ss/ds/es -> KERNEL_DS
  *
  *     interrupts disabled
  *     direction flag cleared
  */
 Entry(lo_alltraps)
-       movl    R_CS(%esp),%eax         /* assume 32-bit state */
+       movl    R32_CS(%esp),%eax       /* assume 32-bit state */
        cmpl    $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */   
        jne     1f
        movl    R64_CS(%esp),%eax       /* 64-bit user mode */
 1:
-       testb   $3,%eax
+       testb   $3,%al
        jz      trap_from_kernel
                                                /* user mode trap */
        TIME_TRAP_UENTRY
 
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx
+       movl    ACT_TASK(%ecx),%ebx
+
+       /* Check for active vtimers in the current task */
+       cmpl    $0,TASK_VTIMERS(%ebx)
+       jz              1f
+
+       /* Set a pending AST */
+       orl             $(AST_BSD),%gs:CPU_PENDING_AST
+
+       /* Set a thread AST (atomic) */
+       lock
+       orl             $(AST_BSD),ACT_AST(%ecx)
+       
+1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp                       /* switch to kernel stack */
        sti
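
The vtimer check inserted after TIME_TRAP_UENTRY here recurs verbatim at
each syscall entry below: if the current task has armed virtual timers,
an AST_BSD is pended on the CPU with a plain or, and on the thread with
a locked or, since other CPUs may update the same thread's AST word. A C
sketch of the pattern (AST_BSD's value is assumed, for illustration
only):

#include <stdint.h>

#define AST_BSD 0x80u   /* assumed value, for illustration only */

struct task_sk   { volatile int vtimers; };           /* TASK_VTIMERS */
struct thread_sk { struct task_sk *task; volatile uint32_t ast; };
struct cpu_sk    { volatile uint32_t pending_ast; };

static void check_vtimers(struct cpu_sk *cpu, struct thread_sk *th)
{
    if (th->task->vtimers == 0)
        return;                              /* jz 1f */
    cpu->pending_ast |= AST_BSD;             /* CPU-local: plain orl */
    __sync_fetch_and_or(&th->ast, AST_BSD);  /* cross-CPU: lock orl */
}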
@@ -497,36 +569,70 @@ Entry(lo_alltraps)
        CCALL1(user_trap, %ebx)         /* call user trap routine */
        cli                             /* hold off intrs - critical section */
        popl    %esp                    /* switch back to PCB stack */
-
+       xorl    %ecx,%ecx               /* don't check if we're in the PFZ */
+       
 /*
  * Return from trap or system call, checking for ASTs.
  * On lowbase PCB stack with intrs disabled
  */    
 LEXT(return_from_trap)
-       movl    %gs:CPU_PENDING_AST,%eax
-       testl   %eax,%eax
+       movl    %gs:CPU_PENDING_AST, %eax
+       testl   %eax, %eax
        je      EXT(return_to_user)     /* branch if no AST */
 
-       movl    %gs:CPU_KERNEL_STACK,%ebx
-       xchgl   %ebx,%esp               /* switch to kernel stack */
-       sti                             /* interrupts always enabled on return to user mode */
+LEXT(return_from_trap_with_ast)
+       movl    %gs:CPU_KERNEL_STACK, %ebx
+       xchgl   %ebx, %esp              /* switch to kernel stack */
 
+       testl   %ecx, %ecx              /* see if we need to check for an EIP in the PFZ */
+       je      2f                      /* no, go handle the AST */
+       cmpl    $(SS_64), SS_FLAVOR(%ebx)       /* are we a 64-bit task? */
+       je      1f
+                                       /* no... 32-bit user mode */
+       movl    R32_EIP(%ebx), %eax
+       pushl   %ebx                    /* save PCB stack */
+       xorl    %ebp, %ebp              /* clear frame pointer */
+       CCALL1(commpage_is_in_pfz32, %eax)
+       popl    %ebx                    /* retrieve pointer to PCB stack */
+       testl   %eax, %eax
+       je      2f                      /* not in the PFZ... go service AST */
+       movl    %eax, R32_EBX(%ebx)     /* let the PFZ know we've pended an AST */
+       xchgl   %ebx, %esp              /* switch back to PCB stack */
+       jmp     EXT(return_to_user)
+1:                                     /* 64-bit user mode */
+       movl    R64_RIP(%ebx), %ecx
+       movl    R64_RIP+4(%ebx), %eax
        pushl   %ebx                    /* save PCB stack */
+       xorl    %ebp, %ebp              /* clear frame pointer */
+       CCALL2(commpage_is_in_pfz64, %ecx, %eax)
+       popl    %ebx                    /* retrieve pointer to PCB stack */
+       testl   %eax, %eax              
+       je      2f                      /* not in the PFZ... go service AST */
+       movl    %eax, R64_RBX(%ebx)     /* let the PFZ know we've pended an AST */
+       xchgl   %ebx, %esp              /* switch back to PCB stack */
+       jmp     EXT(return_to_user)
+2:     
+       STI                             /* interrupts always enabled on return to user mode */
+       pushl   %ebx                    /* save PCB stack */
+       xorl    %ebp, %ebp              /* Clear framepointer */
        CCALL1(i386_astintr, $0)        /* take the AST */
-       cli
+       CLI
+       
        popl    %esp                    /* switch back to PCB stack (w/exc link) */
+
+       xorl    %ecx, %ecx              /* don't check if we're in the PFZ */
        jmp     EXT(return_from_trap)   /* and check again (rare) */
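
The new %ecx protocol distinguishes callers that must honor the
commpage's preemption-free zone (PFZ): when the interrupted user EIP
lies inside the PFZ, the AST is not serviced now; instead the nonzero
result of commpage_is_in_pfz32/64 is planted in the user's EBX/RBX so
the PFZ code can trap back into the kernel when it leaves the zone. A C
sketch of the 32-bit path (the stub's bounds and the prototypes are
assumptions):

#include <stdbool.h>
#include <stdint.h>

struct saved_state32_sk { uint32_t eip, ebx; };

/* Stub for the kernel's commpage_is_in_pfz32(): nonzero iff the EIP
 * lies in the commpage preemption-free zone (bounds invented here). */
static uint32_t commpage_is_in_pfz32(uint32_t eip)
{
    return eip >= 0xffff0700u && eip < 0xffff0780u;
}

static void ast_taken(void) { /* CCALL1(i386_astintr, $0) */ }

/* Returns true when delivery is deferred to the PFZ exit path. */
static bool deliver_or_defer_ast(struct saved_state32_sk *regs,
                                 bool check_pfz)
{
    uint32_t in_pfz;

    if (check_pfz && (in_pfz = commpage_is_in_pfz32(regs->eip)) != 0) {
        regs->ebx = in_pfz;   /* nonzero EBX: PFZ will trap back */
        return true;          /* jmp return_to_user */
    }
    ast_taken();              /* then loop back to return_from_trap */
    return false;
}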
 
 LEXT(return_to_user)
        TIME_TRAP_UEXIT
-
+       
 LEXT(ret_to_user)
        cmpl    $0, %gs:CPU_IS64BIT
        je      EXT(lo_ret_to_user)
        jmp     EXT(lo64_ret_to_user)
 
 
-       
+
 /*
  * Trap from kernel mode.  No need to switch stacks.
  * Interrupts must be off here - we will set them to state at time of trap
@@ -534,22 +640,27 @@ LEXT(ret_to_user)
  */
 trap_from_kernel:
        movl    %esp, %eax              /* saved state addr */
-       CCALL1(kernel_trap, %eax)       /* to kernel trap routine */
+       pushl   R32_EIP(%esp)           /* Simulate a CALL from fault point */
+       pushl   %ebp                    /* Extend framepointer chain */
+       movl    %esp, %ebp
+       CCALL1(kernel_trap, %eax)       /* Call kernel trap handler */
+       popl    %ebp
+       addl    $4, %esp
        cli
 
        movl    %gs:CPU_PENDING_AST,%eax                /* get pending asts */
        testl   $ AST_URGENT,%eax       /* any urgent preemption? */
        je      ret_to_kernel                   /* no, nothing to do */
-       cmpl    $ T_PREEMPT,R_TRAPNO(%esp)
+       cmpl    $ T_PREEMPT,R32_TRAPNO(%esp)
        je      ret_to_kernel                     /* T_PREEMPT handled in kernel_trap() */
-       testl   $ EFL_IF,R_EFLAGS(%esp)                 /* interrupts disabled? */
+       testl   $ EFL_IF,R32_EFLAGS(%esp)               /* interrupts disabled? */
        je      ret_to_kernel
        cmpl    $0,%gs:CPU_PREEMPTION_LEVEL             /* preemption disabled? */
        jne     ret_to_kernel
        movl    %gs:CPU_KERNEL_STACK,%eax
        movl    %esp,%ecx
        xorl    %eax,%ecx
-       andl    $(-KERNEL_STACK_SIZE),%ecx
+       and     EXT(kernel_stack_mask),%ecx
        testl   %ecx,%ecx               /* are we on the kernel stack? */
        jne     ret_to_kernel           /* no, skip it */
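
The pushl R32_EIP / pushl %ebp pair above builds a synthetic call frame
before entering kernel_trap, so a frame-pointer walker sees the fault
point as kernel_trap's caller. A sketch of the frame shape and of a
walker that benefits from it (32-bit i386 layout assumed throughout):

#include <stdint.h>

/* %ebp points at prev_ebp, with the fault EIP sitting where a CALL
 * would have left its return address. */
struct fake_frame {
    uint32_t prev_ebp;   /* pushl %ebp: extends the frame-pointer chain */
    uint32_t ret_eip;    /* pushl R32_EIP(%esp): the fault point */
};

static int backtrace_sketch(uint32_t *ebp, uint32_t *pcs, int max)
{
    int n = 0;

    while (ebp && n < max) {
        struct fake_frame *f = (struct fake_frame *)ebp;
        pcs[n++] = f->ret_eip;               /* fault EIP shows up here */
        ebp = (uint32_t *)(uintptr_t)f->prev_ebp;
    }
    return n;
}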
 
@@ -570,7 +681,7 @@ ret_to_kernel:
  *     cr3      -> kernel directory
  *     esp      -> low based stack
  *     gs       -> CPU_DATA_GS
- *     cs       -> KERNEL_CS
+ *     cs       -> KERNEL32_CS
  *     ss/ds/es -> KERNEL_DS
  *
  *     interrupts disabled
@@ -599,19 +710,31 @@ Entry(lo_allintrs)
        
        TIME_INT_ENTRY                  /* do timing */
 
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx
+       movl    ACT_TASK(%ecx),%ebx
+
+       /* Check for active vtimers in the current task */
+       cmpl    $0,TASK_VTIMERS(%ebx)
+       jz              1f
+
+       /* Set a pending AST */
+       orl             $(AST_BSD),%gs:CPU_PENDING_AST
+
+       /* Set a thread AST (atomic) */
+       lock
+       orl             $(AST_BSD),ACT_AST(%ecx)
+       
+1:
        incl    %gs:CPU_PREEMPTION_LEVEL
        incl    %gs:CPU_INTERRUPT_LEVEL
 
        movl    %gs:CPU_INT_STATE, %eax
-       CCALL1(PE_incoming_interrupt, %eax) /* call generic interrupt routine */
+       CCALL1(interrupt, %eax)         /* call generic interrupt routine */
 
        cli                             /* just in case we returned with intrs enabled */
        xorl    %eax,%eax
        movl    %eax,%gs:CPU_INT_STATE  /* clear intr state pointer */
 
-       .globl  EXT(return_to_iret)
-LEXT(return_to_iret)                   /* (label for kdb_kintr and hardclock) */
-
        decl    %gs:CPU_INTERRUPT_LEVEL
        decl    %gs:CPU_PREEMPTION_LEVEL
 
@@ -635,12 +758,12 @@ LEXT(return_to_iret)                      /* (label for kdb_kintr and hardclock) */
        popl    %esp                    /* switch back to old stack */
 
        /* Load interrupted code segment into %eax */
-       movl    R_CS(%esp),%eax         /* assume 32-bit state */
+       movl    R32_CS(%esp),%eax       /* assume 32-bit state */
        cmpl    $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */   
        jne     3f
        movl    R64_CS(%esp),%eax       /* 64-bit user mode */
 3:
-       testb   $3,%eax                 /* user mode, */
+       testb   $3,%al                  /* user mode, */
        jnz     ast_from_interrupt_user /* go handle potential ASTs */
        /*
         * we only want to handle preemption requests if
@@ -657,7 +780,7 @@ LEXT(return_to_iret)                        /* (label for kdb_kintr and hardclock) */
        movl    %gs:CPU_KERNEL_STACK,%eax
        movl    %esp,%ecx
        xorl    %eax,%ecx
-       andl    $(-KERNEL_STACK_SIZE),%ecx
+       and     EXT(kernel_stack_mask),%ecx
        testl   %ecx,%ecx                       /* are we on the kernel stack? */
        jne     ret_to_kernel                   /* no, skip it */
 
@@ -678,8 +801,8 @@ int_from_intstack:
        incl    %gs:CPU_PREEMPTION_LEVEL
        incl    %gs:CPU_INTERRUPT_LEVEL
 
-       movl    %esp, %edx              /* i386_saved_state */
-       CCALL1(PE_incoming_interrupt, %edx)
+       movl    %esp, %edx              /* x86_saved_state */
+       CCALL1(interrupt, %edx)
 
        decl    %gs:CPU_INTERRUPT_LEVEL
        decl    %gs:CPU_PREEMPTION_LEVEL
@@ -696,7 +819,8 @@ ast_from_interrupt_user:
 
        TIME_TRAP_UENTRY
 
-       jmp     EXT(return_from_trap)   /* return */
+       movl    $1, %ecx                /* check if we're in the PFZ */
+       jmp     EXT(return_from_trap_with_ast)  /* return */
 
 
 /*******************************************************************************************************
@@ -704,11 +828,11 @@ ast_from_interrupt_user:
  * 32bit Tasks
  * System call entries via INTR_GATE or sysenter:
  *
- *     esp      -> i386_saved_state_t
+ *     esp      -> x86_saved_state32_t
  *     cr3      -> kernel directory
  *     esp      -> low based stack
  *     gs       -> CPU_DATA_GS
- *     cs       -> KERNEL_CS
+ *     cs       -> KERNEL32_CS
  *     ss/ds/es -> KERNEL_DS
  *
  *     interrupts disabled
@@ -720,48 +844,89 @@ Entry(lo_sysenter)
         * We can be here either for a mach syscall or a unix syscall,
         * as indicated by the sign of the code:
         */
-       movl    R_EAX(%esp),%eax
+       movl    R32_EAX(%esp),%eax
        testl   %eax,%eax
        js      EXT(lo_mach_scall)              /* < 0 => mach */
                                                /* > 0 => unix */
        
 Entry(lo_unix_scall)
-        TIME_TRAP_UENTRY
+       TIME_TRAP_UENTRY
+
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    ACT_TASK(%ecx),%ebx                     /* point to current task  */
+       addl    $1,TASK_SYSCALLS_UNIX(%ebx)     /* increment call count   */
+
+       /* Check for active vtimers in the current task */
+       cmpl    $0,TASK_VTIMERS(%ebx)
+       jz              1f
+
+       /* Set a pending AST */
+       orl             $(AST_BSD),%gs:CPU_PENDING_AST
 
+       /* Set a thread AST (atomic) */
+       lock
+       orl             $(AST_BSD),ACT_AST(%ecx)
+       
+1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
 
        sti
-       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
-       movl    ACT_TASK(%ecx),%ecx             /* point to current task  */
-       addl    $1,TASK_SYSCALLS_UNIX(%ecx)     /* increment call count   */
 
        CCALL1(unix_syscall, %ebx)
        /*
         * always returns through thread_exception_return
         */
-       
+
 
 Entry(lo_mach_scall)
        TIME_TRAP_UENTRY
 
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    ACT_TASK(%ecx),%ebx                     /* point to current task  */
+       addl    $1,TASK_SYSCALLS_MACH(%ebx)     /* increment call count   */
+
+       /* Check for active vtimers in the current task */
+       cmpl    $0,TASK_VTIMERS(%ebx)
+       jz              1f
+
+       /* Set a pending AST */
+       orl             $(AST_BSD),%gs:CPU_PENDING_AST
+
+       /* Set a thread AST (atomic) */
+       lock
+       orl             $(AST_BSD),ACT_AST(%ecx)
+       
+1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
 
        sti
-       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
-       movl    ACT_TASK(%ecx),%ecx             /* point to current task  */
-       addl    $1,TASK_SYSCALLS_MACH(%ecx)     /* increment call count   */
 
        CCALL1(mach_call_munger, %ebx)
        /*
         * always returns through thread_exception_return
         */
 
-       
+
 Entry(lo_mdep_scall)
-        TIME_TRAP_UENTRY
+       TIME_TRAP_UENTRY
+
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    ACT_TASK(%ecx),%ebx                     /* point to current task  */
 
+       /* Check for active vtimers in the current task */
+       cmpl    $0,TASK_VTIMERS(%ebx)
+       jz              1f
+
+       /* Set a pending AST */
+       orl             $(AST_BSD),%gs:CPU_PENDING_AST
+
+       /* Set a thread AST (atomic) */
+       lock
+       orl             $(AST_BSD),ACT_AST(%ecx)
+       
+1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
 
@@ -771,22 +936,38 @@ Entry(lo_mdep_scall)
        /*
         * always returns through thread_exception_return
         */
-       
+
 
 Entry(lo_diag_scall)
        TIME_TRAP_UENTRY
 
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    ACT_TASK(%ecx),%ebx                     /* point to current task  */
+
+       /* Check for active vtimers in the current task */
+       cmpl    $0,TASK_VTIMERS(%ebx)
+       jz              1f
+
+       /* Set a pending AST */
+       orl             $(AST_BSD),%gs:CPU_PENDING_AST
+
+       /* Set a thread AST (atomic) */
+       lock
+       orl             $(AST_BSD),ACT_AST(%ecx)
+       
+1:
        movl    %gs:CPU_KERNEL_STACK,%ebx       // Get the address of the kernel stack
        xchgl   %ebx,%esp               // Switch to it, saving the previous
 
        CCALL1(diagCall, %ebx)          // Call diagnostics
-       cli                             // Disable interruptions just in case they were enabled
-       popl    %esp                    // Get back the original stack
        
        cmpl    $0,%eax                 // What kind of return is this?
-       jne     EXT(return_to_user)     // Normal return, do not check asts...
-                               
-       CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
+       je      2f
+       cli                             // Disable interruptions just in case they were enabled
+       popl    %esp                    // Get back the original stack
+       jmp     EXT(return_to_user)     // Normal return, do not check asts...
+2:     
+       CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
                // pass what would be the diag syscall
                // error return - cause an exception
        /* no return */
@@ -802,7 +983,7 @@ Entry(lo_diag_scall)
  *     cr3      -> kernel directory
  *     esp      -> low based stack
  *     gs       -> CPU_DATA_GS
- *     cs       -> KERNEL_CS
+ *     cs       -> KERNEL32_CS
  *     ss/ds/es -> KERNEL_DS
  *
  *     interrupts disabled
@@ -810,6 +991,8 @@ Entry(lo_diag_scall)
  */
 
 Entry(lo_syscall)
+       TIME_TRAP_UENTRY
+
        /*
         * We can be here either for a mach, unix machdep or diag syscall,
         * as indicated by the syscall class:
@@ -826,47 +1009,88 @@ Entry(lo_syscall)
        cmpl    $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
        je      EXT(lo64_diag_scall)
 
+       movl    %gs:CPU_KERNEL_STACK,%ebx
+       xchgl   %ebx,%esp               /* switch to kernel stack */
+
+       sti
+
        /* Syscall class unknown */
-       CCALL3(i386_exception, $(EXC_SYSCALL), %eax, $1)
+       CCALL5(i386_exception, $(EXC_SYSCALL), %eax, $0, $1, $0)
        /* no return */
 
+
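
For orientation, lo_syscall dispatches on the syscall class carried in
the high bits of the call number. A C sketch of the dispatch (the
constants mirror mach/i386/syscall_sw.h; the values shown are
assumptions):

#include <stdint.h>

#define SYSCALL_CLASS_SHIFT 24                          /* assumed */
#define SYSCALL_CLASS_MASK  (0xffu << SYSCALL_CLASS_SHIFT)
#define SYSCALL_CLASS_MACH  1u                          /* assumed */
#define SYSCALL_CLASS_UNIX  2u
#define SYSCALL_CLASS_MDEP  3u
#define SYSCALL_CLASS_DIAG  4u

static const char *lo_syscall_dispatch(uint32_t callnum)
{
    switch ((callnum & SYSCALL_CLASS_MASK) >> SYSCALL_CLASS_SHIFT) {
    case SYSCALL_CLASS_MACH: return "lo64_mach_scall";
    case SYSCALL_CLASS_UNIX: return "lo64_unix_scall";
    case SYSCALL_CLASS_MDEP: return "lo64_mdep_scall";
    case SYSCALL_CLASS_DIAG: return "lo64_diag_scall";
    default: return "i386_exception(EXC_SYSCALL)";  /* unknown class */
    }
}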
 Entry(lo64_unix_scall)
-        TIME_TRAP_UENTRY
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    ACT_TASK(%ecx),%ebx                     /* point to current task  */
+       addl    $1,TASK_SYSCALLS_UNIX(%ebx)     /* increment call count   */
+
+       /* Check for active vtimers in the current task */
+       cmpl    $0,TASK_VTIMERS(%ebx)
+       jz              1f
 
+       /* Set a pending AST */
+       orl             $(AST_BSD),%gs:CPU_PENDING_AST
+
+       /* Set a thread AST (atomic) */
+       lock
+       orl             $(AST_BSD),ACT_AST(%ecx)
+       
+1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
 
        sti
-       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
-       movl    ACT_TASK(%ecx),%ecx             /* point to current task  */
-       addl    $1,TASK_SYSCALLS_UNIX(%ecx)     /* increment call count   */
 
        CCALL1(unix_syscall64, %ebx)
        /*
         * always returns through thread_exception_return
         */
-       
+
 
 Entry(lo64_mach_scall)
-       TIME_TRAP_UENTRY
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    ACT_TASK(%ecx),%ebx                     /* point to current task  */
+       addl    $1,TASK_SYSCALLS_MACH(%ebx)     /* increment call count   */
+
+       /* Check for active vtimers in the current task */
+       cmpl    $0,TASK_VTIMERS(%ebx)
+       jz              1f
+
+       /* Set a pending AST */
+       orl             $(AST_BSD),%gs:CPU_PENDING_AST
 
+       lock
+       orl             $(AST_BSD),ACT_AST(%ecx)
+       
+1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
 
        sti
-       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
-       movl    ACT_TASK(%ecx),%ecx             /* point to current task  */
-       addl    $1,TASK_SYSCALLS_MACH(%ecx)     /* increment call count   */
 
        CCALL1(mach_call_munger64, %ebx)
        /*
         * always returns through thread_exception_return
         */
 
-       
+
+
 Entry(lo64_mdep_scall)
-        TIME_TRAP_UENTRY
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    ACT_TASK(%ecx),%ebx                     /* point to current task  */
 
+       /* Check for active vtimers in the current task */
+       cmpl    $0,TASK_VTIMERS(%ebx)
+       jz              1f
+
+       /* Set a pending AST */
+       orl             $(AST_BSD),%gs:CPU_PENDING_AST
+
+       /* Set a thread AST (atomic) */
+       lock
+       orl             $(AST_BSD),ACT_AST(%ecx)
+       
+1:
        movl    %gs:CPU_KERNEL_STACK,%ebx
        xchgl   %ebx,%esp               /* switch to kernel stack */
 
@@ -876,29 +1100,40 @@ Entry(lo64_mdep_scall)
        /*
         * always returns through thread_exception_return
         */
-       
+
 
 Entry(lo64_diag_scall)
-       TIME_TRAP_UENTRY
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    ACT_TASK(%ecx),%ebx                     /* point to current task  */
 
-       movl    %gs:CPU_KERNEL_STACK,%ebx       // Get the address of the kernel stack
-       xchgl   %ebx,%esp               // Switch to it, saving the previous
+       /* Check for active vtimers in the current task */
+       cmpl    $0,TASK_VTIMERS(%ebx)
+       jz              1f
+
+       /* Set a pending AST */
+       orl             $(AST_BSD),%gs:CPU_PENDING_AST
+
+       /* Set a thread AST (atomic) */
+       lock
+       orl             $(AST_BSD),ACT_AST(%ecx)
        
-       pushl   %ebx                    // Push the previous stack
+1:
+       movl    %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
+       xchgl   %ebx,%esp               // Switch to it, saving the previous
+
        CCALL1(diagCall64, %ebx)        // Call diagnostics
+               
+       cmpl    $0,%eax                 // What kind of return is this?
+       je      2f
        cli                             // Disable interruptions just in case they were enabled
        popl    %esp                    // Get back the original stack
-       
-       cmpl    $0,%eax                 // What kind of return is this?
-       jne     EXT(return_to_user)     // Normal return, do not check asts...
-                               
-       CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
+       jmp     EXT(return_to_user)     // Normal return, do not check asts...
+2:     
+       CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
+               // pass what would be the diag syscall
+               // error return - cause an exception
        /* no return */
-       
 
-                       
-/******************************************************************************************************
-                       
 /*\f*/
 /*
  * Utility routines.
@@ -911,20 +1146,20 @@ Entry(lo64_diag_scall)
  * arg1:       kernel address
  * arg2:       byte count
  */
-ENTRY(copyinphys_user)
+Entry(copyinphys_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
        mov     %cx,%ds
 
-ENTRY(copyinphys_kern)
+Entry(copyinphys_kern)
        movl    $(PHYS_WINDOW_SEL),%ecx /* physical access through kernel window */
        mov     %cx,%es
        jmp     copyin_common
 
-ENTRY(copyin_user)
+Entry(copyin_user)
        movl    $(USER_WINDOW_SEL),%ecx /* user data segment access through kernel window */
        mov     %cx,%ds
 
-ENTRY(copyin_kern)
+Entry(copyin_kern)
 
 copyin_common:
        pushl   %esi
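
The copyin family relies on the RECOVERY_SECTION/RECOVER machinery
declared near the top of this file: a fault while touching user memory
resumes at a registered recovery label that returns EFAULT instead of
panicking. A conceptual sketch, with setjmp/longjmp standing in for the
trap handler's recover_table lookup (the kernel's mechanism is
table-driven, not setjmp-based):

#include <errno.h>
#include <setjmp.h>
#include <stddef.h>
#include <string.h>

static jmp_buf copy_recover;   /* stands in for the recover_table entry */

/* A real fault handler would look up the faulting EIP in recover_table
 * and resume at the paired recovery address (copyin_fail), modeled
 * here by longjmp(copy_recover, 1). */
static int copyin_sketch(const void *user, void *kernel, size_t n)
{
    if (setjmp(copy_recover))
        return EFAULT;          /* copyin_fail: */
    memcpy(kernel, user, n);    /* may fault on a bad user address */
    return 0;                   /* success */
}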
@@ -1076,7 +1311,6 @@ copyout_fail:
        movl    $(EFAULT),%eax          /* return error for failure */
        jmp     copyout_ret             /* pop frame and return */
 
-
 /*
  * io register must not be used on slaves (no AT bus)
  */
@@ -1101,103 +1335,6 @@ copyout_fail:
 
 #endif /* MACH_ASSERT */
 
-
-#if    MACH_KDB || MACH_ASSERT
-
-/*
- * Following routines are also defined as macros in i386/pio.h
- * Compile then when MACH_KDB is configured so that they
- * Compile them when MACH_KDB is configured so that they
- */
-
-/*
- * void outb(unsigned char *io_port,
- *          unsigned char byte)
- *
- * Output a byte to an IO port.
- */
-ENTRY(outb)
-       PUSH_FRAME
-       ILL_ON_SLAVE
-       movl    ARG0,%edx               /* IO port address */
-       movl    ARG1,%eax               /* data to output */
-       outb    %al,%dx                 /* send it out */
-       POP_FRAME
-       ret
-
-/*
- * unsigned char inb(unsigned char *io_port)
- *
- * Input a byte from an IO port.
- */
-ENTRY(inb)
-       PUSH_FRAME
-       ILL_ON_SLAVE
-       movl    ARG0,%edx               /* IO port address */
-       xor     %eax,%eax               /* clear high bits of register */
-       inb     %dx,%al                 /* get the byte */
-       POP_FRAME
-       ret
-
-/*
- * void outw(unsigned short *io_port,
- *          unsigned short word)
- *
- * Output a word to an IO port.
- */
-ENTRY(outw)
-       PUSH_FRAME
-       ILL_ON_SLAVE
-       movl    ARG0,%edx               /* IO port address */
-       movl    ARG1,%eax               /* data to output */
-       outw    %ax,%dx                 /* send it out */
-       POP_FRAME
-       ret
-
-/*
- * unsigned short inw(unsigned short *io_port)
- *
- * Input a word from an IO port.
- */
-ENTRY(inw)
-       PUSH_FRAME
-       ILL_ON_SLAVE
-       movl    ARG0,%edx               /* IO port address */
-       xor     %eax,%eax               /* clear high bits of register */
-       inw     %dx,%ax                 /* get the word */
-       POP_FRAME
-       ret
-
-/*
- * void outl(unsigned int *io_port,
- *          unsigned int byte)
- *
- * Output an int to an IO port.
- */
-ENTRY(outl)
-       PUSH_FRAME
-       ILL_ON_SLAVE
-       movl    ARG0,%edx               /* IO port address*/
-       movl    ARG1,%eax               /* data to output */
-       outl    %eax,%dx                /* send it out */
-       POP_FRAME
-       ret
-
-/*
- * unsigned int inl(unsigned int *io_port)
- *
- * Input an int from an IO port.
- */
-ENTRY(inl)
-       PUSH_FRAME
-       ILL_ON_SLAVE
-       movl    ARG0,%edx               /* IO port address */
-       inl     %dx,%eax                /* get the int */
-       POP_FRAME
-       ret
-
-#endif /* MACH_KDB  || MACH_ASSERT*/
-
 /*
  * void loutb(unsigned byte *io_port,
  *           unsigned byte *data,
@@ -1359,70 +1496,6 @@ rdmsr_fail:
        RECOVERY_SECTION
        RECOVER_TABLE_END
 
-
-
-ENTRY(dr6)
-       movl    %db6, %eax
-       ret
-
-/*     dr<i>(address, type, len, persistence)
- */
-ENTRY(dr0)
-       movl    S_ARG0, %eax
-       movl    %eax,EXT(dr_addr)
-       movl    %eax, %db0
-       movl    $0, %ecx
-       jmp     0f
-ENTRY(dr1)
-       movl    S_ARG0, %eax
-       movl    %eax,EXT(dr_addr)+1*4
-       movl    %eax, %db1
-       movl    $2, %ecx
-       jmp     0f
-ENTRY(dr2)
-       movl    S_ARG0, %eax
-       movl    %eax,EXT(dr_addr)+2*4
-       movl    %eax, %db2
-       movl    $4, %ecx
-       jmp     0f
-
-ENTRY(dr3)
-       movl    S_ARG0, %eax
-       movl    %eax,EXT(dr_addr)+3*4
-       movl    %eax, %db3
-       movl    $6, %ecx
-
-0:
-       pushl   %ebp
-       movl    %esp, %ebp
-
-       movl    %db7, %edx
-       movl    %edx,EXT(dr_addr)+4*4
-       andl    dr_msk(,%ecx,2),%edx    /* clear out new entry */
-       movl    %edx,EXT(dr_addr)+5*4
-       movzbl  B_ARG3, %eax
-       andb    $3, %al
-       shll    %cl, %eax
-       orl     %eax, %edx
-
-       movzbl  B_ARG1, %eax
-       andb    $3, %al
-       addb    $0x10, %cl
-       shll    %cl, %eax
-       orl     %eax, %edx
-
-       movzbl  B_ARG2, %eax
-       andb    $3, %al
-       addb    $0x2, %cl
-       shll    %cl, %eax
-       orl     %eax, %edx
-
-       movl    %edx, %db7
-       movl    %edx,EXT(dr_addr)+7*4
-       movl    %edx, %eax
-       leave
-       ret
-
        .data
 dr_msk:
        .long   ~0x000f0003
@@ -1435,15 +1508,6 @@ ENTRY(dr_addr)
 
        .text
 
-ENTRY(get_cr0)
-       movl    %cr0, %eax
-       ret
-
-ENTRY(set_cr0)
-       movl    4(%esp), %eax
-       movl    %eax, %cr0
-       ret
-
 #ifndef        SYMMETRY
 
 /*
@@ -1615,7 +1679,7 @@ ENTRY(mul_scale)
  * Double-fault exception handler task. The last gasp...
  */
 Entry(df_task_start)
-       CCALL1(panic_double_fault, $(T_DOUBLE_FAULT))
+       CCALL1(panic_double_fault32, $(T_DOUBLE_FAULT))
        hlt
 
 
@@ -1623,7 +1687,7 @@ Entry(df_task_start)
  * machine-check handler task. The last gasp...
  */
 Entry(mc_task_start)
-       CCALL1(panic_machine_check, $(T_MACHINE_CHECK))
+       CCALL1(panic_machine_check32, $(T_MACHINE_CHECK))
        hlt
 
 /*