diff --git a/osfmk/i386/idt64.s b/osfmk/i386/idt64.s
index 99cb3b8f2f6c1b519842e3ccd48812244b72a3f1..fd488ebd91fd42ef166c14d5a3446380b7b34b0e 100644
--- a/osfmk/i386/idt64.s
+++ b/osfmk/i386/idt64.s
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2010 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 #include <i386/asm.h>
 #include <i386/asm64.h>
 #include <assym.s>
-#include <mach_kdb.h>
 #include <i386/eflags.h>
 #include <i386/trap.h>
+#include <i386/rtclock_asm.h>
 #define _ARCH_I386_ASM_HELP_H_         /* Prevent inclusion of user header */
 #include <mach/i386/syscall_sw.h>
 #include <i386/postcode.h>
 #include <i386/proc_reg.h>
+#include <mach/exception_types.h>
+
 
 /*
- * Locore handlers.
+ * Low-memory compatibility-mode handlers.
  */
 #define        LO_ALLINTRS             EXT(lo_allintrs)
 #define        LO_ALLTRAPS             EXT(lo_alltraps)
-#define        LO_SYSENTER             EXT(lo_sysenter)
 #define        LO_SYSCALL              EXT(lo_syscall)
 #define        LO_UNIX_SCALL           EXT(lo_unix_scall)
 #define        LO_MACH_SCALL           EXT(lo_mach_scall)
 #define        LO_MDEP_SCALL           EXT(lo_mdep_scall)
-#define        LO_DIAG_SCALL           EXT(lo_diag_scall)
 #define        LO_DOUBLE_FAULT         EXT(lo_df64)
 #define        LO_MACHINE_CHECK        EXT(lo_mc64)
 
@@ -80,8 +80,8 @@
 #define        EXCEP64_ERR(n,name)                              \
        IDT64_ENTRY(name,0,K_INTR_GATE)                 ;\
 Entry(name)                                            ;\
+       push    $(LO_ALLTRAPS)                          ;\
        push    $(n)                                    ;\
-       movl    $(LO_ALLTRAPS), 4(%rsp)                 ;\
        jmp     L_enter_lohandler
 
 
@@ -93,8 +93,8 @@ Entry(name)                                           ;\
        IDT64_ENTRY(name,0,K_INTR_GATE)                 ;\
 Entry(name)                                            ;\
        push    $0                                      ;\
+       push    $(LO_ALLTRAPS)                          ;\
        push    $(n)                                    ;\
-       movl    $(LO_ALLTRAPS), 4(%rsp)                 ;\
        jmp     L_enter_lohandler
 
        
@@ -107,8 +107,8 @@ Entry(name)                                         ;\
        IDT64_ENTRY(name,0,U_INTR_GATE)                 ;\
 Entry(name)                                            ;\
        push    $0                                      ;\
+       push    $(LO_ALLTRAPS)                          ;\
        push    $(n)                                    ;\
-       movl    $(LO_ALLTRAPS), 4(%rsp)                 ;\
        jmp     L_enter_lohandler
 
 
@@ -139,8 +139,8 @@ Entry(name)                                         ;\
        .align FALIGN                                   ;\
 L_ ## n:                                               ;\
        push    $0                                      ;\
+       push    $(LO_ALLINTRS)                          ;\
        push    $(n)                                    ;\
-       movl    $(LO_ALLINTRS), 4(%rsp)                 ;\
        jmp     L_enter_lohandler
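/*
 * Illustrative sketch of the frame these macros hand to L_enter_lohandler,
 * assuming the usual x86-64 exception frame; the offsets match the ISF64_*
 * layout documented further down:
 *
 *      %rsp ->  0  trapno   (pushed by the macro)
 *               8  trapfn   (LO_ALLTRAPS or LO_ALLINTRS, pushed by the macro)
 *              16  err      (pushed by hardware, or $0 by the macro)
 *              24  rip
 *              32  cs
 *              40  rflags
 *              48  rsp
 *              56  ss
 */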
 
 
@@ -160,21 +160,13 @@ EXCEP64_USR(0x04,t64_into)
 EXCEP64_USR(0x05,t64_bounds)
 EXCEPTION64(0x06,t64_invop)
 EXCEPTION64(0x07,t64_nofpu)
-#if    MACH_KDB
-EXCEP64_IST(0x08,db_task_dbl_fault64,1)
-#else
 EXCEP64_IST(0x08,hi64_double_fault,1)
-#endif
 EXCEPTION64(0x09,a64_fpu_over)
 EXCEPTION64(0x0a,a64_inv_tss)
 EXCEP64_SPC(0x0b,hi64_segnp)
-#if    MACH_KDB
-EXCEP64_IST(0x0c,db_task_stk_fault64,1)
-#else
 EXCEP64_SPC(0x0c,hi64_stack_fault)
-#endif
 EXCEP64_SPC(0x0d,hi64_gen_prot)
-EXCEP64_ERR(0x0e,t64_page_fault)
+EXCEP64_SPC(0x0e, hi64_page_fault)
 EXCEPTION64(0x0f,t64_trap_0f)
 EXCEPTION64(0x10,t64_fpu_err)
 EXCEPTION64(0x11,t64_trap_11)
@@ -293,13 +285,12 @@ INTERRUPT64(0x7b)
 INTERRUPT64(0x7c)
 INTERRUPT64(0x7d)
 INTERRUPT64(0x7e)
-INTERRUPT64(0x7f)
+EXCEP64_USR(0x7f, t64_dtrace_ret)
 
 EXCEP64_SPC_USR(0x80,hi64_unix_scall)
 EXCEP64_SPC_USR(0x81,hi64_mach_scall)
 EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
-EXCEP64_SPC_USR(0x83,hi64_diag_scall)
-
+INTERRUPT64(0x83)
 INTERRUPT64(0x84)
 INTERRUPT64(0x85)
 INTERRUPT64(0x86)
@@ -475,21 +466,22 @@ EXCEPTION64(0xff,t64_preempt)
  */
 
        .code32
+
 /*
  * Control is passed here to return to the compatibility mode user.
  * At this stage we're in kernel space in compatibility mode
  * but we need to switch into 64-bit mode in the 4G-based trampoline
  * space before performing the iret.
  */ 
-Entry(lo64_ret_to_user)
+ret_to_user:
        movl    %gs:CPU_ACTIVE_THREAD,%ecx
 
-       movl    ACT_PCB_IDS(%ecx),%eax  /* Obtain this thread's debug state */
+       movl    TH_PCB_IDS(%ecx),%eax   /* Obtain this thread's debug state */
        cmpl    $0,%eax                 /* Is there a debug register context? */
        je      2f                      /* branch if not */
-       cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP     /* Are we a 64-bit task? */
+       cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
        jne     1f
-       movl    DS_DR0(%eax), %ecx      /* If not, load the 32 bit DRs */
+       movl    DS_DR0(%eax), %ecx      /* If so, load the 32 bit DRs */
        movl    %ecx, %db0
        movl    DS_DR1(%eax), %ecx
        movl    %ecx, %db1
@@ -530,7 +522,7 @@ Entry(lo64_ret_to_user)
        je      1f
        /* flag the copyio engine state as WINDOWS_CLEAN */
        mov     %gs:CPU_ACTIVE_THREAD,%eax
-       movl    $(WINDOWS_CLEAN),ACT_COPYIO_STATE(%eax)
+       movl    $(WINDOWS_CLEAN),TH_COPYIO_STATE(%eax)
        mov     %rcx,%cr3               /* switch to user's address space */
 1:
 
@@ -552,7 +544,7 @@ Entry(lo64_ret_to_user)
        jne     L_64bit_return
        jmp     L_32bit_return
 
-Entry(lo64_ret_to_kernel)
+ret_to_kernel:
        ENTER_64BIT_MODE()
        ENTER_UBERSPACE()       
 
@@ -574,27 +566,27 @@ L_32bit_return:
        /*
         * Restore registers into the machine state for iret.
         */
-       movl    R_EIP(%rsp), %eax
+       movl    R32_EIP(%rsp), %eax
        movl    %eax, ISC32_RIP(%rsp)
-       movl    R_EFLAGS(%rsp), %eax
+       movl    R32_EFLAGS(%rsp), %eax
        movl    %eax, ISC32_RFLAGS(%rsp)
-       movl    R_CS(%rsp), %eax
+       movl    R32_CS(%rsp), %eax
        movl    %eax, ISC32_CS(%rsp)
-       movl    R_UESP(%rsp), %eax
+       movl    R32_UESP(%rsp), %eax
        movl    %eax, ISC32_RSP(%rsp)
-       movl    R_SS(%rsp), %eax
+       movl    R32_SS(%rsp), %eax
        movl    %eax, ISC32_SS(%rsp)
 
        /*
         * Restore general 32-bit registers
         */
-       movl    R_EAX(%rsp), %eax
-       movl    R_EBX(%rsp), %ebx
-       movl    R_ECX(%rsp), %ecx
-       movl    R_EDX(%rsp), %edx
-       movl    R_EBP(%rsp), %ebp
-       movl    R_ESI(%rsp), %esi
-       movl    R_EDI(%rsp), %edi
+       movl    R32_EAX(%rsp), %eax
+       movl    R32_EBX(%rsp), %ebx
+       movl    R32_ECX(%rsp), %ecx
+       movl    R32_EDX(%rsp), %edx
+       movl    R32_EBP(%rsp), %ebp
+       movl    R32_ESI(%rsp), %esi
+       movl    R32_EDI(%rsp), %edi
 
        /*
         * Restore segment registers. We may take an exception here but
@@ -603,31 +595,31 @@ L_32bit_return:
         */
        swapgs
 EXT(ret32_set_ds):     
-       movw    R_DS(%rsp), %ds
+       movw    R32_DS(%rsp), %ds
 EXT(ret32_set_es):
-       movw    R_ES(%rsp), %es
+       movw    R32_ES(%rsp), %es
 EXT(ret32_set_fs):
-       movw    R_FS(%rsp), %fs
+       movw    R32_FS(%rsp), %fs
 EXT(ret32_set_gs):
-       movw    R_GS(%rsp), %gs
+       movw    R32_GS(%rsp), %gs
 
-       add     $(ISC32_OFFSET)+8+8, %rsp       /* pop compat frame +
-                                                  trapno/trapfn and error */   
-        cmp    $(SYSENTER_CS),ISF64_CS-8-8(%rsp)
+       add     $(ISC32_OFFSET)+8+8+8, %rsp     /* pop compat frame +
+                                                  trapno, trapfn and error */  
+        cmpl   $(SYSENTER_CS),ISF64_CS-8-8-8(%rsp)
                                        /* test for fast entry/exit */
         je      L_fast_exit
 EXT(ret32_iret):
         iretq                          /* return from interrupt */
 
 L_fast_exit:
-       pop     %rdx                    /* user return eip */
-        pop    %rcx                    /* pop and toss cs */
+       pop     %rdx                    /* user return eip */
+       pop     %rcx                    /* pop and toss cs */
        andl    $(~EFL_IF), (%rsp)      /* clear interrupts enable, sti below */
-        popf                            /* flags - carry denotes failure */
-        pop    %rcx                    /* user return esp */
+       popf                            /* flags - carry denotes failure */
+       pop     %rcx                    /* user return esp */
        .code32
        sti                             /* interrupts enabled after sysexit */
-        sysexit                                /* 32-bit sysexit */
+       .byte 0x0f,0x35                 /* 32-bit sysexit */
        .code64
 
 L_64bit_return:
@@ -662,9 +654,9 @@ L_64bit_return:
        mov     R64_RCX(%rsp), %rcx
        mov     R64_RAX(%rsp), %rax
 
-       add     $(ISS64_OFFSET)+8+8, %rsp       /* pop saved state frame +
-                                                  trapno/trapfn and error */   
-        cmpl   $(SYSCALL_CS),ISF64_CS-8-8(%rsp)
+       add     $(ISS64_OFFSET)+8+8+8, %rsp     /* pop saved state frame +
+                                                  trapno, trapfn and error */  
+        cmpl   $(SYSCALL_CS),ISF64_CS-8-8-8(%rsp)
                                        /* test for fast entry/exit */
         je      L_sysret
 EXT(ret64_iret):
@@ -677,10 +669,10 @@ L_sysret:
         *      r1      user rflags
         *      rsp     user stack pointer
         */
-       mov     ISF64_RIP-16(%rsp), %rcx
-       mov     ISF64_RFLAGS-16(%rsp), %r11
-       mov     ISF64_RSP-16(%rsp), %rsp
-        sysretq                                /* return from system call */
+       mov     ISF64_RIP-8-8-8(%rsp), %rcx
+       mov     ISF64_RFLAGS-8-8-8(%rsp), %r11
+       mov     ISF64_RSP-8-8-8(%rsp), %rsp
+       sysretq                         /* return from system call */
 
 /*
  * Common path to enter locore handlers.
@@ -705,8 +697,8 @@ Entry(hi64_unix_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
 L_unix_scall_continue:
        push    %rax                    /* save system call number */
+       push    $(LO_UNIX_SCALL)
        push    $(UNIX_INT)
-       movl    $(LO_UNIX_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check
 
        
@@ -714,8 +706,8 @@ Entry(hi64_mach_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
 L_mach_scall_continue:
        push    %rax                    /* save system call number */
+       push    $(LO_MACH_SCALL)
        push    $(MACH_INT)
-       movl    $(LO_MACH_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check
 
        
@@ -723,19 +715,11 @@ Entry(hi64_mdep_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
 L_mdep_scall_continue:
        push    %rax                    /* save system call number */
+       push    $(LO_MDEP_SCALL)
        push    $(MACHDEP_INT)
-       movl    $(LO_MDEP_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check
 
        
-Entry(hi64_diag_scall)
-       swapgs                          /* switch to kernel gs (cpu_data) */
-L_diag_scall_continue:
-       push    %rax                    /* save system call number */
-       push    $(DIAG_INT)
-       movl    $(LO_DIAG_SCALL), 4(%rsp)
-       jmp     L_32bit_enter_check
-
 Entry(hi64_syscall)
        swapgs                          /* Kapow! get per-cpu data area */
 L_syscall_continue:
@@ -753,10 +737,18 @@ L_syscall_continue:
        mov     %gs:CPU_UBER_TMP, %rcx
        mov     %rcx, ISF64_RSP(%rsp)           /* user stack */
        mov     %rax, ISF64_ERR(%rsp)           /* err/rax - syscall code */
-       movl    $(0), ISF64_TRAPNO(%rsp)        /* trapno */
+       movl    $(T_SYSCALL), ISF64_TRAPNO(%rsp)        /* trapno */
        movl    $(LO_SYSCALL), ISF64_TRAPFN(%rsp)
        jmp     L_64bit_enter           /* this can only be a 64-bit task */
-       
+
+
+L_32bit_enter_check:
+       /*
+        * Check we're not a confused 64-bit user.
+        */
+       cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
+       jne     L_64bit_entry_reject
+       jmp     L_32bit_enter
 /*
  * sysenter entry point
  * Requires user code to set up:
@@ -777,10 +769,9 @@ Entry(hi64_sysenter)
        push    %rcx                    /* uesp */
        pushf                           /* flags */
        /*
-       * Clear, among others, the Nested Task (NT) flags bit;
-       * This is cleared by INT, but not by sysenter, which only
-       * clears RF, VM and IF.
-       */
+        * Clear, among others, the Nested Task (NT) flags bit;
+        * this is zeroed by INT, but not by SYSENTER.
+        */
        push    $0
        popf
        push    $(SYSENTER_CS)          /* cs */
@@ -788,19 +779,44 @@ Entry(hi64_sysenter)
 L_sysenter_continue:
        push    %rdx                    /* eip */
        push    %rax                    /* err/eax - syscall code */
-       push    $(0)
-       movl    $(LO_SYSENTER), ISF64_TRAPFN(%rsp)
+       push    $0
+       push    $(T_SYSENTER)
        orl     $(EFL_IF), ISF64_RFLAGS(%rsp)
-
-L_32bit_enter_check:
-       /*
-        * Check we're not a confused 64-bit user.
-        */
-       cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
-       jne     L_64bit_entry_reject
-       /* fall through to 32-bit handler: */
+       movl    $(LO_MACH_SCALL), ISF64_TRAPFN(%rsp)
+       testl   %eax, %eax
+       js      L_32bit_enter_check
+       movl    $(LO_UNIX_SCALL), ISF64_TRAPFN(%rsp)
+       cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
+       jne     L_64bit_entry_reject
+/* If the caller (typically LibSystem) has recorded the cumulative size of
+ * the arguments in EAX, copy them over from the user stack directly.
+ * We recover from exceptions inline--if the copy loop doesn't complete
+ * due to an exception, we fall back to copyin from compatibility mode.
+ * We can potentially extend this mechanism to mach traps as well (DRK).
+ */
+L_sysenter_copy_args:
+       testl   $(I386_SYSCALL_ARG_BYTES_MASK), %eax
+       jz      L_32bit_enter
+       xor     %r9, %r9
+       mov     %gs:CPU_UBER_ARG_STORE, %r8
+       movl    %eax, %r9d
+       mov     %gs:CPU_UBER_ARG_STORE_VALID, %r12
+       xor     %r10, %r10
+       shrl    $(I386_SYSCALL_ARG_DWORDS_SHIFT), %r9d
+       andl    $(I386_SYSCALL_ARG_DWORDS_MASK), %r9d
+       movl    $0, (%r12)
+EXT(hi64_sysenter_user_arg_copy):
+0:
+       movl    4(%rcx, %r10, 4), %r11d
+       movl    %r11d, (%r8, %r10, 4)
+       incl    %r10d
+       decl    %r9d
+       jnz     0b
+       movl    $1, (%r12)
+       /* Fall through to 32-bit handler */
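/*
 * In rough C-like pseudocode (illustrative; %rcx holds the user stack
 * pointer and the cumulative argument size is encoded in %eax, as noted
 * in the comment above):
 *
 *      if (eax & I386_SYSCALL_ARG_BYTES_MASK) {
 *              n = (eax >> I386_SYSCALL_ARG_DWORDS_SHIFT) &
 *                      I386_SYSCALL_ARG_DWORDS_MASK;
 *              *cpu_uber_arg_store_valid = 0;
 *              for (i = 0; i < n; i++)
 *                      cpu_uber_arg_store[i] = user_stack[i + 1];  // skip return address
 *              *cpu_uber_arg_store_valid = 1;
 *      }
 */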
 
 L_32bit_enter:
+       cld
        /*
         * Make space for the compatibility save area.
         */
@@ -810,45 +826,49 @@ L_32bit_enter:
        /*
         * Save segment regs
         */
-       mov     %ds, R_DS(%rsp)
-       mov     %es, R_ES(%rsp)
-       mov     %fs, R_FS(%rsp)
-       mov     %gs, R_GS(%rsp)
+       mov     %ds, R32_DS(%rsp)
+       mov     %es, R32_ES(%rsp)
+       mov     %fs, R32_FS(%rsp)
+       mov     %gs, R32_GS(%rsp)
 
        /*
         * Save general 32-bit registers
         */
-       mov     %eax, R_EAX(%rsp)
-       mov     %ebx, R_EBX(%rsp)
-       mov     %ecx, R_ECX(%rsp)
-       mov     %edx, R_EDX(%rsp)
-       mov     %ebp, R_EBP(%rsp)
-       mov     %esi, R_ESI(%rsp)
-       mov     %edi, R_EDI(%rsp)
+       mov     %eax, R32_EAX(%rsp)
+       mov     %ebx, R32_EBX(%rsp)
+       mov     %ecx, R32_ECX(%rsp)
+       mov     %edx, R32_EDX(%rsp)
+       mov     %ebp, R32_EBP(%rsp)
+       mov     %esi, R32_ESI(%rsp)
+       mov     %edi, R32_EDI(%rsp)
 
        /* Unconditionally save cr2; only meaningful on page faults */
        mov     %cr2, %rax
-       mov     %eax, R_CR2(%rsp)
+       mov     %eax, R32_CR2(%rsp)
 
        /*
         * Copy registers already saved in the machine state 
         * (in the interrupt stack frame) into the compat save area.
         */
        mov     ISC32_RIP(%rsp), %eax
-       mov     %eax, R_EIP(%rsp)
+       mov     %eax, R32_EIP(%rsp)
        mov     ISC32_RFLAGS(%rsp), %eax
-       mov     %eax, R_EFLAGS(%rsp)
+       mov     %eax, R32_EFLAGS(%rsp)
        mov     ISC32_CS(%rsp), %eax
-       mov     %eax, R_CS(%rsp)
+       mov     %eax, R32_CS(%rsp)
+       testb   $3, %al
+       jz      1f
+       xor     %ebp, %ebp
+1:     
        mov     ISC32_RSP(%rsp), %eax
-       mov     %eax, R_UESP(%rsp)
+       mov     %eax, R32_UESP(%rsp)
        mov     ISC32_SS(%rsp), %eax
-       mov     %eax, R_SS(%rsp)
+       mov     %eax, R32_SS(%rsp)
 L_32bit_enter_after_fault:
        mov     ISC32_TRAPNO(%rsp), %ebx        /* %ebx := trapno for later */
-       mov     %ebx, R_TRAPNO(%rsp)
+       mov     %ebx, R32_TRAPNO(%rsp)
        mov     ISC32_ERR(%rsp), %eax
-       mov     %eax, R_ERR(%rsp)
+       mov     %eax, R32_ERR(%rsp)
        mov     ISC32_TRAPFN(%rsp), %edx
 
 /*
@@ -875,6 +895,12 @@ L_enter_lohandler2:
        mov     %rcx, %cr3
        mov     %rcx, %gs:CPU_ACTIVE_CR3
 2:
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* Get the active thread */
+       cmpl    $0, TH_PCB_IDS(%ecx)    /* Is there a debug register state? */
+       jz      21f
+       xor     %ecx, %ecx              /* If so, reset DR7 (the control) */
+       mov     %rcx, %dr7
+21:    
        /*
         * Switch to compatibility mode.
         * Then establish kernel segments.
@@ -898,14 +924,8 @@ L_enter_lohandler2:
        mov     $(CPU_DATA_GS), %eax
        mov     %eax, %gs
 
-       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* Get the active thread */
-       cmpl    $0, ACT_PCB_IDS(%ecx)   /* Is there a debug register state? */
-       je      1f
-       movl    $0, %ecx                /* If so, reset DR7 (the control) */
-       movl    %ecx, %dr7
-1:
-       addl    $1,%gs:hwIntCnt(,%ebx,4)        // Bump the trap/intr count
-       
+       incl    %gs:hwIntCnt(,%ebx,4)   /* Bump the trap/intr count */
+
        /* Dispatch the designated lo handler */
        jmp     *%edx
 
@@ -926,6 +946,7 @@ L_64bit_enter:
        sub     $(ISS64_OFFSET), %rsp
        movl    $(SS_64), SS_FLAVOR(%rsp)
 
+       cld
        /*
         * Save segment regs
         */
@@ -962,8 +983,22 @@ L_64bit_enter_after_fault:
        mov     R64_TRAPNO(%rsp), %ebx
        mov     R64_TRAPFN(%rsp), %edx
 
+       testb   $3, ISF64_CS+ISS64_OFFSET(%rsp)
+       jz      1f
+       xor     %rbp, %rbp
+1:
        jmp     L_enter_lohandler2
 
+Entry(hi64_page_fault)
+       push    $(LO_ALLTRAPS)
+       push    $(T_PAGE_FAULT)
+       cmpl    $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
+       jne     L_enter_lohandler
+       cmpl    $(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
+       jne     hi64_kernel_trap
+       mov     ISF64_RSP(%rsp), %rsp
+       jmp     L_32bit_enter
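/*
 * The checks above, roughly (illustrative):
 *
 *      if (fault_rip is not in uber-space)
 *              goto L_enter_lohandler;         // ordinary page fault
 *      else if (fault_rip == hi64_sysenter_user_arg_copy)
 *              goto L_32bit_enter;             // abandon the inline argument copy:
 *                                              // pop back to the sysenter frame; the
 *                                              // arg-store valid flag is still 0, so
 *                                              // the slow copyin() path is used
 *      else
 *              goto hi64_kernel_trap;          // unexpected fault in uber-space
 */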
+
 /*
  * Debug trap.  Check for single-stepping across system call into
  * kernel.  If this is the case, taking the debug trap has turned
@@ -973,8 +1008,8 @@ L_64bit_enter_after_fault:
 Entry(hi64_debug)
        swapgs                          /* set %gs for cpu data */
        push    $0                      /* error code */
+       push    $(LO_ALLTRAPS)
        push    $(T_DEBUG)
-       movl    $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
 
        testb   $3, ISF64_CS(%rsp)
        jnz     L_enter_lohandler_continue
@@ -1006,7 +1041,7 @@ Entry(hi64_debug)
         * Interrupt stack frame has been pushed on the temporary stack.
         * We have to switch to pcb stack and copy eflags.
         */ 
-       add     $32,%rsp                /* remove trapno/trapfn/err/rip/cs */
+       add     $40,%rsp                /* remove trapno/trapfn/err/rip/cs */
        push    %rcx                    /* save %rcx - user stack pointer */
        mov     32(%rsp),%rcx           /* top of intr stack -> pcb stack */
        xchg    %rcx,%rsp               /* switch to pcb stack */
@@ -1020,8 +1055,8 @@ Entry(hi64_debug)
 
 Entry(hi64_double_fault)
        swapgs                          /* set %gs for cpu data */
+       push    $(LO_DOUBLE_FAULT)
        push    $(T_DOUBLE_FAULT)
-       movl    $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
 
        cmpl    $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
        jne     L_enter_lohandler_continue      /* trap not in uber-space */
@@ -1038,92 +1073,133 @@ Entry(hi64_double_fault)
  * Check for a GP/NP fault in the kernel_return
  * sequence; if there, report it as a GP/NP fault on the user's instruction.
  *
- * rsp->     0:        trap code (NP or GP) and trap function
- *          8: segment number in error (error code)
- *         16  rip
- *         24  cs
- *         32  rflags 
- *         40  rsp
- *         48  ss
- *         56  old registers (trap is from kernel)
+ * rsp->     0 ISF64_TRAPNO:   trap code (NP or GP)
+ *          8 ISF64_TRAPFN:    trap function
+ *         16 ISF64_ERR:       segment number in error (error code)
+ *         24 ISF64_RIP:       rip
+ *         32 ISF64_CS:        cs
+ *         40 ISF64_RFLAGS:    rflags 
+ *         48 ISF64_RSP:       rsp
+ *         56 ISF64_SS:        ss
+ *         64                  old registers (trap is from kernel)
  */
 Entry(hi64_gen_prot)
+       push    $(LO_ALLTRAPS)
        push    $(T_GENERAL_PROTECTION)
        jmp     trap_check_kernel_exit  /* check for kernel exit sequence */
 
 Entry(hi64_stack_fault)
+       push    $(LO_ALLTRAPS)
        push    $(T_STACK_FAULT)
        jmp     trap_check_kernel_exit  /* check for kernel exit sequence */
 
 Entry(hi64_segnp)
+       push    $(LO_ALLTRAPS)
        push    $(T_SEGMENT_NOT_PRESENT)
                                        /* indicate fault type */
 trap_check_kernel_exit:
-       movl    $(LO_ALLTRAPS), 4(%rsp)
-       testb   $3,24(%rsp)
-       jnz     hi64_take_trap
+       testb   $3,ISF64_CS(%rsp)
+       jnz     L_enter_lohandler
                                        /* trap was from kernel mode, so */
                                        /* check for the kernel exit sequence */
-       cmpl    $(KERNEL_UBER_BASE_HI32), 16+4(%rsp)
-       jne     hi64_take_trap          /* trap not in uber-space */
+       cmpl    $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
+       jne     L_enter_lohandler_continue      /* trap not in uber-space */
 
-       cmpl    $(EXT(ret32_iret)), 16(%rsp)
+       cmpl    $(EXT(ret32_iret)), ISF64_RIP(%rsp)
        je      L_fault_iret32
-       cmpl    $(EXT(ret32_set_ds)), 16(%rsp)
+       cmpl    $(EXT(ret32_set_ds)), ISF64_RIP(%rsp)
        je      L_32bit_fault_set_seg
-       cmpl    $(EXT(ret32_set_es)), 16(%rsp)
+       cmpl    $(EXT(ret32_set_es)), ISF64_RIP(%rsp)
        je      L_32bit_fault_set_seg
-       cmpl    $(EXT(ret32_set_fs)), 16(%rsp)
+       cmpl    $(EXT(ret32_set_fs)), ISF64_RIP(%rsp)
        je      L_32bit_fault_set_seg
-       cmpl    $(EXT(ret32_set_gs)), 16(%rsp)
+       cmpl    $(EXT(ret32_set_gs)), ISF64_RIP(%rsp)
        je      L_32bit_fault_set_seg
 
-       cmpl    $(EXT(ret64_iret)), 16(%rsp)
+       cmpl    $(EXT(ret64_iret)), ISF64_RIP(%rsp)
        je      L_fault_iret64
 
-hi64_take_trap:
-       jmp     L_enter_lohandler
+       cmpl    $(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
+       cmove   ISF64_RSP(%rsp), %rsp
+       je      L_32bit_enter
+
+hi64_kernel_trap:
+       /*
+        * Here after taking an unexpected trap from kernel mode - perhaps
+        * while running in the trampolines hereabouts.
+        * Make sure we're not on the PCB stack, if so move to the kernel stack.
+        * This is likely a fatal condition.
+        * But first, try to be sure we have the kernel gs base active...
+        */
+       cmpq    $0, %gs:CPU_THIS                /* test gs_base */
+       js      1f                              /* -ve kernel addr, no swap */
+       swapgs                                  /* +ve user addr, swap */
+1:
+       movq    %rax, %gs:CPU_UBER_TMP          /* save %rax */
+       movq    %gs:CPU_UBER_ISF, %rax          /* PCB stack addr */
+       subq    %rsp, %rax
+       cmpq    $(PAGE_SIZE), %rax              /* current stack in PCB? */
+       movq    %gs:CPU_UBER_TMP, %rax          /* restore %rax */
+       ja      L_enter_lohandler_continue      /* stack not in PCB */
+
+       /*
+        *  Here if %rsp is in the PCB
+        *  Copy the interrupt stack frame from PCB stack to kernel stack
+        */
+       movq    %gs:CPU_KERNEL_STACK, %rax      /* note: %rax restored below */
+       xchgq   %rax, %rsp
+       pushq   ISF64_SS(%rax)
+       pushq   ISF64_RSP(%rax)
+       pushq   ISF64_RFLAGS(%rax)
+       pushq   ISF64_CS(%rax)
+       pushq   ISF64_RIP(%rax)
+       pushq   ISF64_ERR(%rax)
+       pushq   ISF64_TRAPFN(%rax)
+       pushq   ISF64_TRAPNO(%rax)
+       movq    %gs:CPU_UBER_TMP, %rax          /* restore %rax */
+       jmp     L_enter_lohandler_continue
+
 
-               
 /*
  * GP/NP fault on IRET: CS or SS is in error.
  * All registers contain the user's values.
  *
  * on SP is
- *   0 trap number/function
- *   8 errcode
- *  16 rip
- *  24 cs
- *  32 rflags
- *  40 rsp
- *  48 ss                      --> new trapno/trapfn
- *  56  (16-byte padding)      --> new errcode
- *  64 user rip
- *  72 user cs
- *  80 user rflags
- *  88 user rsp
- *  96  user ss
+ *   0 ISF64_TRAPNO:   trap code (NP or GP)
+ *   8 ISF64_TRAPFN:   trap function
+ *  16 ISF64_ERR:      segment number in error (error code)
+ *  24 ISF64_RIP:      rip
+ *  32 ISF64_CS:       cs
+ *  40 ISF64_RFLAGS:   rflags 
+ *  48 ISF64_RSP:      rsp
+ *  56 ISF64_SS:      ss  --> new trapno/trapfn
+ *  64                 pad --> new errcode
+ *  72                 user rip
+ *  80                 user cs
+ *  88                 user rflags
+ *  96                 user rsp
+ * 104                         user ss (16-byte aligned)
  */
 L_fault_iret32:
-       mov     %rax, 16(%rsp)          /* save rax (we don`t need saved rip) */
-       mov     0(%rsp), %rax           /* get trap number */
-       mov     %rax, 48(%rsp)          /* put in user trap number */
-       mov     8(%rsp), %rax           /* get error code */
-       mov     %rax, 56(%rsp)          /* put in user errcode */
-       mov     16(%rsp), %rax          /* restore rax */
-       add     $48, %rsp               /* reset to original frame */
+       mov     %rax, ISF64_RIP(%rsp)   /* save rax (we don`t need saved rip) */
+       mov     ISF64_TRAPNO(%rsp), %rax
+       mov     %rax, ISF64_SS(%rsp)    /* put in user trap number */
+       mov     ISF64_ERR(%rsp), %rax
+       mov     %rax, 8+ISF64_SS(%rsp)  /* put in user errcode */
+       mov     ISF64_RIP(%rsp), %rax   /* restore rax */
+       add     $(ISF64_SS), %rsp       /* reset to original frame */
                                        /* now treat as fault from user */
        swapgs
        jmp     L_32bit_enter
 
 L_fault_iret64:
-       mov     %rax, 16(%rsp)          /* save rax (we don`t need saved rip) */
-       mov     0(%rsp), %rax           /* get trap number */
-       mov     %rax, 48(%rsp)          /* put in user trap number */
-       mov     8(%rsp), %rax           /* get error code */
-       mov     %rax, 56(%rsp)          /* put in user errcode */
-       mov     16(%rsp), %rax          /* restore rax */
-       add     $48, %rsp               /* reset to original frame */
+       mov     %rax, ISF64_RIP(%rsp)   /* save rax (we don`t need saved rip) */
+       mov     ISF64_TRAPNO(%rsp), %rax
+       mov     %rax, ISF64_SS(%rsp)    /* put in user trap number */
+       mov     ISF64_ERR(%rsp), %rax
+       mov     %rax, 8+ISF64_SS(%rsp)  /* put in user errcode */
+       mov     ISF64_RIP(%rsp), %rax   /* restore rax */
+       add     $(ISF64_SS), %rsp       /* reset to original frame */
                                        /* now treat as fault from user */
        swapgs
        jmp     L_64bit_enter
@@ -1133,9 +1209,9 @@ L_fault_iret64:
  * on the stack untouched since we didn't move the stack pointer.
  */
 L_32bit_fault_set_seg:
-       mov     0(%rsp), %rax           /* get trap number/function */
-       mov     8(%rsp), %rdx           /* get error code */
-       mov     40(%rsp), %rsp          /* reload stack prior to fault */
+       mov     ISF64_TRAPNO(%rsp), %rax
+       mov     ISF64_ERR(%rsp), %rdx
+       mov     ISF64_RSP(%rsp), %rsp   /* reload stack prior to fault */
        mov     %rax,ISC32_TRAPNO(%rsp)
        mov     %rdx,ISC32_ERR(%rsp)
                                        /* now treat as fault from user */
@@ -1151,17 +1227,475 @@ L_32bit_fault_set_seg:
  * Fatal exception handlers:
  */
 Entry(db_task_dbl_fault64)
+       push    $(LO_DOUBLE_FAULT)
        push    $(T_DOUBLE_FAULT)
-       movl    $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
        jmp     L_enter_lohandler       
 
 Entry(db_task_stk_fault64)
+       push    $(LO_DOUBLE_FAULT)
        push    $(T_STACK_FAULT)
-       movl    $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
        jmp     L_enter_lohandler       
 
 Entry(mc64)
        push    $(0)                    /* Error */
+       push    $(LO_MACHINE_CHECK)
        push    $(T_MACHINE_CHECK)
-       movl    $(LO_MACHINE_CHECK), ISF64_TRAPFN(%rsp)
        jmp     L_enter_lohandler       
+
+
+       .code32
+
+/*
+ * All task 'exceptions' enter lo_alltraps:
+ *     esp     -> x86_saved_state_t
+ * 
+ * The rest of the state is set up as: 
+ *     cr3      -> kernel directory
+ *     esp      -> low based stack
+ *     gs       -> CPU_DATA_GS
+ *     cs       -> KERNEL32_CS
+ *     ss/ds/es -> KERNEL_DS
+ *
+ *     interrupts disabled
+ *     direction flag cleared
+ */
+Entry(lo_alltraps)
+       movl    R32_CS(%esp),%eax       /* assume 32-bit state */
+       cmpl    $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */   
+       jne     1f
+       movl    R64_CS(%esp),%eax       /* 64-bit user mode */
+1:
+       testb   $3,%al
+       jz      trap_from_kernel
+                                               /* user mode trap */
+       TIME_TRAP_UENTRY
+
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx
+       movl    TH_TASK(%ecx),%ebx
+
+       /* Check for active vtimers in the current task */
+       TASK_VTIMER_CHECK(%ebx, %ecx)
+
+       movl    %gs:CPU_KERNEL_STACK,%ebx
+       xchgl   %ebx,%esp               /* switch to kernel stack */
+
+       CCALL1(user_trap, %ebx)         /* call user trap routine */
+       /* user_trap() unmasks interrupts */
+       cli                             /* hold off intrs - critical section */
+       xorl    %ecx,%ecx               /* don't check if we're in the PFZ */
+       
+/*
+ * Return from trap or system call, checking for ASTs.
+ * On lowbase PCB stack with intrs disabled
+ */    
+Entry(return_from_trap)
+       movl    %gs:CPU_ACTIVE_THREAD, %esp
+       movl    TH_PCB_ISS(%esp),%esp   /* switch back to PCB stack */
+       movl    %gs:CPU_PENDING_AST, %eax
+       testl   %eax, %eax
+       je      return_to_user          /* branch if no AST */
+LEXT(return_from_trap_with_ast)
+       movl    %gs:CPU_KERNEL_STACK, %ebx
+       xchgl   %ebx, %esp              /* switch to kernel stack */
+
+       testl   %ecx, %ecx              /* see if we need to check for an EIP in the PFZ */
+       je      2f                      /* no, go handle the AST */
+       cmpl    $(SS_64), SS_FLAVOR(%ebx)       /* are we a 64-bit task? */
+       je      1f
+                                       /* no... 32-bit user mode */
+       movl    R32_EIP(%ebx), %eax
+       pushl   %ebx                    /* save PCB stack */
+       xorl    %ebp, %ebp              /* clear frame pointer */
+       CCALL1(commpage_is_in_pfz32, %eax)
+       popl    %ebx                    /* retrieve pointer to PCB stack */
+       testl   %eax, %eax
+       je      2f                      /* not in the PFZ... go service AST */
+       movl    %eax, R32_EBX(%ebx)     /* let the PFZ know we've pended an AST */
+       xchgl   %ebx, %esp              /* switch back to PCB stack */
+       jmp     return_to_user
+1:                                     /* 64-bit user mode */
+       movl    R64_RIP(%ebx), %ecx
+       movl    R64_RIP+4(%ebx), %eax
+       pushl   %ebx                    /* save PCB stack */
+       xorl    %ebp, %ebp              /* clear frame pointer */
+       CCALL2(commpage_is_in_pfz64, %ecx, %eax)
+       popl    %ebx                    /* retrieve pointer to PCB stack */
+       testl   %eax, %eax              
+       je      2f                      /* not in the PFZ... go service AST */
+       movl    %eax, R64_RBX(%ebx)     /* let the PFZ know we've pended an AST */
+       xchgl   %ebx, %esp              /* switch back to PCB stack */
+       jmp     return_to_user
+2:     
+       sti                             /* interrupts always enabled on return to user mode */
+       pushl   %ebx                    /* save PCB stack */
+       xorl    %ebp, %ebp              /* Clear framepointer */
+       CCALL1(i386_astintr, $0)        /* take the AST */
+       cli
+       
+       popl    %esp                    /* switch back to PCB stack (w/exc link) */
+
+       xorl    %ecx, %ecx              /* don't check if we're in the PFZ */
+       jmp     EXT(return_from_trap)   /* and check again (rare) */
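/*
 * AST handling above, in outline (illustrative C-like pseudocode):
 *
 *      for (;;) {
 *              if (cpu_pending_ast == 0)
 *                      break;                             // return_to_user
 *              if (check_pfz && commpage_is_in_pfz(user_pc)) {
 *                      saved_state->ebx_or_rbx = result;  // let the PFZ know an AST is pending
 *                      break;                             // return_to_user
 *              }
 *              sti(); i386_astintr(0); cli();
 *              check_pfz = 0;                             // recheck, skipping the PFZ test
 *      }
 *      return_to_user();
 */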
+
+
+
+/*
+ * Trap from kernel mode.  No need to switch stacks.
+ * Interrupts must be off here - we will set them to state at time of trap
+ * as soon as it's safe for us to do so and not recurse doing preemption
+ */
+trap_from_kernel:
+       movl    %esp, %eax              /* saved state addr */
+       pushl   R32_EIP(%esp)           /* Simulate a CALL from fault point */
+       pushl   %ebp                    /* Extend framepointer chain */
+       movl    %esp, %ebp
+       CCALL1WITHSP(kernel_trap, %eax) /* Call kernel trap handler */
+       popl    %ebp
+       addl    $4, %esp
+       cli
+
+       movl    %gs:CPU_PENDING_AST,%eax                /* get pending asts */
+       testl   $ AST_URGENT,%eax       /* any urgent preemption? */
+       je      ret_to_kernel                   /* no, nothing to do */
+       cmpl    $ T_PREEMPT,R32_TRAPNO(%esp)
+       je      ret_to_kernel                     /* T_PREEMPT handled in kernel_trap() */
+       testl   $ EFL_IF,R32_EFLAGS(%esp)               /* interrupts disabled? */
+       je      ret_to_kernel
+       cmpl    $0,%gs:CPU_PREEMPTION_LEVEL             /* preemption disabled? */
+       jne     ret_to_kernel
+       movl    %gs:CPU_KERNEL_STACK,%eax
+       movl    %esp,%ecx
+       xorl    %eax,%ecx
+       and     EXT(kernel_stack_mask),%ecx
+       testl   %ecx,%ecx               /* are we on the kernel stack? */
+       jne     ret_to_kernel           /* no, skip it */
+
+       CCALL1(i386_astintr, $1)        /* take the AST */
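/*
 * The preemption checks above reduce to (illustrative):
 *
 *      if ((cpu_pending_ast & AST_URGENT) &&
 *          trapno != T_PREEMPT &&                      // T_PREEMPT handled in kernel_trap()
 *          (saved_eflags & EFL_IF) &&                  // interrupts were enabled at trap time
 *          cpu_preemption_level == 0 &&
 *          ((esp ^ cpu_kernel_stack) & kernel_stack_mask) == 0)  // on the kernel stack
 *              i386_astintr(1);                        // take the AST
 *      else
 *              goto ret_to_kernel;
 */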
+
+
+/*
+ * All interrupts on all tasks enter here with:
+ *     esp->    -> x86_saved_state_t
+ *
+ *     cr3      -> kernel directory
+ *     esp      -> low based stack
+ *     gs       -> CPU_DATA_GS
+ *     cs       -> KERNEL32_CS
+ *     ss/ds/es -> KERNEL_DS
+ *
+ *     interrupts disabled
+ *     direction flag cleared
+ */
+Entry(lo_allintrs)
+       /*
+        * test whether already on interrupt stack
+        */
+       movl    %gs:CPU_INT_STACK_TOP,%ecx
+       cmpl    %esp,%ecx
+       jb      1f
+       leal    -INTSTACK_SIZE(%ecx),%edx
+       cmpl    %esp,%edx
+       jb      int_from_intstack
+1:     
+       xchgl   %ecx,%esp               /* switch to interrupt stack */
+
+       movl    %cr0,%eax               /* get cr0 */
+       orl     $(CR0_TS),%eax          /* or in TS bit */
+       movl    %eax,%cr0               /* set cr0 */
+
+       subl    $8, %esp                /* for 16-byte stack alignment */
+       pushl   %ecx                    /* save pointer to old stack */
+       movl    %ecx,%gs:CPU_INT_STATE  /* save intr state */
+       
+       TIME_INT_ENTRY                  /* do timing */
+
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx
+       movl    TH_TASK(%ecx),%ebx
+
+       /* Check for active vtimers in the current task */
+       TASK_VTIMER_CHECK(%ebx, %ecx)
+
+       incl    %gs:CPU_PREEMPTION_LEVEL
+       incl    %gs:CPU_INTERRUPT_LEVEL
+
+       movl    %gs:CPU_INT_STATE, %eax
+       CCALL1(interrupt, %eax)         /* call generic interrupt routine */
+
+       cli                             /* just in case we returned with intrs enabled */
+       xorl    %eax,%eax
+       movl    %eax,%gs:CPU_INT_STATE  /* clear intr state pointer */
+
+       decl    %gs:CPU_INTERRUPT_LEVEL
+       decl    %gs:CPU_PREEMPTION_LEVEL
+
+       TIME_INT_EXIT                   /* do timing */
+
+       movl    %gs:CPU_ACTIVE_THREAD,%eax
+       movl    TH_PCB_FPS(%eax),%eax   /* get pcb's ifps */
+       testl   %eax, %eax              /* Is there a context */
+       je      1f                      /* Branch if not */
+       cmpl    $0, FP_VALID(%eax)      /* Check fp_valid */
+       jne     1f                      /* Branch if valid */
+       clts                            /* Clear TS */
+       jmp     2f
+1:
+       movl    %cr0,%eax               /* get cr0 */
+       orl     $(CR0_TS),%eax          /* or in TS bit */
+       movl    %eax,%cr0               /* set cr0 */
+2:
+       popl    %esp                    /* switch back to old stack */
+
+       /* Load interrupted code segment into %eax */
+       movl    R32_CS(%esp),%eax       /* assume 32-bit state */
+       cmpl    $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */   
+       jne     3f
+       movl    R64_CS(%esp),%eax       /* 64-bit user mode */
+3:
+       testb   $3,%al                  /* user mode, */
+       jnz     ast_from_interrupt_user /* go handle potential ASTs */
+       /*
+        * we only want to handle preemption requests if
+        * the interrupt fell in the kernel context
+        * and preemption isn't disabled
+        */
+       movl    %gs:CPU_PENDING_AST,%eax        
+       testl   $ AST_URGENT,%eax               /* any urgent requests? */
+       je      ret_to_kernel                   /* no, nothing to do */
+
+       cmpl    $0,%gs:CPU_PREEMPTION_LEVEL     /* preemption disabled? */
+       jne     ret_to_kernel                   /* yes, skip it */
+
+       movl    %gs:CPU_KERNEL_STACK,%eax
+       movl    %esp,%ecx
+       xorl    %eax,%ecx
+       and     EXT(kernel_stack_mask),%ecx
+       testl   %ecx,%ecx                       /* are we on the kernel stack? */
+       jne     ret_to_kernel                   /* no, skip it */
+
+       /*
+        * Take an AST from kernel space.  We don't need (and don't want)
+        * to do as much as the case where the interrupt came from user
+        * space.
+        */
+       CCALL1(i386_astintr, $1)
+
+       jmp     ret_to_kernel
+
+
+/*
+ * nested int - simple path, can't preempt etc on way out
+ */
+int_from_intstack:
+       incl    %gs:CPU_PREEMPTION_LEVEL
+       incl    %gs:CPU_INTERRUPT_LEVEL
+       incl    %gs:CPU_NESTED_ISTACK
+
+       movl    %esp, %edx              /* x86_saved_state */
+       CCALL1(interrupt, %edx)
+
+       decl    %gs:CPU_INTERRUPT_LEVEL
+       decl    %gs:CPU_PREEMPTION_LEVEL
+       decl    %gs:CPU_NESTED_ISTACK
+
+       jmp     ret_to_kernel
+
+/*
+ *     Take an AST from an interrupted user
+ */
+ast_from_interrupt_user:
+       movl    %gs:CPU_PENDING_AST,%eax
+       testl   %eax,%eax               /* pending ASTs? */
+       je      ret_to_user             /* no, nothing to do */
+
+       TIME_TRAP_UENTRY
+
+       movl    $1, %ecx                /* check if we're in the PFZ */
+       jmp     EXT(return_from_trap_with_ast)  /* return */
+
+
+/*
+ * 32bit Tasks
+ * System call entries via INTR_GATE or sysenter:
+ *
+ *     esp      -> x86_saved_state32_t
+ *     cr3      -> kernel directory
+ *     esp      -> low based stack
+ *     gs       -> CPU_DATA_GS
+ *     cs       -> KERNEL32_CS
+ *     ss/ds/es -> KERNEL_DS
+ *
+ *     interrupts disabled
+ *     direction flag cleared
+ */
+
+Entry(lo_unix_scall)
+       TIME_TRAP_UENTRY
+
+       movl    %gs:CPU_KERNEL_STACK,%edi
+       xchgl   %edi,%esp                       /* switch to kernel stack */
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    TH_TASK(%ecx),%ebx              /* point to current task  */
+       incl    TH_SYSCALLS_UNIX(%ecx)          /* increment call count   */
+
+       /* Check for active vtimers in the current task */
+       TASK_VTIMER_CHECK(%ebx, %ecx)
+
+       sti
+
+       CCALL1(unix_syscall, %edi)
+       /*
+        * always returns through thread_exception_return
+        */
+
+
+Entry(lo_mach_scall)
+       TIME_TRAP_UENTRY
+
+       movl    %gs:CPU_KERNEL_STACK,%edi
+       xchgl   %edi,%esp                       /* switch to kernel stack */
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    TH_TASK(%ecx),%ebx              /* point to current task  */
+       incl    TH_SYSCALLS_MACH(%ecx)          /* increment call count   */
+
+       /* Check for active vtimers in the current task */
+       TASK_VTIMER_CHECK(%ebx, %ecx)
+
+       sti
+
+       CCALL1(mach_call_munger, %edi)
+       /*
+        * always returns through thread_exception_return
+        */
+
+
+Entry(lo_mdep_scall)
+       TIME_TRAP_UENTRY
+
+       movl    %gs:CPU_KERNEL_STACK,%edi
+       xchgl   %edi,%esp                       /* switch to kernel stack */
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    TH_TASK(%ecx),%ebx              /* point to current task  */
+
+       /* Check for active vtimers in the current task */
+       TASK_VTIMER_CHECK(%ebx, %ecx)
+       
+       sti
+
+       CCALL1(machdep_syscall, %edi)
+       /*
+        * always returns through thread_exception_return
+        */
+
+return_to_user:
+       TIME_TRAP_UEXIT
+       jmp     ret_to_user
+       
+
+/*
+ * 64bit Tasks
+ * System call entries via syscall only:
+ *
+ *     esp      -> x86_saved_state64_t
+ *     cr3      -> kernel directory
+ *     esp      -> low based stack
+ *     gs       -> CPU_DATA_GS
+ *     cs       -> KERNEL32_CS
+ *     ss/ds/es -> KERNEL_DS
+ *
+ *     interrupts disabled
+ *     direction flag cleared
+ */
+
+Entry(lo_syscall)
+       TIME_TRAP_UENTRY
+
+       movl    %gs:CPU_KERNEL_STACK,%edi
+       xchgl   %edi,%esp                       /* switch to kernel stack */
+
+       movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* get current thread     */
+       movl    TH_TASK(%ecx),%ebx              /* point to current task  */
+
+       /* Check for active vtimers in the current task */
+       TASK_VTIMER_CHECK(%ebx, %ecx)
+
+       /*
+        * We can be here either for a mach, unix, machdep or diag syscall,
+        * as indicated by the syscall class:
+        */
+       movl    R64_RAX(%edi), %eax             /* syscall number/class */
+       movl    %eax, %edx
+       andl    $(SYSCALL_CLASS_MASK), %edx     /* syscall class */
+       cmpl    $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
+       je      EXT(lo64_mach_scall)
+       cmpl    $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
+       je      EXT(lo64_unix_scall)
+       cmpl    $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
+       je      EXT(lo64_mdep_scall)
+       cmpl    $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
+       je      EXT(lo64_diag_scall)
+
+       sti
+
+       /* Syscall class unknown */
+       CCALL5(i386_exception, $(EXC_SYSCALL), %eax, $0, $1, $0)
+       /* no return */
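/*
 * Dispatch above, roughly (illustrative):
 *
 *      switch (rax & SYSCALL_CLASS_MASK) {
 *      case SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT: goto lo64_mach_scall;
 *      case SYSCALL_CLASS_UNIX << SYSCALL_CLASS_SHIFT: goto lo64_unix_scall;
 *      case SYSCALL_CLASS_MDEP << SYSCALL_CLASS_SHIFT: goto lo64_mdep_scall;
 *      case SYSCALL_CLASS_DIAG << SYSCALL_CLASS_SHIFT: goto lo64_diag_scall;
 *      default: i386_exception(EXC_SYSCALL, rax, 1);   // unknown class, no return
 *      }
 */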
+
+
+Entry(lo64_unix_scall)
+       incl    TH_SYSCALLS_UNIX(%ecx)          /* increment call count   */
+       sti
+
+       CCALL1(unix_syscall64, %edi)
+       /*
+        * always returns through thread_exception_return
+        */
+
+
+Entry(lo64_mach_scall)
+       incl    TH_SYSCALLS_MACH(%ecx)          /* increment call count   */
+       sti
+
+       CCALL1(mach_call_munger64, %edi)
+       /*
+        * always returns through thread_exception_return
+        */
+
+
+
+Entry(lo64_mdep_scall)
+       sti
+
+       CCALL1(machdep_syscall64, %edi)
+       /*
+        * always returns through thread_exception_return
+        */
+
+
+Entry(lo64_diag_scall)
+       CCALL1(diagCall64, %edi)        // Call diagnostics
+               
+       cli                             // Disable interruptions just in case
+       cmpl    $0,%eax                 // What kind of return is this?
+       je      1f
+       movl    %edi, %esp              // Get back the original stack
+       jmp     return_to_user          // Normal return, do not check asts...
+1:     
+       CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
+               // pass what would be the diag syscall
+               // error return - cause an exception
+       /* no return */
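/*
 * Roughly (illustrative): a zero return from diagCall64() is an error and
 * is raised as an exception; otherwise return without the usual AST check:
 *
 *      if (diagCall64(state) != 0)
 *              return_to_user();                       // skip the AST check
 *      else
 *              i386_exception(EXC_SYSCALL, 0x6000, 1); // 0x6000: the diag syscall
 */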
+
+
+       
+/*
+ * Compatibility mode's last gasp...
+ */
+Entry(lo_df64)
+       movl    %esp, %eax
+       CCALL1(panic_double_fault64, %eax)
+       hlt
+
+Entry(lo_mc64)
+       movl    %esp, %eax
+       CCALL1(panic_machine_check64, %eax)
+       hlt