2 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 #include <i386/asm64.h>
26 #include <i386/eflags.h>
27 #include <i386/trap.h>
28 #define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
29 #include <mach/i386/syscall_sw.h>
30 #include <i386/postcode.h>
31 #include <i386/proc_reg.h>
/*
 * Addresses of the low-memory ("lo_") compatibility-mode handlers.
 * These are pushed into the trap/interrupt stack frames by the stub
 * macros below, so the high-level trampolines know where to dispatch.
 */
36 #define LO_ALLINTRS EXT(lo_allintrs)
37 #define LO_ALLTRAPS EXT(lo_alltraps)
38 #define LO_SYSENTER EXT(lo_sysenter)
39 #define LO_SYSCALL EXT(lo_syscall)
40 #define LO_UNIX_SCALL EXT(lo_unix_scall)
41 #define LO_MACH_SCALL EXT(lo_mach_scall)
42 #define LO_MDEP_SCALL EXT(lo_mdep_scall)
43 #define LO_DIAG_SCALL EXT(lo_diag_scall)
44 #define LO_DOUBLE_FAULT EXT(lo_df64)
45 #define LO_MACHINE_CHECK EXT(lo_mc64)
48 * Interrupt descriptor table and code vectors for it.
50 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
51 * reformatted ("fixed") before use.
52 * All vectors are rebased in uber-space.
53 * Special vectors (e.g. double-fault) use a non-0 IST.
/*
 * NOTE(review): the interior of IDT64_BASE_ENTRY is elided in this
 * excerpt; only the upper-base longword emission is visible here.
 */
55 #define IDT64_BASE_ENTRY(vec,seg,ist,type) \
58 .long KERNEL_UBER_BASE_HI32 ;\
/* Entry for an external (EXT-mangled) handler symbol, kernel 64-bit CS. */
65 #define IDT64_ENTRY(vec,ist,type) \
66 IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
/* Entry for a file-local label (no EXT mangling), kernel 64-bit CS. */
67 #define IDT64_ENTRY_LOCAL(vec,ist,type) \
68 IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
71 * Push trap number and address of compatibility mode handler,
72 * then branch to common trampoline. Error already pushed.
/* NOTE(review): stub bodies are partially elided in this excerpt. */
74 #define EXCEP64_ERR(n,name) \
75 IDT64_ENTRY(name,0,K_INTR_GATE) ;\
78 movl $(LO_ALLTRAPS), 4(%rsp) ;\
83 * Push error(0), trap number and address of compatibility mode handler,
84 * then branch to common trampoline.
86 #define EXCEPTION64(n,name) \
87 IDT64_ENTRY(name,0,K_INTR_GATE) ;\
91 movl $(LO_ALLTRAPS), 4(%rsp) ;\
96 * Interrupt from user.
97 * Push error (0), trap number and address of compatibility mode handler,
98 * then branch to common trampoline.
100 #define EXCEP64_USR(n,name) \
101 IDT64_ENTRY(name,0,U_INTR_GATE) ;\
105 movl $(LO_ALLTRAPS), 4(%rsp) ;\
106 jmp L_enter_lohandler
110 * Special interrupt code from user.
112 #define EXCEP64_SPC_USR(n,name) \
113 IDT64_ENTRY(name,0,U_INTR_GATE)
117 * Special interrupt code.
118 * In 64-bit mode we may use an IST slot instead of task gates.
120 #define EXCEP64_IST(n,name,ist) \
121 IDT64_ENTRY(name,ist,K_INTR_GATE)
122 #define EXCEP64_SPC(n,name) \
123 IDT64_ENTRY(name,0,K_INTR_GATE)
128 * Push zero err, interrupt vector and address of compatibility mode handler,
129 * then branch to common trampoline.
131 #define INTERRUPT64(n) \
132 IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE) ;\
137 movl $(LO_ALLINTRS), 4(%rsp) ;\
138 jmp L_enter_lohandler
144 Entry(hi64_data_base)
147 Entry(hi64_text_base)
/*
 * The IDT proper: one stub macro per vector 0x00..0xff.
 * NOTE(review): vectors 0x08 and 0x0c each appear twice below —
 * presumably alternatives selected by conditional assembly elided
 * from this excerpt (debugger vs. normal handlers); confirm against
 * the full source.  Vectors 0x20..0x7f and 0x84..0xfe are also
 * elided here.
 */
149 EXCEPTION64(0x00,t64_zero_div)
150 EXCEP64_SPC(0x01,hi64_debug)
151 INTERRUPT64(0x02) /* NMI */
152 EXCEP64_USR(0x03,t64_int3)
153 EXCEP64_USR(0x04,t64_into)
154 EXCEP64_USR(0x05,t64_bounds)
155 EXCEPTION64(0x06,t64_invop)
156 EXCEPTION64(0x07,t64_nofpu)
158 EXCEP64_IST(0x08,db_task_dbl_fault64,1)
160 EXCEP64_IST(0x08,hi64_double_fault,1)
162 EXCEPTION64(0x09,a64_fpu_over)
163 EXCEPTION64(0x0a,a64_inv_tss)
164 EXCEP64_SPC(0x0b,hi64_segnp)
166 EXCEP64_IST(0x0c,db_task_stk_fault64,1)
168 EXCEP64_IST(0x0c,hi64_stack_fault,1)
170 EXCEP64_SPC(0x0d,hi64_gen_prot)
171 EXCEP64_ERR(0x0e,t64_page_fault)
172 EXCEPTION64(0x0f,t64_trap_0f)
173 EXCEPTION64(0x10,t64_fpu_err)
174 EXCEPTION64(0x11,t64_trap_11)
175 EXCEP64_IST(0x12,mc64,1)
176 EXCEPTION64(0x13,t64_sse_err)
177 EXCEPTION64(0x14,t64_trap_14)
178 EXCEPTION64(0x15,t64_trap_15)
179 EXCEPTION64(0x16,t64_trap_16)
180 EXCEPTION64(0x17,t64_trap_17)
181 EXCEPTION64(0x18,t64_trap_18)
182 EXCEPTION64(0x19,t64_trap_19)
183 EXCEPTION64(0x1a,t64_trap_1a)
184 EXCEPTION64(0x1b,t64_trap_1b)
185 EXCEPTION64(0x1c,t64_trap_1c)
186 EXCEPTION64(0x1d,t64_trap_1d)
187 EXCEPTION64(0x1e,t64_trap_1e)
188 EXCEPTION64(0x1f,t64_trap_1f)
/* Software-interrupt system call gates, callable from user mode. */
292 EXCEP64_SPC_USR(0x80,hi64_unix_scall)
293 EXCEP64_SPC_USR(0x81,hi64_mach_scall)
294 EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
295 EXCEP64_SPC_USR(0x83,hi64_diag_scall)
427 EXCEPTION64(0xff,t64_preempt)
433 * Trap/interrupt entry points.
435 * All traps must create the following 32-bit save area on the PCB "stack"
436 * - this is identical to the legacy mode 32-bit case:
445 * cr2 (defined only for page fault)
455 * user esp - if from user
456 * user ss - if from user
458 * Above this is the trap number and compatibility mode handler address
459 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
473 * Control is passed here to return to the compatibility mode user.
474 * At this stage we're in kernel space in compatibility mode
475 * but we need to switch into 64-bit mode in the 4G-based trampoline
476 * space before performing the iret.
/*
 * Return path to a compatibility-mode (32-bit) user task.
 * Restores per-thread debug registers (32- or 64-bit width depending
 * on the task map), switches cr3 back to the user address space, and
 * relocates %rsp into uber-space before the final state restore.
 * NOTE(review): several instructions are elided from this excerpt
 * (branches between the numbered local labels are not all visible).
 */
478 Entry(lo64_ret_to_user)
479 movl %gs:CPU_ACTIVE_THREAD,%ecx
481 movl ACT_PCB_IDS(%ecx),%eax /* Obtain this thread's debug state */
482 cmpl $0,%eax /* Is there a debug register context? */
483 je 2f /* branch if not */
484 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 64-bit task? */
486 movl DS_DR0(%eax), %ecx /* If not, load the 32 bit DRs */
488 movl DS_DR1(%eax), %ecx
490 movl DS_DR2(%eax), %ecx
492 movl DS_DR3(%eax), %ecx
494 movl DS_DR7(%eax), %ecx
495 movl %ecx, %gs:CPU_DR7
496 movl $0, %gs:CPU_DR7 + 4
499 ENTER_64BIT_MODE() /* Enter long mode */
500 mov DS64_DR0(%eax), %rcx /* Load the full width DRs*/
502 mov DS64_DR1(%eax), %rcx
504 mov DS64_DR2(%eax), %rcx
506 mov DS64_DR3(%eax), %rcx
508 mov DS64_DR7(%eax), %rcx
509 mov %rcx, %gs:CPU_DR7
510 jmp 3f /* Enter uberspace */
517 * Now switch %cr3, if necessary.
519 swapgs /* switch back to uber-kernel gs base */
520 mov %gs:CPU_TASK_CR3,%rcx
521 mov %rcx,%gs:CPU_ACTIVE_CR3
525 /* flag the copyio engine state as WINDOWS_CLEAN */
526 mov %gs:CPU_ACTIVE_THREAD,%eax
527 movl $(WINDOWS_CLEAN),ACT_COPYIO_STATE(%eax)
528 mov %rcx,%cr3 /* switch to user's address space */
531 mov %gs:CPU_DR7, %rax /* Is there a debug control register?*/
534 mov %rax, %dr7 /* Set DR7 */
539 * Adjust stack to use uber-space.
541 mov $(KERNEL_UBER_BASE_HI32), %rax
543 shrd $32, %rax, %rsp /* relocate into uber-space */
545 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
/*
 * Return path back into kernel code, plus the shared 32-bit restore
 * sequence: the saved 32-bit register file (R_*) is copied into the
 * compatibility-mode interrupt stack frame (ISC32_*), then control
 * returns via iretq or, for sysenter entries, via sysexit.
 */
549 Entry(lo64_ret_to_kernel)
553 swapgs /* switch back to uber-kernel gs base */
556 * Adjust stack to use uber-space.
558 mov $(KERNEL_UBER_BASE_HI32), %rax
560 shrd $32, %rax, %rsp /* relocate into uber-space */
562 /* Check for return to 64-bit kernel space (EFI today) */
563 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
565 /* fall through for 32-bit return */
569 * Restore registers into the machine state for iret.
571 movl R_EIP(%rsp), %eax
572 movl %eax, ISC32_RIP(%rsp)
573 movl R_EFLAGS(%rsp), %eax
574 movl %eax, ISC32_RFLAGS(%rsp)
575 movl R_CS(%rsp), %eax
576 movl %eax, ISC32_CS(%rsp)
577 movl R_UESP(%rsp), %eax
578 movl %eax, ISC32_RSP(%rsp)
579 movl R_SS(%rsp), %eax
580 movl %eax, ISC32_SS(%rsp)
583 * Restore general 32-bit registers
585 movl R_EAX(%rsp), %eax
586 movl R_EBX(%rsp), %ebx
587 movl R_ECX(%rsp), %ecx
588 movl R_EDX(%rsp), %edx
589 movl R_EBP(%rsp), %ebp
590 movl R_ESI(%rsp), %esi
591 movl R_EDI(%rsp), %edi
594 * Restore segment registers. We may take an exception here but
595 * we've got enough space left in the save frame area to absorb
596 * a hardware frame plus the trapfn and trapno
608 add $(ISC32_OFFSET)+8+8, %rsp /* pop compat frame +
609 trapno/trapfn and error */
610 cmp $(SYSENTER_CS),ISF64_CS-8-8(%rsp)
611 /* test for fast entry/exit */
614 iretq /* return from interrupt */
/* Fast path: unwind the iret frame by hand and leave via sysexit. */
617 pop %rdx /* user return eip */
618 pop %rcx /* pop and toss cs */
619 andl $(~EFL_IF), (%rsp) /* clear interrupts enable, sti below */
620 popf /* flags - carry denotes failure */
621 pop %rcx /* user return esp */
623 sti /* interrupts enabled after sysexit */
624 sysexit /* 32-bit sysexit */
/*
 * 64-bit return path: restore the user's GS base MSR (only when
 * returning to user space), reload the full 64-bit register file from
 * the saved state (R64_*), then leave via iretq or — for SYSCALL_CS
 * pseudo-segment entries — via the fast sysretq path.
 */
629 * Set the GS Base MSR with the user's gs base.
631 movl %gs:CPU_UBER_USER_GS_BASE, %eax
632 movl %gs:CPU_UBER_USER_GS_BASE+4, %edx
633 movl $(MSR_IA32_GS_BASE), %ecx
635 testb $3, R64_CS(%rsp) /* returning to user-space? */
637 wrmsr /* set 64-bit base */
641 * Restore general 64-bit registers
643 mov R64_R15(%rsp), %r15
644 mov R64_R14(%rsp), %r14
645 mov R64_R13(%rsp), %r13
646 mov R64_R12(%rsp), %r12
647 mov R64_R11(%rsp), %r11
648 mov R64_R10(%rsp), %r10
649 mov R64_R9(%rsp), %r9
650 mov R64_R8(%rsp), %r8
651 mov R64_RSI(%rsp), %rsi
652 mov R64_RDI(%rsp), %rdi
653 mov R64_RBP(%rsp), %rbp
654 mov R64_RDX(%rsp), %rdx
655 mov R64_RBX(%rsp), %rbx
656 mov R64_RCX(%rsp), %rcx
657 mov R64_RAX(%rsp), %rax
659 add $(ISS64_OFFSET)+8+8, %rsp /* pop saved state frame +
660 trapno/trapfn and error */
661 cmpl $(SYSCALL_CS),ISF64_CS-8-8(%rsp)
662 /* test for fast entry/exit */
665 iretq /* return from interrupt */
669 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
672 * rsp user stack pointer
/* sysretq consumes: rcx = return rip, r11 = rflags (hardware contract) */
674 mov ISF64_RIP-16(%rsp), %rcx
675 mov ISF64_RFLAGS-16(%rsp), %r11
676 mov ISF64_RSP-16(%rsp), %rsp
677 sysretq /* return from system call */
680 * Common path to enter locore handlers.
683 swapgs /* switch to kernel gs (cpu_data) */
684 L_enter_lohandler_continue:
/* Select 32-bit vs 64-bit state-save path from the interrupted CS. */
685 cmpl $(USER64_CS), ISF64_CS(%rsp)
686 je L_64bit_enter /* this is a 64-bit user task */
687 cmpl $(KERNEL64_CS), ISF64_CS(%rsp)
688 je L_64bit_enter /* we're in 64-bit (EFI) code */
692 * System call handlers.
693 * These are entered via a syscall interrupt. The system call number in %rax
694 * is saved to the error code slot in the stack frame. We then branch to the
695 * common state saving code.
/* int $0x80: BSD/Unix system call */
698 Entry(hi64_unix_scall)
699 swapgs /* switch to kernel gs (cpu_data) */
700 L_unix_scall_continue:
701 push %rax /* save system call number */
703 movl $(LO_UNIX_SCALL), 4(%rsp)
704 jmp L_32bit_enter_check
/* int $0x81: Mach system call */
707 Entry(hi64_mach_scall)
708 swapgs /* switch to kernel gs (cpu_data) */
709 L_mach_scall_continue:
710 push %rax /* save system call number */
712 movl $(LO_MACH_SCALL), 4(%rsp)
713 jmp L_32bit_enter_check
/* int $0x82: machine-dependent system call */
716 Entry(hi64_mdep_scall)
717 swapgs /* switch to kernel gs (cpu_data) */
718 L_mdep_scall_continue:
719 push %rax /* save system call number */
721 movl $(LO_MDEP_SCALL), 4(%rsp)
722 jmp L_32bit_enter_check
/* int $0x83: diagnostics system call */
725 Entry(hi64_diag_scall)
726 swapgs /* switch to kernel gs (cpu_data) */
727 L_diag_scall_continue:
728 push %rax /* save system call number */
730 movl $(LO_DIAG_SCALL), 4(%rsp)
731 jmp L_32bit_enter_check
/*
 * SYSCALL instruction entry (64-bit user tasks).  Hardware left the
 * user rip in %rcx and rflags in %r11 with no stack switch, so we
 * stash the user %rsp, move onto the PCB interrupt stack, and build
 * the ISF64 frame by hand before joining the common 64-bit entry.
 * NOTE(review): the Entry() label for this handler is elided from
 * this excerpt; it begins just above the first line shown.
 */
734 swapgs /* Kapow! get per-cpu data area */
736 mov %rsp, %gs:CPU_UBER_TMP /* save user stack */
737 mov %gs:CPU_UBER_ISF, %rsp /* switch stack to pcb */
740 * Save values in the ISF frame in the PCB
741 * to cons up the saved machine state.
743 movl $(USER_DS), ISF64_SS(%rsp)
744 movl $(SYSCALL_CS), ISF64_CS(%rsp) /* cs - a pseudo-segment */
745 mov %r11, ISF64_RFLAGS(%rsp) /* rflags */
746 mov %rcx, ISF64_RIP(%rsp) /* rip */
747 mov %gs:CPU_UBER_TMP, %rcx
748 mov %rcx, ISF64_RSP(%rsp) /* user stack */
749 mov %rax, ISF64_ERR(%rsp) /* err/rax - syscall code */
750 movl $(0), ISF64_TRAPNO(%rsp) /* trapno */
751 movl $(LO_SYSCALL), ISF64_TRAPFN(%rsp)
752 jmp L_64bit_enter /* this can only be a 64-bit task */
755 * sysenter entry point
756 * Requires user code to set up:
757 * edx: user instruction pointer (return address)
758 * ecx: user stack pointer
759 * on which is pushed stub ret addr and saved ebx
760 * Return to user-space is made using sysexit.
761 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
762 * or requiring ecx to be preserved.
/* NOTE(review): the Entry() label and several pushes building the
 * iret-style frame are elided from this excerpt. */
765 mov (%rsp), %rsp /* switch from temporary stack to pcb */
767 * Push values on to the PCB stack
768 * to cons up the saved machine state.
770 push $(USER_DS) /* ss */
774 * Clear, among others, the Nested Task (NT) flags bit;
775 * This is cleared by INT, but not by sysenter, which only
776 * clears RF, VM and IF.
780 push $(SYSENTER_CS) /* cs */
781 swapgs /* switch to kernel gs (cpu_data) */
784 push %rax /* err/eax - syscall code */
786 movl $(LO_SYSENTER), ISF64_TRAPFN(%rsp)
787 orl $(EFL_IF), ISF64_RFLAGS(%rsp)
791 * Check we're not a confused 64-bit user.
793 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
794 jne L_64bit_entry_reject
795 /* fall through to 32-bit handler: */
799 * Make space for the compatibility save area.
801 sub $(ISC32_OFFSET), %rsp
802 movl $(SS_32), SS_FLAVOR(%rsp)
/*
 * 32-bit state save: capture the general registers, cr2, and the
 * hardware frame into the compatibility save area (R_* / ISC32_*),
 * then fall into the common lo-handler dispatch, which switches to
 * the kernel address space and drops to compatibility mode.
 */
813 * Save general 32-bit registers
815 mov %eax, R_EAX(%rsp)
816 mov %ebx, R_EBX(%rsp)
817 mov %ecx, R_ECX(%rsp)
818 mov %edx, R_EDX(%rsp)
819 mov %ebp, R_EBP(%rsp)
820 mov %esi, R_ESI(%rsp)
821 mov %edi, R_EDI(%rsp)
823 /* Unconditionally save cr2; only meaningful on page faults */
825 mov %eax, R_CR2(%rsp)
828 * Copy registers already saved in the machine state
829 * (in the interrupt stack frame) into the compat save area.
831 mov ISC32_RIP(%rsp), %eax
832 mov %eax, R_EIP(%rsp)
833 mov ISC32_RFLAGS(%rsp), %eax
834 mov %eax, R_EFLAGS(%rsp)
835 mov ISC32_CS(%rsp), %eax
837 mov ISC32_RSP(%rsp), %eax
838 mov %eax, R_UESP(%rsp)
839 mov ISC32_SS(%rsp), %eax
841 L_32bit_enter_after_fault:
842 mov ISC32_TRAPNO(%rsp), %ebx /* %ebx := trapno for later */
843 mov %ebx, R_TRAPNO(%rsp)
844 mov ISC32_ERR(%rsp), %eax
845 mov %eax, R_ERR(%rsp)
846 mov ISC32_TRAPFN(%rsp), %edx
849 * Common point to enter lo_handler in compatibility mode:
851 * %edx locore handler address
855 * Switch address space to kernel
856 * if not shared space and not already mapped.
857 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
860 mov %gs:CPU_TASK_CR3, %rcx
861 cmp %rax, %rcx /* is the task's cr3 loaded? */
863 cmpl $(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
866 mov %gs:CPU_KERNEL_CR3, %rcx
870 mov %rcx, %gs:CPU_ACTIVE_CR3
873 * Switch to compatibility mode.
874 * Then establish kernel segments.
876 swapgs /* Done with uber-kernel gs */
880 * Now in compatibility mode and running in compatibility space
881 * prepare to enter the locore handler.
883 * %edx lo_handler pointer
884 * Note: the stack pointer (now 32-bit) is now directly addressing the
885 * the kernel below 4G and therefore is automagically re-based.
887 mov $(KERNEL_DS), %eax
892 mov $(CPU_DATA_GS), %eax
895 movl %gs:CPU_ACTIVE_THREAD,%ecx /* Get the active thread */
896 cmpl $0, ACT_PCB_IDS(%ecx) /* Is there a debug register state? */
898 movl $0, %ecx /* If so, reset DR7 (the control) */
901 addl $1,%gs:hwIntCnt(,%ebx,4) // Bump the trap/intr count
903 /* Dispatch the designated lo handler */
907 L_64bit_entry_reject:
909 * Here for a 64-bit user attempting an invalid kernel entry.
/* Rewrite the frame so the task takes an invalid-opcode trap instead. */
911 movl $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
912 movl $(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
913 /* Fall through... */
917 * Here for a 64-bit user task, or special 64-bit kernel code.
918 * Make space for the save area.
920 sub $(ISS64_OFFSET), %rsp
921 movl $(SS_64), SS_FLAVOR(%rsp)
/* Save segment and general-purpose registers into the 64-bit state. */
926 mov %fs, R64_FS(%rsp)
927 mov %gs, R64_GS(%rsp)
929 /* Save general-purpose registers */
930 mov %rax, R64_RAX(%rsp)
931 mov %rcx, R64_RCX(%rsp)
932 mov %rbx, R64_RBX(%rsp)
933 mov %rbp, R64_RBP(%rsp)
934 mov %r11, R64_R11(%rsp)
935 mov %r12, R64_R12(%rsp)
936 mov %r13, R64_R13(%rsp)
937 mov %r14, R64_R14(%rsp)
938 mov %r15, R64_R15(%rsp)
940 /* cr2 is significant only for page-faults */
942 mov %rax, R64_CR2(%rsp)
944 /* Other registers (which may contain syscall args) */
945 mov %rdi, R64_RDI(%rsp) /* arg0 .. */
946 mov %rsi, R64_RSI(%rsp)
947 mov %rdx, R64_RDX(%rsp)
948 mov %r10, R64_R10(%rsp)
949 mov %r8, R64_R8(%rsp)
950 mov %r9, R64_R9(%rsp) /* .. arg5 */
952 L_64bit_enter_after_fault:
954 * At this point we're almost ready to join the common lo-entry code.
956 mov R64_TRAPNO(%rsp), %ebx
957 mov R64_TRAPFN(%rsp), %edx
959 jmp L_enter_lohandler2
962 * Debug trap. Check for single-stepping across system call into
963 * kernel. If this is the case, taking the debug trap has turned
964 * off single-stepping - save the flags register with the trace
/* NOTE(review): the Entry(hi64_debug) label is elided in this excerpt. */
968 swapgs /* set %gs for cpu data */
969 push $0 /* error code */
971 movl $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
973 testb $3, ISF64_CS(%rsp)
974 jnz L_enter_lohandler_continue
977 * trap came from kernel mode
/* If rip matches one of the syscall gate entry points, the trap hit
 * before the gate ran: discard the frame and re-run the gate path. */
979 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
980 jne L_enter_lohandler_continue /* trap not in uber-space */
982 cmpl $(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
984 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
985 jmp L_mach_scall_continue /* continue system call entry */
987 cmpl $(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
989 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
990 jmp L_mdep_scall_continue /* continue system call entry */
992 cmpl $(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
994 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
995 jmp L_unix_scall_continue /* continue system call entry */
997 cmpl $(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
998 jne L_enter_lohandler_continue
1000 * Interrupt stack frame has been pushed on the temporary stack.
1001 * We have to switch to pcb stack and copy eflags.
1003 add $32,%rsp /* remove trapno/trapfn/err/rip/cs */
1004 push %rcx /* save %rcx - user stack pointer */
1005 mov 32(%rsp),%rcx /* top of intr stack -> pcb stack */
1006 xchg %rcx,%rsp /* switch to pcb stack */
1007 push $(USER_DS) /* ss */
1008 push (%rcx) /* saved %rcx into rsp slot */
1009 push 8(%rcx) /* rflags */
1010 mov (%rcx),%rcx /* restore %rcx */
1011 push $(SYSENTER_TF_CS) /* cs - not SYSENTER_CS for iret path */
1012 jmp L_sysenter_continue /* continue sysenter entry */
/*
 * Double-fault handler.  If the fault hit at the hi64_syscall entry
 * point (a known hazard: SYSCALL does not switch stacks), recover the
 * user %rsp from the frame and resume the syscall entry path instead
 * of treating it as fatal.
 */
1015 Entry(hi64_double_fault)
1016 swapgs /* set %gs for cpu data */
1017 push $(T_DOUBLE_FAULT)
1018 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1020 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
1021 jne L_enter_lohandler_continue /* trap not in uber-space */
1023 cmpl $(EXT(hi64_syscall)), ISF64_RIP(%rsp)
1024 jne L_enter_lohandler_continue
1026 mov ISF64_RSP(%rsp), %rsp
1027 jmp L_syscall_continue
1031 * General protection or segment-not-present fault.
1032 * Check for a GP/NP fault in the kernel_return
1033 * sequence; if there, report it as a GP/NP fault on the user's instruction.
1035 * rsp-> 0: trap code (NP or GP) and trap function
1036 * 8: segment number in error (error code)
1042 * 56 old registers (trap is from kernel)
1044 Entry(hi64_gen_prot)
1045 push $(T_GENERAL_PROTECTION)
1046 jmp trap_check_kernel_exit /* check for kernel exit sequence */
/* NOTE(review): the Entry(hi64_segnp) label is elided in this excerpt. */
1049 push $(T_SEGMENT_NOT_PRESENT)
1050 /* indicate fault type */
1051 trap_check_kernel_exit:
1052 movl $(LO_ALLTRAPS), 4(%rsp)
1055 /* trap was from kernel mode, so */
1056 /* check for the kernel exit sequence */
/* 16(%rsp) = saved rip (after trapno/trapfn and errcode slots). */
1057 cmpl $(KERNEL_UBER_BASE_HI32), 16+4(%rsp)
1058 jne hi64_take_trap /* trap not in uber-space */
1060 cmpl $(EXT(ret32_iret)), 16(%rsp)
1062 cmpl $(EXT(ret32_set_ds)), 16(%rsp)
1063 je L_32bit_fault_set_seg
1064 cmpl $(EXT(ret32_set_es)), 16(%rsp)
1065 je L_32bit_fault_set_seg
1066 cmpl $(EXT(ret32_set_fs)), 16(%rsp)
1067 je L_32bit_fault_set_seg
1068 cmpl $(EXT(ret32_set_gs)), 16(%rsp)
1069 je L_32bit_fault_set_seg
1071 cmpl $(EXT(ret64_iret)), 16(%rsp)
1075 jmp L_enter_lohandler
1079 * GP/NP fault on IRET: CS or SS is in error.
1080 * All registers contain the user's values.
1083 * 0 trap number/function
1088 * 40 rsp --> new trapno
1089 * 48 ss --> new errcode
/* Shuffle trapno/errcode down over the user's rsp/ss slots so the
 * frame reads as a fault taken on the user's own instruction. */
1097 mov %rax, 16(%rsp) /* save rax (we don`t need saved rip) */
1098 pop %rax /* get trap number */
1099 mov %rax, 40-8(%rsp) /* put in user trap number */
1100 pop %rax /* get error code */
1101 mov %rax, 48-8-8(%rsp) /* put in user errcode */
1102 pop %rax /* restore rax */
1103 add $16,%rsp /* eat 2 more slots */
1104 /* now treat as fault from user */
1105 jmp L_enter_lohandler
1108 * Fault restoring a segment register. All of the saved state is still
1109 * on the stack untouched since we haven't yet moved the stack pointer.
1111 L_32bit_fault_set_seg:
1112 pop %rax /* get trap number/function */
1113 pop %rdx /* get error code */
1114 add $40,%rsp /* pop stack to saved state */
1115 mov %rax,ISC32_TRAPNO(%rsp)
1116 mov %rdx,ISC32_ERR(%rsp)
1117 /* now treat as fault from user */
1118 /* except that all the state is */
1119 /* already saved - we just have to */
1120 /* move the trapno and error into */
1121 /* the compatibility frame */
1123 jmp L_32bit_enter_after_fault
1127 * Fatal exception handlers:
1129 Entry(db_task_dbl_fault64)
1130 push $(T_DOUBLE_FAULT)
1131 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1132 jmp L_enter_lohandler
1134 Entry(db_task_stk_fault64)
1135 Entry(hi64_stack_fault)
1136 push $(T_STACK_FAULT)
/* Stack faults are routed to the double-fault lo handler as written;
 * NOTE(review): confirm this is intentional against the full source. */
1137 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1138 jmp L_enter_lohandler
/* Machine check: no hardware error code, so push a zero ourselves.
 * NOTE(review): the Entry(mc64) label is elided in this excerpt. */
1141 push $(0) /* Error */
1142 push $(T_MACHINE_CHECK)
1143 movl $(LO_MACHINE_CHECK), ISF64_TRAPFN(%rsp)
1144 jmp L_enter_lohandler