2 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
31 #include <i386/asm64.h>
34 #include <i386/eflags.h>
35 #include <i386/trap.h>
36 #define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
37 #include <mach/i386/syscall_sw.h>
38 #include <i386/postcode.h>
39 #include <i386/proc_reg.h>
/*
 * Addresses of the low-memory (compatibility-mode) handlers that the
 * 64-bit trampolines below dispatch to.  EXT() applies the platform
 * symbol decoration (defined in one of the headers above -- presumably
 * i386/asm64.h; TODO confirm).
 */
44 #define LO_ALLINTRS EXT(lo_allintrs)
45 #define LO_ALLTRAPS EXT(lo_alltraps)
46 #define LO_SYSENTER EXT(lo_sysenter)
47 #define LO_SYSCALL EXT(lo_syscall)
48 #define LO_UNIX_SCALL EXT(lo_unix_scall)
49 #define LO_MACH_SCALL EXT(lo_mach_scall)
50 #define LO_MDEP_SCALL EXT(lo_mdep_scall)
51 #define LO_DIAG_SCALL EXT(lo_diag_scall)
52 #define LO_DOUBLE_FAULT EXT(lo_df64)
53 #define LO_MACHINE_CHECK EXT(lo_mc64)
/*
 * Descriptor-laying macros.  NOTE(review): the body of IDT64_BASE_ENTRY is
 * only partially visible here (interior lines elided); the visible .long
 * stores KERNEL_UBER_BASE_HI32 as the high half of the handler address,
 * consistent with the "rebased in uber-space" note below.  Do not insert
 * lines between the `\`-continued macro lines.
 */
56 * Interrupt descriptor table and code vectors for it.
58 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
59 * reformatted ("fixed") before use.
60 * All vector are rebased in uber-space.
61 * Special vectors (e.g. double-fault) use a non-0 IST.
63 #define IDT64_BASE_ENTRY(vec,seg,ist,type) \
66 .long KERNEL_UBER_BASE_HI32 ;\
73 #define IDT64_ENTRY(vec,ist,type) \
74 IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
/* Same as IDT64_ENTRY but for a file-local (non-EXT) handler label. */
75 #define IDT64_ENTRY_LOCAL(vec,ist,type) \
76 IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
/*
 * Trap-vector macros.  Each lays down a gate via IDT64_ENTRY and (in lines
 * partly elided from this view) pushes the trap number plus the 32-bit
 * handler address into the 4(%rsp) slot before jumping to the common
 * trampoline L_enter_lohandler.
 *   EXCEP64_ERR  - CPU already pushed an error code (kernel gate).
 *   EXCEPTION64  - no CPU error code; a zero error is pushed (kernel gate).
 *   EXCEP64_USR  - as EXCEPTION64 but U_INTR_GATE, callable from user mode.
 */
79 * Push trap number and address of compatibility mode handler,
80 * then branch to common trampoline. Error already pushed.
82 #define EXCEP64_ERR(n,name) \
83 IDT64_ENTRY(name,0,K_INTR_GATE) ;\
86 movl $(LO_ALLTRAPS), 4(%rsp) ;\
91 * Push error(0), trap number and address of compatibility mode handler,
92 * then branch to common trampoline.
94 #define EXCEPTION64(n,name) \
95 IDT64_ENTRY(name,0,K_INTR_GATE) ;\
99 movl $(LO_ALLTRAPS), 4(%rsp) ;\
100 jmp L_enter_lohandler
104 * Interrupt from user.
105 * Push error (0), trap number and address of compatibility mode handler,
106 * then branch to common trampoline.
108 #define EXCEP64_USR(n,name) \
109 IDT64_ENTRY(name,0,U_INTR_GATE) ;\
113 movl $(LO_ALLTRAPS), 4(%rsp) ;\
114 jmp L_enter_lohandler
/*
 * Special-entry macros:
 *   EXCEP64_SPC_USR - gate only, user-callable; the named handler does its
 *                     own state saving (used by the 0x80-0x83 syscalls).
 *   EXCEP64_IST     - kernel gate on a non-zero Interrupt Stack Table slot
 *                     (double fault, stack fault, machine check).
 *   EXCEP64_SPC     - kernel gate, handler saves state itself.
 *   INTERRUPT64     - device interrupt: dispatches to LO_ALLINTRS with the
 *                     vector number (push elided from this view).
 */
118 * Special interrupt code from user.
120 #define EXCEP64_SPC_USR(n,name) \
121 IDT64_ENTRY(name,0,U_INTR_GATE)
125 * Special interrupt code.
126 * In 64-bit mode we may use an IST slot instead of task gates.
128 #define EXCEP64_IST(n,name,ist) \
129 IDT64_ENTRY(name,ist,K_INTR_GATE)
130 #define EXCEP64_SPC(n,name) \
131 IDT64_ENTRY(name,0,K_INTR_GATE)
136 * Push zero err, interrupt vector and address of compatibility mode handler,
137 * then branch to common trampoline.
139 #define INTERRUPT64(n) \
140 IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE) ;\
145 movl $(LO_ALLINTRS), 4(%rsp) ;\
146 jmp L_enter_lohandler
/*
 * The master 64-bit IDT, one macro invocation per vector (0x00..0xff;
 * vectors between 0x1f and 0x80 and between 0x83 and 0xff are elided
 * from this view).  hi64_data_base / hi64_text_base mark the bases of
 * the high-mapped data and text regions.
 */
152 Entry(hi64_data_base)
155 Entry(hi64_text_base)
157 EXCEPTION64(0x00,t64_zero_div)
158 EXCEP64_SPC(0x01,hi64_debug)
159 INTERRUPT64(0x02) /* NMI */
160 EXCEP64_USR(0x03,t64_int3)
161 EXCEP64_USR(0x04,t64_into)
162 EXCEP64_USR(0x05,t64_bounds)
163 EXCEPTION64(0x06,t64_invop)
164 EXCEPTION64(0x07,t64_nofpu)
/*
 * NOTE(review): vector 0x08 appears twice; the elided lines are presumably
 * #if MACH_KDB / #else / #endif selecting the debugger vs. standard
 * double-fault handler -- confirm against the full file.  Both use IST 1.
 */
166 EXCEP64_IST(0x08,db_task_dbl_fault64,1)
168 EXCEP64_IST(0x08,hi64_double_fault,1)
170 EXCEPTION64(0x09,a64_fpu_over)
171 EXCEPTION64(0x0a,a64_inv_tss)
172 EXCEP64_SPC(0x0b,hi64_segnp)
/* Vector 0x0c likewise appears twice -- presumably the same conditional. */
174 EXCEP64_IST(0x0c,db_task_stk_fault64,1)
176 EXCEP64_IST(0x0c,hi64_stack_fault,1)
178 EXCEP64_SPC(0x0d,hi64_gen_prot)
179 EXCEP64_ERR(0x0e,t64_page_fault)
180 EXCEPTION64(0x0f,t64_trap_0f)
181 EXCEPTION64(0x10,t64_fpu_err)
182 EXCEPTION64(0x11,t64_trap_11)
183 EXCEP64_IST(0x12,mc64,1)
184 EXCEPTION64(0x13,t64_sse_err)
185 EXCEPTION64(0x14,t64_trap_14)
186 EXCEPTION64(0x15,t64_trap_15)
187 EXCEPTION64(0x16,t64_trap_16)
188 EXCEPTION64(0x17,t64_trap_17)
189 EXCEPTION64(0x18,t64_trap_18)
190 EXCEPTION64(0x19,t64_trap_19)
191 EXCEPTION64(0x1a,t64_trap_1a)
192 EXCEPTION64(0x1b,t64_trap_1b)
193 EXCEPTION64(0x1c,t64_trap_1c)
194 EXCEPTION64(0x1d,t64_trap_1d)
195 EXCEPTION64(0x1e,t64_trap_1e)
196 EXCEPTION64(0x1f,t64_trap_1f)
/* Software-interrupt system call gates, callable from user mode. */
300 EXCEP64_SPC_USR(0x80,hi64_unix_scall)
301 EXCEP64_SPC_USR(0x81,hi64_mach_scall)
302 EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
303 EXCEP64_SPC_USR(0x83,hi64_diag_scall)
435 EXCEPTION64(0xff,t64_preempt)
441 * Trap/interrupt entry points.
443 * All traps must create the following 32-bit save area on the PCB "stack"
444 * - this is identical to the legacy mode 32-bit case:
453 * cr2 (defined only for page fault)
463 * user esp - if from user
464 * user ss - if from user
466 * Above this is the trap number and compatibility mode handler address
467 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
481 * Control is passed here to return to the compatibility mode user.
482 * At this stage we're in kernel space in compatibility mode
483 * but we need to switch into 64-bit mode in the 4G-based trampoline
484 * space before performing the iret.
486 Entry(lo64_ret_to_user)
/*
 * Restore per-thread debug registers, if any.
 * NOTE(review): the numeric local labels 1:/2:/3: referenced below are in
 * lines elided from this view; the branch structure cannot be fully
 * verified here.
 */
487 movl %gs:CPU_ACTIVE_THREAD,%ecx
489 movl ACT_PCB_IDS(%ecx),%eax /* Obtain this thread's debug state */
490 cmpl $0,%eax /* Is there a debug register context? */
491 je 2f /* branch if not */
492 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 64-bit task? */
/* 32-bit task: stage the 32-bit DR0-DR3 and DR7 via per-cpu CPU_DR7. */
494 movl DS_DR0(%eax), %ecx /* If not, load the 32 bit DRs */
496 movl DS_DR1(%eax), %ecx
498 movl DS_DR2(%eax), %ecx
500 movl DS_DR3(%eax), %ecx
502 movl DS_DR7(%eax), %ecx
503 movl %ecx, %gs:CPU_DR7
/* Zero the high half of the saved DR7 for the 32-bit case. */
504 movl $0, %gs:CPU_DR7 + 4
/* 64-bit task: switch to long mode and load full-width debug registers. */
507 ENTER_64BIT_MODE() /* Enter long mode */
508 mov DS64_DR0(%eax), %rcx /* Load the full width DRs*/
510 mov DS64_DR1(%eax), %rcx
512 mov DS64_DR2(%eax), %rcx
514 mov DS64_DR3(%eax), %rcx
516 mov DS64_DR7(%eax), %rcx
517 mov %rcx, %gs:CPU_DR7
518 jmp 3f /* Enter uberspace */
525 * Now switch %cr3, if necessary.
527 swapgs /* switch back to uber-kernel gs base */
528 mov %gs:CPU_TASK_CR3,%rcx
529 mov %rcx,%gs:CPU_ACTIVE_CR3
533 /* flag the copyio engine state as WINDOWS_CLEAN */
534 mov %gs:CPU_ACTIVE_THREAD,%eax
535 movl $(WINDOWS_CLEAN),ACT_COPYIO_STATE(%eax)
536 mov %rcx,%cr3 /* switch to user's address space */
/* Install the staged DR7 value, if one was set up above. */
539 mov %gs:CPU_DR7, %rax /* Is there a debug control register?*/
542 mov %rax, %dr7 /* Set DR7 */
547 * Adjust stack to use uber-space.
549 mov $(KERNEL_UBER_BASE_HI32), %rax
/* Shift the uber-base high half into the top 32 bits of %rsp. */
551 shrd $32, %rax, %rsp /* relocate into uber-space */
/* Dispatch on saved-state flavor: 32-bit compat vs 64-bit return path. */
553 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
/*
 * Return to kernel-mode interrupted context (e.g. 64-bit EFI code).
 * Mirrors the tail of lo64_ret_to_user: restore uber gs, relocate the
 * stack pointer into uber-space, then select the 32/64-bit restore path
 * on the saved-state flavor.
 */
557 Entry(lo64_ret_to_kernel)
561 swapgs /* switch back to uber-kernel gs base */
564 * Adjust stack to use uber-space.
566 mov $(KERNEL_UBER_BASE_HI32), %rax
568 shrd $32, %rax, %rsp /* relocate into uber-space */
570 /* Check for return to 64-bit kernel space (EFI today) */
571 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
573 /* fall through for 32-bit return */
577 * Restore registers into the machine state for iret.
/*
 * Copy the 32-bit saved state (R_* offsets) into the compat interrupt
 * stack frame (ISC32_* offsets) that iret will consume.
 */
579 movl R_EIP(%rsp), %eax
580 movl %eax, ISC32_RIP(%rsp)
581 movl R_EFLAGS(%rsp), %eax
582 movl %eax, ISC32_RFLAGS(%rsp)
583 movl R_CS(%rsp), %eax
584 movl %eax, ISC32_CS(%rsp)
585 movl R_UESP(%rsp), %eax
586 movl %eax, ISC32_RSP(%rsp)
587 movl R_SS(%rsp), %eax
588 movl %eax, ISC32_SS(%rsp)
591 * Restore general 32-bit registers
593 movl R_EAX(%rsp), %eax
594 movl R_EBX(%rsp), %ebx
595 movl R_ECX(%rsp), %ecx
596 movl R_EDX(%rsp), %edx
597 movl R_EBP(%rsp), %ebp
598 movl R_ESI(%rsp), %esi
599 movl R_EDI(%rsp), %edi
602 * Restore segment registers. We make take an exception here but
603 * we've got enough space left in the save frame area to absorb
604 * a hardware frame plus the trapfn and trapno
/*
 * Pop everything but the hardware frame, then pick the exit instruction:
 * sysexit if we entered via SYSENTER_CS, otherwise iretq.
 */
616 add $(ISC32_OFFSET)+8+8, %rsp /* pop compat frame +
617 trapno/trapfn and error */
618 cmp $(SYSENTER_CS),ISF64_CS-8-8(%rsp)
619 /* test for fast entry/exit */
622 iretq /* return from interrupt */
/*
 * sysexit path: CPU requires %edx = user eip, %ecx = user esp.
 * Interrupts are re-enabled only after the final sti so no interrupt
 * can be taken on the user stack before sysexit completes.
 */
625 pop %rdx /* user return eip */
626 pop %rcx /* pop and toss cs */
627 andl $(~EFL_IF), (%rsp) /* clear interrupts enable, sti below */
628 popf /* flags - carry denotes failure */
629 pop %rcx /* user return esp */
631 sti /* interrupts enabled after sysexit */
632 sysexit /* 32-bit sysexit */
637 * Set the GS Base MSR with the user's gs base.
/* Load user gs base into edx:eax, select MSR, write only if going to user. */
639 movl %gs:CPU_UBER_USER_GS_BASE, %eax
640 movl %gs:CPU_UBER_USER_GS_BASE+4, %edx
641 movl $(MSR_IA32_GS_BASE), %ecx
643 testb $3, R64_CS(%rsp) /* returning to user-space? */
645 wrmsr /* set 64-bit base */
649 * Restore general 64-bit registers
651 mov R64_R15(%rsp), %r15
652 mov R64_R14(%rsp), %r14
653 mov R64_R13(%rsp), %r13
654 mov R64_R12(%rsp), %r12
655 mov R64_R11(%rsp), %r11
656 mov R64_R10(%rsp), %r10
657 mov R64_R9(%rsp), %r9
658 mov R64_R8(%rsp), %r8
659 mov R64_RSI(%rsp), %rsi
660 mov R64_RDI(%rsp), %rdi
661 mov R64_RBP(%rsp), %rbp
662 mov R64_RDX(%rsp), %rdx
663 mov R64_RBX(%rsp), %rbx
664 mov R64_RCX(%rsp), %rcx
665 mov R64_RAX(%rsp), %rax
/*
 * Pop the saved state, leaving the hardware frame; use sysretq for
 * SYSCALL_CS (fast path), iretq otherwise.
 */
667 add $(ISS64_OFFSET)+8+8, %rsp /* pop saved state frame +
668 trapno/trapfn and error */
669 cmpl $(SYSCALL_CS),ISF64_CS-8-8(%rsp)
670 /* test for fast entry/exit */
673 iretq /* return from interrupt */
677 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
680 * rsp user stack pointer
/* sysretq consumes rip from %rcx and rflags from %r11 (per the ISA). */
682 mov ISF64_RIP-16(%rsp), %rcx
683 mov ISF64_RFLAGS-16(%rsp), %r11
684 mov ISF64_RSP-16(%rsp), %rsp
685 sysretq /* return from system call */
688 * Common path to enter locore handlers.
/* NOTE(review): the L_enter_lohandler label itself is elided from this view. */
691 swapgs /* switch to kernel gs (cpu_data) */
692 L_enter_lohandler_continue:
/* 64-bit user task or 64-bit kernel (EFI) code takes the 64-bit save path. */
693 cmpl $(USER64_CS), ISF64_CS(%rsp)
694 je L_64bit_enter /* this is a 64-bit user task */
695 cmpl $(KERNEL64_CS), ISF64_CS(%rsp)
696 je L_64bit_enter /* we're in 64-bit (EFI) code */
700 * System call handlers.
701 * These are entered via a syscall interrupt. The system call number in %rax
702 * is saved to the error code slot in the stack frame. We then branch to the
703 * common state saving code.
706 Entry(hi64_unix_scall)
707 swapgs /* switch to kernel gs (cpu_data) */
708 L_unix_scall_continue:
/* %rax (syscall number) fills the error-code slot; 4(%rsp) gets the handler. */
709 push %rax /* save system call number */
711 movl $(LO_UNIX_SCALL), 4(%rsp)
712 jmp L_32bit_enter_check
/* Mach trap entry (int 0x81): same shape as hi64_unix_scall above. */
715 Entry(hi64_mach_scall)
716 swapgs /* switch to kernel gs (cpu_data) */
717 L_mach_scall_continue:
718 push %rax /* save system call number */
720 movl $(LO_MACH_SCALL), 4(%rsp)
721 jmp L_32bit_enter_check
/* Machine-dependent syscall entry (int 0x82). */
724 Entry(hi64_mdep_scall)
725 swapgs /* switch to kernel gs (cpu_data) */
726 L_mdep_scall_continue:
727 push %rax /* save system call number */
729 movl $(LO_MDEP_SCALL), 4(%rsp)
730 jmp L_32bit_enter_check
/* Diagnostics syscall entry (int 0x83). */
733 Entry(hi64_diag_scall)
734 swapgs /* switch to kernel gs (cpu_data) */
735 L_diag_scall_continue:
736 push %rax /* save system call number */
738 movl $(LO_DIAG_SCALL), 4(%rsp)
739 jmp L_32bit_enter_check
/*
 * SYSCALL (fast 64-bit) entry continuation.
 * NOTE(review): the Entry(hi64_syscall) label is elided from this view.
 * SYSCALL does not switch stacks, so the user %rsp is parked in per-cpu
 * scratch and replaced with the PCB interrupt-stack-frame pointer; the
 * hardware frame that an interrupt gate would have pushed is then
 * constructed by hand.
 */
742 swapgs /* Kapow! get per-cpu data area */
744 mov %rsp, %gs:CPU_UBER_TMP /* save user stack */
745 mov %gs:CPU_UBER_ISF, %rsp /* switch stack to pcb */
748 * Save values in the ISF frame in the PCB
749 * to cons up the saved machine state.
751 movl $(USER_DS), ISF64_SS(%rsp)
752 movl $(SYSCALL_CS), ISF64_CS(%rsp) /* cs - a pseudo-segment */
/* SYSCALL left rflags in %r11 and the return rip in %rcx (per the ISA). */
753 mov %r11, ISF64_RFLAGS(%rsp) /* rflags */
754 mov %rcx, ISF64_RIP(%rsp) /* rip */
755 mov %gs:CPU_UBER_TMP, %rcx
756 mov %rcx, ISF64_RSP(%rsp) /* user stack */
757 mov %rax, ISF64_ERR(%rsp) /* err/rax - syscall code */
758 movl $(0), ISF64_TRAPNO(%rsp) /* trapno */
759 movl $(LO_SYSCALL), ISF64_TRAPFN(%rsp)
760 jmp L_64bit_enter /* this can only be a 64-bit task */
763 * sysenter entry point
764 * Requires user code to set up:
765 * edx: user instruction pointer (return address)
766 * ecx: user stack pointer
767 * on which is pushed stub ret addr and saved ebx
768 * Return to user-space is made using sysexit.
769 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
770 * or requiring ecx to be preserved.
/* NOTE(review): the Entry(hi64_sysenter) label is elided from this view. */
773 mov (%rsp), %rsp /* switch from temporary stack to pcb */
775 * Push values on to the PCB stack
776 * to cons up the saved machine state.
778 push $(USER_DS) /* ss */
782 * Clear, among others, the Nested Task (NT) flags bit;
783 * This is cleared by INT, but not by sysenter, which only
784 * clears RF, VM and IF.
788 push $(SYSENTER_CS) /* cs */
789 swapgs /* switch to kernel gs (cpu_data) */
/* NOTE(review): L_sysenter_continue label (target of the debug-trap repair
 * path below) lies in elided lines around here. */
792 push %rax /* err/eax - syscall code */
794 movl $(LO_SYSENTER), ISF64_TRAPFN(%rsp)
/* Re-enable IF in the saved flags: sysenter cleared it on entry. */
795 orl $(EFL_IF), ISF64_RFLAGS(%rsp)
799 * Check we're not a confused 64-bit user.
/* L_32bit_enter_check: 64-bit tasks must not use the 32-bit entry points. */
801 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
802 jne L_64bit_entry_reject
803 /* fall through to 32-bit handler: */
807 * Make space for the compatibility save area.
809 sub $(ISC32_OFFSET), %rsp
810 movl $(SS_32), SS_FLAVOR(%rsp)
821 * Save general 32-bit registers
823 mov %eax, R_EAX(%rsp)
824 mov %ebx, R_EBX(%rsp)
825 mov %ecx, R_ECX(%rsp)
826 mov %edx, R_EDX(%rsp)
827 mov %ebp, R_EBP(%rsp)
828 mov %esi, R_ESI(%rsp)
829 mov %edi, R_EDI(%rsp)
831 /* Unconditionally save cr2; only meaningful on page faults */
/* NOTE(review): the mov from %cr2 into %rax/%eax is in elided lines. */
833 mov %eax, R_CR2(%rsp)
836 * Copy registers already saved in the machine state
837 * (in the interrupt stack frame) into the compat save area.
839 mov ISC32_RIP(%rsp), %eax
840 mov %eax, R_EIP(%rsp)
841 mov ISC32_RFLAGS(%rsp), %eax
842 mov %eax, R_EFLAGS(%rsp)
843 mov ISC32_CS(%rsp), %eax
845 mov ISC32_RSP(%rsp), %eax
846 mov %eax, R_UESP(%rsp)
847 mov ISC32_SS(%rsp), %eax
/* Entered directly from the GP/NP fault-repair path with state pre-saved. */
849 L_32bit_enter_after_fault:
850 mov ISC32_TRAPNO(%rsp), %ebx /* %ebx := trapno for later */
851 mov %ebx, R_TRAPNO(%rsp)
852 mov ISC32_ERR(%rsp), %eax
853 mov %eax, R_ERR(%rsp)
/* %edx carries the locore handler address into L_enter_lohandler2. */
854 mov ISC32_TRAPFN(%rsp), %edx
857 * Common point to enter lo_handler in compatibilty mode:
859 * %edx locore handler address
863 * Switch address space to kernel
864 * if not shared space and not already mapped.
865 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
868 mov %gs:CPU_TASK_CR3, %rcx
869 cmp %rax, %rcx /* is the task's cr3 loaded? */
871 cmpl $(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
/* Not shared-space: switch to the kernel's cr3. */
874 mov %gs:CPU_KERNEL_CR3, %rcx
878 mov %rcx, %gs:CPU_ACTIVE_CR3
881 * Switch to compatibility mode.
882 * Then establish kernel segments.
884 swapgs /* Done with uber-kernel gs */
888 * Now in compatibility mode and running in compatibility space
889 * prepare to enter the locore handler.
891 * %edx lo_handler pointer
892 * Note: the stack pointer (now 32-bit) is now directly addressing the
893 * the kernel below 4G and therefore is automagically re-based.
895 mov $(KERNEL_DS), %eax
/* Segment loads into %ds/%es/etc. are in elided lines. */
900 mov $(CPU_DATA_GS), %eax
/* If a debug-register context exists, clear DR7 while in the kernel. */
903 movl %gs:CPU_ACTIVE_THREAD,%ecx /* Get the active thread */
904 cmpl $0, ACT_PCB_IDS(%ecx) /* Is there a debug register state? */
906 movl $0, %ecx /* If so, reset DR7 (the control) */
/* %ebx still holds the trap/interrupt number saved earlier. */
909 addl $1,%gs:hwIntCnt(,%ebx,4) // Bump the trap/intr count
911 /* Dispatch the designated lo handler */
915 L_64bit_entry_reject:
917 * Here for a 64-bit user attempting an invalid kernel entry.
/* Convert the bogus entry into an invalid-opcode trap for the lo handler. */
919 movl $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
920 movl $(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
921 /* Fall through... */
925 * Here for a 64-bit user task, or special 64-bit kernel code.
926 * Make space for the save area.
928 sub $(ISS64_OFFSET), %rsp
929 movl $(SS_64), SS_FLAVOR(%rsp)
/* Save segment selectors (fs/gs; others presumably in elided lines). */
934 mov %fs, R64_FS(%rsp)
935 mov %gs, R64_GS(%rsp)
937 /* Save general-purpose registers */
938 mov %rax, R64_RAX(%rsp)
939 mov %rcx, R64_RCX(%rsp)
940 mov %rbx, R64_RBX(%rsp)
941 mov %rbp, R64_RBP(%rsp)
942 mov %r11, R64_R11(%rsp)
943 mov %r12, R64_R12(%rsp)
944 mov %r13, R64_R13(%rsp)
945 mov %r14, R64_R14(%rsp)
946 mov %r15, R64_R15(%rsp)
948 /* cr2 is significant only for page-faults */
/* NOTE(review): the mov from %cr2 into %rax is in elided lines. */
950 mov %rax, R64_CR2(%rsp)
952 /* Other registers (which may contain syscall args) */
953 mov %rdi, R64_RDI(%rsp) /* arg0 .. */
954 mov %rsi, R64_RSI(%rsp)
955 mov %rdx, R64_RDX(%rsp)
956 mov %r10, R64_R10(%rsp)
957 mov %r8, R64_R8(%rsp)
958 mov %r9, R64_R9(%rsp) /* .. arg5 */
960 L_64bit_enter_after_fault:
962 * At this point we're almost ready to join the common lo-entry code.
/* Hand off trapno in %ebx and handler address in %edx, as the common
 * entry expects. */
964 mov R64_TRAPNO(%rsp), %ebx
965 mov R64_TRAPFN(%rsp), %edx
967 jmp L_enter_lohandler2
970 * Debug trap. Check for single-stepping across system call into
971 * kernel. If this is the case, taking the debug trap has turned
972 * off single-stepping - save the flags register with the trace
/* NOTE(review): the Entry(hi64_debug) label is elided from this view. */
976 swapgs /* set %gs for cpu data */
977 push $0 /* error code */
979 movl $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
/* From user mode (CPL != 0): handle as an ordinary trap. */
981 testb $3, ISF64_CS(%rsp)
982 jnz L_enter_lohandler_continue
985 * trap came from kernel mode
/*
 * If the trap hit one of the uber-space syscall entry stubs before it
 * could save state, discard this frame and restart the interrupted
 * entry sequence at its *_continue label.
 */
987 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
988 jne L_enter_lohandler_continue /* trap not in uber-space */
990 cmpl $(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
992 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
993 jmp L_mach_scall_continue /* continue system call entry */
995 cmpl $(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
997 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
998 jmp L_mdep_scall_continue /* continue system call entry */
1000 cmpl $(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
1002 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
1003 jmp L_unix_scall_continue /* continue system call entry */
1005 cmpl $(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
1006 jne L_enter_lohandler_continue
1008 * Interrupt stack frame has been pushed on the temporary stack.
1009 * We have to switch to pcb stack and copy eflags.
1011 add $32,%rsp /* remove trapno/trapfn/err/rip/cs */
1012 push %rcx /* save %rcx - user stack pointer */
1013 mov 32(%rsp),%rcx /* top of intr stack -> pcb stack */
1014 xchg %rcx,%rsp /* switch to pcb stack */
1015 push $(USER_DS) /* ss */
1016 push (%rcx) /* saved %rcx into rsp slot */
1017 push 8(%rcx) /* rflags */
1018 mov (%rcx),%rcx /* restore %rcx */
/* Use the trap-flag variant of the cs so exit goes via iret, not sysexit. */
1019 push $(SYSENTER_TF_CS) /* cs - not SYSENTER_CS for iret path */
1020 jmp L_sysenter_continue /* continue sysenter entry */
1023 Entry(hi64_double_fault)
1024 swapgs /* set %gs for cpu data */
1025 push $(T_DOUBLE_FAULT)
1026 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
/*
 * A double fault exactly at the hi64_syscall stub means SYSCALL entered
 * with a bad user %rsp; recover the saved stack pointer and restart the
 * syscall entry sequence rather than treating it as fatal.
 */
1028 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
1029 jne L_enter_lohandler_continue /* trap not in uber-space */
1031 cmpl $(EXT(hi64_syscall)), ISF64_RIP(%rsp)
1032 jne L_enter_lohandler_continue
1034 mov ISF64_RSP(%rsp), %rsp
1035 jmp L_syscall_continue
1039 * General protection or segment-not-present fault.
1040 * Check for a GP/NP fault in the kernel_return
1041 * sequence; if there, report it as a GP/NP fault on the user's instruction.
1043 * rsp-> 0: trap code (NP or GP) and trap function
1044 * 8: segment number in error (error code)
1050 * 56 old registers (trap is from kernel)
1052 Entry(hi64_gen_prot)
1053 push $(T_GENERAL_PROTECTION)
1054 jmp trap_check_kernel_exit /* check for kernel exit sequence */
/* NOTE(review): Entry(hi64_segnp) label is elided; this push is its body. */
1057 push $(T_SEGMENT_NOT_PRESENT)
1058 /* indicate fault type */
1059 trap_check_kernel_exit:
1060 movl $(LO_ALLTRAPS), 4(%rsp)
1063 /* trap was from kernel mode, so */
1064 /* check for the kernel exit sequence */
/* 16(%rsp) is the faulting rip (past trapno/trapfn and error code). */
1065 cmpl $(KERNEL_UBER_BASE_HI32), 16+4(%rsp)
1066 jne hi64_take_trap /* trap not in uber-space */
/* Faults on iret get the frame rewritten; faults on a segment-register
 * restore jump to L_32bit_fault_set_seg. */
1068 cmpl $(EXT(ret32_iret)), 16(%rsp)
1070 cmpl $(EXT(ret32_set_ds)), 16(%rsp)
1071 je L_32bit_fault_set_seg
1072 cmpl $(EXT(ret32_set_es)), 16(%rsp)
1073 je L_32bit_fault_set_seg
1074 cmpl $(EXT(ret32_set_fs)), 16(%rsp)
1075 je L_32bit_fault_set_seg
1076 cmpl $(EXT(ret32_set_gs)), 16(%rsp)
1077 je L_32bit_fault_set_seg
1079 cmpl $(EXT(ret64_iret)), 16(%rsp)
1083 jmp L_enter_lohandler
1087 * GP/NP fault on IRET: CS or SS is in error.
1088 * All registers contain the user's values.
1091 * 0 trap number/function
1096 * 40 rsp --> new trapno
1097 * 48 ss --> new errcode
/*
 * Rewrite the fault frame in place: move the trap number and error code
 * into the user frame's rsp/ss slots (offsets shrink by 8 after each pop),
 * then report the fault as if taken on the user's instruction.
 */
1105 mov %rax, 16(%rsp) /* save rax (we don`t need saved rip) */
1106 pop %rax /* get trap number */
1107 mov %rax, 40-8(%rsp) /* put in user trap number */
1108 pop %rax /* get error code */
1109 mov %rax, 48-8-8(%rsp) /* put in user errcode */
1110 pop %rax /* restore rax */
1111 add $16,%rsp /* eat 2 more slots */
1112 /* now treat as fault from user */
1113 jmp L_enter_lohandler
1116 * Fault restoring a segment register. All of the saved state is still
1117 * on the stack untouched since we haven't yet moved the stack pointer.
1119 L_32bit_fault_set_seg:
1120 pop %rax /* get trap number/function */
1121 pop %rdx /* get error code */
/* Discard the hardware fault frame; %rsp then addresses the saved state. */
1122 add $40,%rsp /* pop stack to saved state */
1123 mov %rax,ISC32_TRAPNO(%rsp)
1124 mov %rdx,ISC32_ERR(%rsp)
1125 /* now treat as fault from user */
1126 /* except that all the state is */
1127 /* already saved - we just have to */
1128 /* move the trapno and error into */
1129 /* the compatibility frame */
1131 jmp L_32bit_enter_after_fault
1135 * Fatal exception handlers:
1137 Entry(db_task_dbl_fault64)
1138 push $(T_DOUBLE_FAULT)
1139 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1140 jmp L_enter_lohandler
/*
 * Stack fault: reported with T_STACK_FAULT but, as written, dispatched
 * through the LO_DOUBLE_FAULT handler.  NOTE(review): reusing the
 * double-fault lo handler here looks deliberate (both are fatal) but
 * is worth confirming against lo_df64.
 */
1142 Entry(db_task_stk_fault64)
1143 Entry(hi64_stack_fault)
1144 push $(T_STACK_FAULT)
1145 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1146 jmp L_enter_lohandler
/* Machine check (IST vector mc64): no CPU error code, so push zero. */
1149 push $(0) /* Error */
1150 push $(T_MACHINE_CHECK)
1151 movl $(LO_MACHINE_CHECK), ISF64_TRAPFN(%rsp)
1152 jmp L_enter_lohandler