/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm64.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#define _ARCH_I386_ASM_HELP_H_  /* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#define LO_ALLINTRS      EXT(lo_allintrs)
#define LO_ALLTRAPS      EXT(lo_alltraps)
#define LO_SYSENTER      EXT(lo_sysenter)
#define LO_SYSCALL       EXT(lo_syscall)
#define LO_UNIX_SCALL    EXT(lo_unix_scall)
#define LO_MACH_SCALL    EXT(lo_mach_scall)
#define LO_MDEP_SCALL    EXT(lo_mdep_scall)
#define LO_DIAG_SCALL    EXT(lo_diag_scall)
#define LO_DOUBLE_FAULT  EXT(lo_df64)
#define LO_MACHINE_CHECK EXT(lo_mc64)
/*
 * Interrupt descriptor table and code vectors for it.
 *
 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
 * reformatted ("fixed") before use.
 * All vectors are rebased in uber-space.
 * Special vectors (e.g. double-fault) use a non-0 IST.
 */
#define IDT64_BASE_ENTRY(vec,seg,ist,type) \
        .long   KERNEL_UBER_BASE_HI32   ;\

#define IDT64_ENTRY(vec,ist,type) \
        IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
#define IDT64_ENTRY_LOCAL(vec,ist,type) \
        IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
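/*
 * For reference: once "fixed", each entry is a standard x86-64 16-byte
 * interrupt gate descriptor:
 *	.word	offset[15:0]
 *	.word	segment selector
 *	.byte	IST index (bits 2:0)
 *	.byte	type/attributes (present bit, DPL, gate type)
 *	.word	offset[31:16]
 *	.long	offset[63:32]
 *	.long	reserved
 */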
/*
 * Push trap number and address of compatibility mode handler,
 * then branch to common trampoline. Error already pushed.
 */
#define EXCEP64_ERR(n,name) \
        IDT64_ENTRY(name,0,K_INTR_GATE) ;\
        movl    $(LO_ALLTRAPS), 4(%rsp) ;\
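/*
 * In each of these entry macros the pushed trap number occupies the low
 * 32 bits of an 8-byte stack slot, and the movl into 4(%rsp) overwrites
 * the high 32 bits with the low-memory handler address - yielding the
 * packed trapno/trapfn stack entry described in the save-area layout
 * further below.
 */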
/*
 * Push error(0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define EXCEPTION64(n,name) \
        IDT64_ENTRY(name,0,K_INTR_GATE) ;\
        movl    $(LO_ALLTRAPS), 4(%rsp) ;\
/*
 * Interrupt from user.
 * Push error (0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define EXCEP64_USR(n,name) \
        IDT64_ENTRY(name,0,U_INTR_GATE) ;\
        movl    $(LO_ALLTRAPS), 4(%rsp) ;\
        jmp     L_enter_lohandler
/*
 * Special interrupt code from user.
 */
#define EXCEP64_SPC_USR(n,name) \
        IDT64_ENTRY(name,0,U_INTR_GATE)
/*
 * Special interrupt code.
 * In 64-bit mode we may use an IST slot instead of task gates.
 */
#define EXCEP64_IST(n,name,ist) \
        IDT64_ENTRY(name,ist,K_INTR_GATE)
#define EXCEP64_SPC(n,name) \
        IDT64_ENTRY(name,0,K_INTR_GATE)
/*
 * Push zero err, interrupt vector and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define INTERRUPT64(n) \
        IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE) ;\
        movl    $(LO_ALLINTRS), 4(%rsp) ;\
        jmp     L_enter_lohandler
Entry(hi64_data_base)

Entry(hi64_text_base)

EXCEPTION64(0x00,t64_zero_div)
EXCEP64_SPC(0x01,hi64_debug)
INTERRUPT64(0x02)                       /* NMI */
EXCEP64_USR(0x03,t64_int3)
EXCEP64_USR(0x04,t64_into)
EXCEP64_USR(0x05,t64_bounds)
EXCEPTION64(0x06,t64_invop)
EXCEPTION64(0x07,t64_nofpu)
EXCEP64_IST(0x08,db_task_dbl_fault64,1)
EXCEP64_IST(0x08,hi64_double_fault,1)
EXCEPTION64(0x09,a64_fpu_over)
EXCEPTION64(0x0a,a64_inv_tss)
EXCEP64_SPC(0x0b,hi64_segnp)
EXCEP64_IST(0x0c,db_task_stk_fault64,1)
EXCEP64_SPC(0x0c,hi64_stack_fault)
EXCEP64_SPC(0x0d,hi64_gen_prot)
EXCEP64_ERR(0x0e,t64_page_fault)
EXCEPTION64(0x0f,t64_trap_0f)
EXCEPTION64(0x10,t64_fpu_err)
EXCEPTION64(0x11,t64_trap_11)
EXCEP64_IST(0x12,mc64,1)
EXCEPTION64(0x13,t64_sse_err)
EXCEPTION64(0x14,t64_trap_14)
EXCEPTION64(0x15,t64_trap_15)
EXCEPTION64(0x16,t64_trap_16)
EXCEPTION64(0x17,t64_trap_17)
EXCEPTION64(0x18,t64_trap_18)
EXCEPTION64(0x19,t64_trap_19)
EXCEPTION64(0x1a,t64_trap_1a)
EXCEPTION64(0x1b,t64_trap_1b)
EXCEPTION64(0x1c,t64_trap_1c)
EXCEPTION64(0x1d,t64_trap_1d)
EXCEPTION64(0x1e,t64_trap_1e)
EXCEPTION64(0x1f,t64_trap_1f)

EXCEP64_SPC_USR(0x80,hi64_unix_scall)
EXCEP64_SPC_USR(0x81,hi64_mach_scall)
EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
EXCEP64_SPC_USR(0x83,hi64_diag_scall)

EXCEPTION64(0xff,t64_preempt)
/*
 * Trap/interrupt entry points.
 *
 * All traps must create the following 32-bit save area on the PCB "stack"
 * - this is identical to the legacy mode 32-bit case:
 *
 *	cr2 (defined only for page fault)
 *
 *	user esp - if from user
 *	user ss - if from user
 *
 * Above this is the trap number and compatibility mode handler address
 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
 */
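/*
 * For reference, the 64-bit interrupt stack frame itself is laid down by
 * the processor (plus our pushes) in the usual order:
 *	ss, rsp, rflags, cs, rip, error code, packed trapno/trapfn
 * with the stack growing down, i.e. the trapno/trapfn slot sits at the
 * lowest address.
 */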
/*
 * Control is passed here to return to the compatibility mode user.
 * At this stage we're in kernel space in compatibility mode
 * but we need to switch into 64-bit mode in the 4G-based trampoline
 * space before performing the iret.
 */
Entry(lo64_ret_to_user)
        movl    %gs:CPU_ACTIVE_THREAD,%ecx

        movl    ACT_PCB_IDS(%ecx),%eax  /* Obtain this thread's debug state */
        cmpl    $0,%eax                 /* Is there a debug register context? */
        je      2f                      /* branch if not */
        cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
        movl    DS_DR0(%eax), %ecx      /* If so, load the 32-bit DRs */
        movl    DS_DR1(%eax), %ecx
        movl    DS_DR2(%eax), %ecx
        movl    DS_DR3(%eax), %ecx
        movl    DS_DR7(%eax), %ecx
        movl    %ecx, %gs:CPU_DR7
        movl    $0, %gs:CPU_DR7 + 4

        ENTER_64BIT_MODE()              /* Enter long mode */
        mov     DS64_DR0(%eax), %rcx    /* Load the full-width DRs */
        mov     DS64_DR1(%eax), %rcx
        mov     DS64_DR2(%eax), %rcx
        mov     DS64_DR3(%eax), %rcx
        mov     DS64_DR7(%eax), %rcx
        mov     %rcx, %gs:CPU_DR7
        jmp     3f                      /* Enter uberspace */
/*
 * Now switch %cr3, if necessary.
 */
        swapgs                          /* switch back to uber-kernel gs base */
        mov     %gs:CPU_TASK_CR3,%rcx
        mov     %rcx,%gs:CPU_ACTIVE_CR3

        /* flag the copyio engine state as WINDOWS_CLEAN */
        mov     %gs:CPU_ACTIVE_THREAD,%eax
        movl    $(WINDOWS_CLEAN),ACT_COPYIO_STATE(%eax)
        mov     %rcx,%cr3               /* switch to user's address space */

        mov     %gs:CPU_DR7, %rax       /* Is there a debug control register? */
        mov     %rax, %dr7              /* Set DR7 */

/*
 * Adjust stack to use uber-space.
 */
        mov     $(KERNEL_UBER_BASE_HI32), %rax
        shrd    $32, %rax, %rsp         /* relocate into uber-space */
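        /*
         * Net effect of this relocation sequence: the 32-bit stack
         * offset stays in the low half of %rsp while
         * KERNEL_UBER_BASE_HI32 lands in the high half, so the same
         * per-thread stack is now addressed through its uber-space
         * alias.
         */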
        cmpl    $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
Entry(lo64_ret_to_kernel)
        swapgs                          /* switch back to uber-kernel gs base */

/*
 * Adjust stack to use uber-space.
 */
        mov     $(KERNEL_UBER_BASE_HI32), %rax
        shrd    $32, %rax, %rsp         /* relocate into uber-space */

        /* Check for return to 64-bit kernel space (EFI today) */
        cmpl    $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
        /* fall through for 32-bit return */
/*
 * Restore registers into the machine state for iret.
 */
        movl    R_EIP(%rsp), %eax
        movl    %eax, ISC32_RIP(%rsp)
        movl    R_EFLAGS(%rsp), %eax
        movl    %eax, ISC32_RFLAGS(%rsp)
        movl    R_CS(%rsp), %eax
        movl    %eax, ISC32_CS(%rsp)
        movl    R_UESP(%rsp), %eax
        movl    %eax, ISC32_RSP(%rsp)
        movl    R_SS(%rsp), %eax
        movl    %eax, ISC32_SS(%rsp)
/*
 * Restore general 32-bit registers
 */
        movl    R_EAX(%rsp), %eax
        movl    R_EBX(%rsp), %ebx
        movl    R_ECX(%rsp), %ecx
        movl    R_EDX(%rsp), %edx
        movl    R_EBP(%rsp), %ebp
        movl    R_ESI(%rsp), %esi
        movl    R_EDI(%rsp), %edi
/*
 * Restore segment registers. We may take an exception here but
 * we've got enough space left in the save frame area to absorb
 * a hardware frame plus the trapfn and trapno.
 */
        add     $(ISC32_OFFSET)+8+8, %rsp       /* pop compat frame +
                                                   trapno/trapfn and error */
        cmp     $(SYSENTER_CS),ISF64_CS-8-8(%rsp)
                                                /* test for fast entry/exit */
        iretq                                   /* return from interrupt */

        pop     %rdx                    /* user return eip */
        pop     %rcx                    /* pop and toss cs */
        andl    $(~EFL_IF), (%rsp)      /* clear interrupt enable; sti below */
        popf                            /* flags - carry denotes failure */
        pop     %rcx                    /* user return esp */
        sti                             /* interrupts enabled after sysexit */
        sysexit                         /* 32-bit sysexit */
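/*
 * Reminder: sysexit resumes user mode with %eip <- %edx and %esp <- %ecx,
 * and cs/ss derived from the IA32_SYSENTER_CS MSR. It does not restore
 * rflags, hence the explicit popf above; the sti immediately before it
 * relies on the one-instruction interrupt shadow to cover the sysexit.
 */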
/*
 * Set the GS Base MSR with the user's gs base.
 */
        movl    %gs:CPU_UBER_USER_GS_BASE, %eax
        movl    %gs:CPU_UBER_USER_GS_BASE+4, %edx
        movl    $(MSR_IA32_GS_BASE), %ecx
        testb   $3, R64_CS(%rsp)        /* returning to user-space? */
        wrmsr                           /* set 64-bit base */
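        /*
         * For reference: wrmsr loads the MSR indexed by %ecx from
         * %edx:%eax - here installing the user's 64-bit gs base.
         */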
/*
 * Restore general 64-bit registers
 */
        mov     R64_R15(%rsp), %r15
        mov     R64_R14(%rsp), %r14
        mov     R64_R13(%rsp), %r13
        mov     R64_R12(%rsp), %r12
        mov     R64_R11(%rsp), %r11
        mov     R64_R10(%rsp), %r10
        mov     R64_R9(%rsp), %r9
        mov     R64_R8(%rsp), %r8
        mov     R64_RSI(%rsp), %rsi
        mov     R64_RDI(%rsp), %rdi
        mov     R64_RBP(%rsp), %rbp
        mov     R64_RDX(%rsp), %rdx
        mov     R64_RBX(%rsp), %rbx
        mov     R64_RCX(%rsp), %rcx
        mov     R64_RAX(%rsp), %rax

        add     $(ISS64_OFFSET)+8+8, %rsp       /* pop saved state frame +
                                                   trapno/trapfn and error */
        cmpl    $(SYSCALL_CS),ISF64_CS-8-8(%rsp)
                                                /* test for fast entry/exit */
        iretq                                   /* return from interrupt */
/*
 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
 *	rsp	user stack pointer
 */
        mov     ISF64_RIP-16(%rsp), %rcx
        mov     ISF64_RFLAGS-16(%rsp), %r11
        mov     ISF64_RSP-16(%rsp), %rsp
        sysretq                         /* return from system call */
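/*
 * Reminder: sysretq reloads %rip from %rcx and %rflags from %r11
 * (mirroring what the syscall instruction saved there), with cs/ss
 * taken from the IA32_STAR MSR; the user %rsp must be restored by
 * hand, as above.
 */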
/*
 * Common path to enter locore handlers.
 */
        swapgs                          /* switch to kernel gs (cpu_data) */
L_enter_lohandler_continue:
        cmpl    $(USER64_CS), ISF64_CS(%rsp)
        je      L_64bit_enter           /* this is a 64-bit user task */
        cmpl    $(KERNEL64_CS), ISF64_CS(%rsp)
        je      L_64bit_enter           /* we're in 64-bit (EFI) code */
/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */
Entry(hi64_unix_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
L_unix_scall_continue:
        push    %rax                    /* save system call number */
        movl    $(LO_UNIX_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check
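/*
 * Illustrative only - a 32-bit user process typically reaches
 * hi64_unix_scall via a hypothetical stub of the form (arguments
 * already pushed per the 32-bit BSD convention):
 *	movl	$4, %eax	# unix syscall number, e.g. write
 *	int	$0x80		# enters through the 0x80 vector above
 */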
Entry(hi64_mach_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
L_mach_scall_continue:
        push    %rax                    /* save system call number */
        movl    $(LO_MACH_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check

Entry(hi64_mdep_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
L_mdep_scall_continue:
        push    %rax                    /* save system call number */
        movl    $(LO_MDEP_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check

Entry(hi64_diag_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
L_diag_scall_continue:
        push    %rax                    /* save system call number */
        movl    $(LO_DIAG_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check
        swapgs                          /* Kapow! get per-cpu data area */
        mov     %rsp, %gs:CPU_UBER_TMP  /* save user stack */
        mov     %gs:CPU_UBER_ISF, %rsp  /* switch stack to pcb */

/*
 * Save values in the ISF frame in the PCB
 * to cons up the saved machine state.
 */
        movl    $(USER_DS), ISF64_SS(%rsp)
        movl    $(SYSCALL_CS), ISF64_CS(%rsp)   /* cs - a pseudo-segment */
        mov     %r11, ISF64_RFLAGS(%rsp)        /* rflags */
        mov     %rcx, ISF64_RIP(%rsp)           /* rip */
        mov     %gs:CPU_UBER_TMP, %rcx
        mov     %rcx, ISF64_RSP(%rsp)           /* user stack */
        mov     %rax, ISF64_ERR(%rsp)           /* err/rax - syscall code */
        movl    $(0), ISF64_TRAPNO(%rsp)        /* trapno */
        movl    $(LO_SYSCALL), ISF64_TRAPFN(%rsp)
        jmp     L_64bit_enter           /* this can only be a 64-bit task */
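/*
 * Reminder: the syscall instruction left the user %rip in %rcx and the
 * user %rflags in %r11, which is why those are the values stored into
 * the ISF above; %rsp is the one register it does not save, hence the
 * CPU_UBER_TMP staging.
 */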
/*
 * sysenter entry point
 * Requires user code to set up:
 *	edx: user instruction pointer (return address)
 *	ecx: user stack pointer
 *		on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *       or requiring ecx to be preserved.
 */
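/*
 * Illustrative user-side sequence (hypothetical stub, following the
 * convention above):
 *	popl	%edx		# return address into %edx
 *	movl	%esp, %ecx	# user stack pointer into %ecx
 *	sysenter		# enters at hi64_sysenter
 */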
        mov     (%rsp), %rsp            /* switch from temporary stack to pcb */
/*
 * Push values onto the PCB stack
 * to cons up the saved machine state.
 */
        push    $(USER_DS)              /* ss */

/*
 * Clear, among others, the Nested Task (NT) flag bit;
 * this is cleared by INT, but not by sysenter, which only
 * clears RF, VM and IF.
 */
        push    $(SYSENTER_CS)          /* cs */
        swapgs                          /* switch to kernel gs (cpu_data) */
        push    %rax                    /* err/eax - syscall code */
        movl    $(LO_SYSENTER), ISF64_TRAPFN(%rsp)
        orl     $(EFL_IF), ISF64_RFLAGS(%rsp)
/*
 * Check we're not a confused 64-bit user.
 */
        cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
        jne     L_64bit_entry_reject
        /* fall through to 32-bit handler: */
/*
 * Make space for the compatibility save area.
 */
        sub     $(ISC32_OFFSET), %rsp
        movl    $(SS_32), SS_FLAVOR(%rsp)
/*
 * Save general 32-bit registers
 */
        mov     %eax, R_EAX(%rsp)
        mov     %ebx, R_EBX(%rsp)
        mov     %ecx, R_ECX(%rsp)
        mov     %edx, R_EDX(%rsp)
        mov     %ebp, R_EBP(%rsp)
        mov     %esi, R_ESI(%rsp)
        mov     %edi, R_EDI(%rsp)
        /* Unconditionally save cr2; only meaningful on page faults */
        mov     %eax, R_CR2(%rsp)

/*
 * Copy registers already saved in the machine state
 * (in the interrupt stack frame) into the compat save area.
 */
        mov     ISC32_RIP(%rsp), %eax
        mov     %eax, R_EIP(%rsp)
        mov     ISC32_RFLAGS(%rsp), %eax
        mov     %eax, R_EFLAGS(%rsp)
        mov     ISC32_CS(%rsp), %eax
        mov     ISC32_RSP(%rsp), %eax
        mov     %eax, R_UESP(%rsp)
        mov     ISC32_SS(%rsp), %eax

L_32bit_enter_after_fault:
        mov     ISC32_TRAPNO(%rsp), %ebx        /* %ebx := trapno for later */
        mov     %ebx, R_TRAPNO(%rsp)
        mov     ISC32_ERR(%rsp), %eax
        mov     %eax, R_ERR(%rsp)
        mov     ISC32_TRAPFN(%rsp), %edx
/*
 * Common point to enter lo_handler in compatibility mode:
 *	%edx	locore handler address
 */
/*
 * Switch address space to kernel
 * if not shared space and not already mapped.
 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
 */
        mov     %gs:CPU_TASK_CR3, %rcx
        cmp     %rax, %rcx              /* is the task's cr3 loaded? */
        cmpl    $(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
        mov     %gs:CPU_KERNEL_CR3, %rcx
        mov     %rcx, %gs:CPU_ACTIVE_CR3
/*
 * Switch to compatibility mode.
 * Then establish kernel segments.
 */
        swapgs                          /* Done with uber-kernel gs */
/*
 * Now in compatibility mode and running in compatibility space,
 * prepare to enter the locore handler.
 *	%edx	lo_handler pointer
 * Note: the stack pointer (now 32-bit) is now directly addressing the
 * kernel below 4G and therefore is automagically re-based.
 */
        mov     $(KERNEL_DS), %eax
        mov     $(CPU_DATA_GS), %eax
        movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* Get the active thread */
        cmpl    $0, ACT_PCB_IDS(%ecx)   /* Is there a debug register state? */
        movl    $0, %ecx                /* If so, reset DR7 (the control) */
        addl    $1,%gs:hwIntCnt(,%ebx,4)        /* Bump the trap/intr count */
        /* Dispatch the designated lo handler */
L_64bit_entry_reject:
/*
 * Here for a 64-bit user attempting an invalid kernel entry.
 */
        movl    $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
        movl    $(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
        /* Fall through... */
/*
 * Here for a 64-bit user task, or special 64-bit kernel code.
 * Make space for the save area.
 */
        sub     $(ISS64_OFFSET), %rsp
        movl    $(SS_64), SS_FLAVOR(%rsp)

        mov     %fs, R64_FS(%rsp)
        mov     %gs, R64_GS(%rsp)

        /* Save general-purpose registers */
        mov     %rax, R64_RAX(%rsp)
        mov     %rcx, R64_RCX(%rsp)
        mov     %rbx, R64_RBX(%rsp)
        mov     %rbp, R64_RBP(%rsp)
        mov     %r11, R64_R11(%rsp)
        mov     %r12, R64_R12(%rsp)
        mov     %r13, R64_R13(%rsp)
        mov     %r14, R64_R14(%rsp)
        mov     %r15, R64_R15(%rsp)

        /* cr2 is significant only for page-faults */
        mov     %rax, R64_CR2(%rsp)

        /* Other registers (which may contain syscall args) */
        mov     %rdi, R64_RDI(%rsp)     /* arg0 .. */
        mov     %rsi, R64_RSI(%rsp)
        mov     %rdx, R64_RDX(%rsp)
        mov     %r10, R64_R10(%rsp)
        mov     %r8, R64_R8(%rsp)
        mov     %r9, R64_R9(%rsp)       /* .. arg5 */
L_64bit_enter_after_fault:
/*
 * At this point we're almost ready to join the common lo-entry code.
 */
        mov     R64_TRAPNO(%rsp), %ebx
        mov     R64_TRAPFN(%rsp), %edx
        jmp     L_enter_lohandler2
/*
 * Debug trap. Check for single-stepping across system call into
 * kernel. If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
        swapgs                          /* set %gs for cpu data */
        push    $0                      /* error code */
        movl    $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)

        testb   $3, ISF64_CS(%rsp)
        jnz     L_enter_lohandler_continue
/*
 * trap came from kernel mode
 */
        cmpl    $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
        jne     L_enter_lohandler_continue      /* trap not in uber-space */

        cmpl    $(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
        add     $(ISF64_SIZE),%rsp      /* remove entire intr stack frame */
        jmp     L_mach_scall_continue   /* continue system call entry */

        cmpl    $(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
        add     $(ISF64_SIZE),%rsp      /* remove entire intr stack frame */
        jmp     L_mdep_scall_continue   /* continue system call entry */

        cmpl    $(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
        add     $(ISF64_SIZE),%rsp      /* remove entire intr stack frame */
        jmp     L_unix_scall_continue   /* continue system call entry */

        cmpl    $(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
        jne     L_enter_lohandler_continue
/*
 * Interrupt stack frame has been pushed on the temporary stack.
 * We have to switch to pcb stack and copy eflags.
 */
        add     $32,%rsp                /* remove trapno/trapfn/err/rip/cs */
        push    %rcx                    /* save %rcx - user stack pointer */
        mov     32(%rsp),%rcx           /* top of intr stack -> pcb stack */
        xchg    %rcx,%rsp               /* switch to pcb stack */
        push    $(USER_DS)              /* ss */
        push    (%rcx)                  /* saved %rcx into rsp slot */
        push    8(%rcx)                 /* rflags */
        mov     (%rcx),%rcx             /* restore %rcx */
        push    $(SYSENTER_TF_CS)       /* cs - not SYSENTER_CS for iret path */
        jmp     L_sysenter_continue     /* continue sysenter entry */
Entry(hi64_double_fault)
        swapgs                          /* set %gs for cpu data */
        push    $(T_DOUBLE_FAULT)
        movl    $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)

        cmpl    $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
        jne     L_enter_lohandler_continue      /* trap not in uber-space */

        cmpl    $(EXT(hi64_syscall)), ISF64_RIP(%rsp)
        jne     L_enter_lohandler_continue

        mov     ISF64_RSP(%rsp), %rsp
        jmp     L_syscall_continue
/*
 * General protection or segment-not-present fault.
 * Check for a GP/NP fault in the kernel_return
 * sequence; if there, report it as a GP/NP fault on the user's instruction.
 *
 * rsp->  0:	trap code (NP or GP) and trap function
 *        8:	segment number in error (error code)
 *
 *       56:	old registers (trap is from kernel)
 */
Entry(hi64_gen_prot)
        push    $(T_GENERAL_PROTECTION)
        jmp     trap_check_kernel_exit  /* check for kernel exit sequence */

Entry(hi64_stack_fault)
        push    $(T_STACK_FAULT)
        jmp     trap_check_kernel_exit  /* check for kernel exit sequence */

        push    $(T_SEGMENT_NOT_PRESENT)
                                        /* indicate fault type */
trap_check_kernel_exit:
        movl    $(LO_ALLTRAPS), 4(%rsp)
        /* trap was from kernel mode, so */
        /* check for the kernel exit sequence */
        cmpl    $(KERNEL_UBER_BASE_HI32), 16+4(%rsp)
        jne     hi64_take_trap          /* trap not in uber-space */

        cmpl    $(EXT(ret32_iret)), 16(%rsp)
        cmpl    $(EXT(ret32_set_ds)), 16(%rsp)
        je      L_32bit_fault_set_seg
        cmpl    $(EXT(ret32_set_es)), 16(%rsp)
        je      L_32bit_fault_set_seg
        cmpl    $(EXT(ret32_set_fs)), 16(%rsp)
        je      L_32bit_fault_set_seg
        cmpl    $(EXT(ret32_set_gs)), 16(%rsp)
        je      L_32bit_fault_set_seg

        cmpl    $(EXT(ret64_iret)), 16(%rsp)

        jmp     L_enter_lohandler
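/*
 * Note on the offsets above: with the 8-byte packed trapno/trapfn slot
 * at 0(%rsp) and the 8-byte error code at 8(%rsp), 16(%rsp) addresses
 * the saved rip of the hardware frame and 16+4(%rsp) its upper 32 bits
 * (compared against KERNEL_UBER_BASE_HI32 to detect uber-space).
 */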
/*
 * GP/NP fault on IRET: CS or SS is in error.
 * All registers contain the user's values.
 *
 *	 0	trap number/function
 *
 *	48	ss	--> new trapno/trapfn
 *	56	(16-byte padding) --> new errcode
 */
        mov     %rax, 16(%rsp)          /* save rax (we don't need saved rip) */
        mov     0(%rsp), %rax           /* get trap number */
        mov     %rax, 48(%rsp)          /* put in user trap number */
        mov     8(%rsp), %rax           /* get error code */
        mov     %rax, 56(%rsp)          /* put in user errcode */
        mov     16(%rsp), %rax          /* restore rax */
        add     $48, %rsp               /* reset to original frame */
                                        /* now treat as fault from user */
        mov     %rax, 16(%rsp)          /* save rax (we don't need saved rip) */
        mov     0(%rsp), %rax           /* get trap number */
        mov     %rax, 48(%rsp)          /* put in user trap number */
        mov     8(%rsp), %rax           /* get error code */
        mov     %rax, 56(%rsp)          /* put in user errcode */
        mov     16(%rsp), %rax          /* restore rax */
        add     $48, %rsp               /* reset to original frame */
                                        /* now treat as fault from user */
/*
 * Fault restoring a segment register. All of the saved state is still
 * on the stack untouched since we didn't move the stack pointer.
 */
L_32bit_fault_set_seg:
        mov     0(%rsp), %rax           /* get trap number/function */
        mov     8(%rsp), %rdx           /* get error code */
        mov     40(%rsp), %rsp          /* reload stack prior to fault */
        mov     %rax,ISC32_TRAPNO(%rsp)
        mov     %rdx,ISC32_ERR(%rsp)
                                        /* now treat as fault from user */
                                        /* except that all the state is */
                                        /* already saved - we just have to */
                                        /* move the trapno and error into */
                                        /* the compatibility frame */
        jmp     L_32bit_enter_after_fault
/*
 * Fatal exception handlers:
 */
Entry(db_task_dbl_fault64)
        push    $(T_DOUBLE_FAULT)
        movl    $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
        jmp     L_enter_lohandler

Entry(db_task_stk_fault64)
        push    $(T_STACK_FAULT)
        movl    $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
        jmp     L_enter_lohandler

        push    $(0)                    /* Error */
        push    $(T_MACHINE_CHECK)
        movl    $(LO_MACHINE_CHECK), ISF64_TRAPFN(%rsp)
        jmp     L_enter_lohandler