/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm64.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#define	_ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#define	LO_ALLINTRS		EXT(lo_allintrs)
#define	LO_ALLTRAPS		EXT(lo_alltraps)
#define	LO_SYSCALL		EXT(lo_syscall)
#define	LO_UNIX_SCALL		EXT(lo_unix_scall)
#define	LO_MACH_SCALL		EXT(lo_mach_scall)
#define	LO_MDEP_SCALL		EXT(lo_mdep_scall)
#define	LO_DIAG_SCALL		EXT(lo_diag_scall)
#define	LO_DOUBLE_FAULT		EXT(lo_df64)
#define	LO_MACHINE_CHECK	EXT(lo_mc64)
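/*
 * These LO_* symbols name the compatibility-mode ("lo") handlers that
 * live in the kernel's low mapping, below 4GB, where 32-bit code can
 * reach them.  The hi-space trampolines below stash one of these
 * addresses in each stack frame so the common entry path knows where
 * to dispatch once it has dropped into compatibility mode.
 */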
/*
 * Interrupt descriptor table and code vectors for it.
 *
 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
 * reformatted ("fixed") before use.
 * All vectors are rebased in uber-space.
 * Special vectors (e.g. double-fault) use a non-zero IST.
 */
#define	IDT64_BASE_ENTRY(vec,seg,ist,type)	 \
	.data					;\
	.long	vec				;\
	.long	KERNEL_UBER_BASE_HI32		;\
	.word	seg				;\
	.byte	ist*16				;\
	.byte	type				;\
	.long	0				;\
	.text
#define	IDT64_ENTRY(vec,ist,type)		 \
	IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
#define	IDT64_ENTRY_LOCAL(vec,ist,type)		 \
	IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
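/*
 * IDT64_ENTRY names an external handler symbol (via EXT); the _LOCAL
 * variant takes a file-local label instead.  Both place the vector in
 * KERNEL64_CS.
 */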
/*
 * Push trap number and address of compatibility mode handler,
 * then branch to common trampoline. Error already pushed.
 */
#define	EXCEP64_ERR(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)		;\
	Entry(name)				;\
	push	$(n)				;\
	movl	$(LO_ALLTRAPS), 4(%rsp)		;\
	jmp	L_enter_lohandler
/*
 * Push error(0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	EXCEPTION64(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)		;\
	Entry(name)				;\
	push	$0				;\
	push	$(n)				;\
	movl	$(LO_ALLTRAPS), 4(%rsp)		;\
	jmp	L_enter_lohandler
/*
 * Interrupt from user.
 * Push error (0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	EXCEP64_USR(n,name)			 \
	IDT64_ENTRY(name,0,U_INTR_GATE)		;\
	Entry(name)				;\
	push	$0				;\
	push	$(n)				;\
	movl	$(LO_ALLTRAPS), 4(%rsp)		;\
	jmp	L_enter_lohandler
/*
 * Special interrupt code from user.
 */
#define	EXCEP64_SPC_USR(n,name)			 \
	IDT64_ENTRY(name,0,U_INTR_GATE)
/*
 * Special interrupt code.
 * In 64-bit mode we may use an IST slot instead of task gates.
 */
#define	EXCEP64_IST(n,name,ist)			 \
	IDT64_ENTRY(name,ist,K_INTR_GATE)
#define	EXCEP64_SPC(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)
/*
 * Push zero err, interrupt vector and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	INTERRUPT64(n)				 \
	IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE)	;\
L_ ## n:					;\
	push	$0				;\
	push	$(n)				;\
	movl	$(LO_ALLINTRS), 4(%rsp)		;\
	jmp	L_enter_lohandler
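/*
 * Every trampoline above leaves the stack in the same shape: a 64-bit
 * hardware interrupt frame, an error code (pushed by the hardware or as
 * an explicit zero), and the trap number with the low-memory handler
 * address packed beside it.  L_enter_lohandler consumes this uniform
 * layout regardless of the vector taken.
 */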
Entry(hi64_data_base)
	.text
	.code64
Entry(hi64_text_base)
	EXCEPTION64(0x00,t64_zero_div)
	EXCEP64_SPC(0x01,hi64_debug)
	INTERRUPT64(0x02)			/* NMI */
	EXCEP64_USR(0x03,t64_int3)
	EXCEP64_USR(0x04,t64_into)
	EXCEP64_USR(0x05,t64_bounds)
	EXCEPTION64(0x06,t64_invop)
	EXCEPTION64(0x07,t64_nofpu)
#if	MACH_KDB
	EXCEP64_IST(0x08,db_task_dbl_fault64,1)
#else
	EXCEP64_IST(0x08,hi64_double_fault,1)
#endif
	EXCEPTION64(0x09,a64_fpu_over)
	EXCEPTION64(0x0a,a64_inv_tss)
	EXCEP64_SPC(0x0b,hi64_segnp)
#if	MACH_KDB
	EXCEP64_IST(0x0c,db_task_stk_fault64,1)
#else
	EXCEP64_SPC(0x0c,hi64_stack_fault)
#endif
	EXCEP64_SPC(0x0d,hi64_gen_prot)
	EXCEP64_SPC(0x0e,hi64_page_fault)
	EXCEPTION64(0x0f,t64_trap_0f)
	EXCEPTION64(0x10,t64_fpu_err)
	EXCEPTION64(0x11,t64_trap_11)
	EXCEP64_IST(0x12,mc64,1)
	EXCEPTION64(0x13,t64_sse_err)
	EXCEPTION64(0x14,t64_trap_14)
	EXCEPTION64(0x15,t64_trap_15)
	EXCEPTION64(0x16,t64_trap_16)
	EXCEPTION64(0x17,t64_trap_17)
	EXCEPTION64(0x18,t64_trap_18)
	EXCEPTION64(0x19,t64_trap_19)
	EXCEPTION64(0x1a,t64_trap_1a)
	EXCEPTION64(0x1b,t64_trap_1b)
	EXCEPTION64(0x1c,t64_trap_1c)
	EXCEPTION64(0x1d,t64_trap_1d)
	EXCEPTION64(0x1e,t64_trap_1e)
	EXCEPTION64(0x1f,t64_trap_1f)
	EXCEP64_USR(0x7f,t64_dtrace_ret)

	EXCEP64_SPC_USR(0x80,hi64_unix_scall)
	EXCEP64_SPC_USR(0x81,hi64_mach_scall)
	EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
	EXCEP64_SPC_USR(0x83,hi64_diag_scall)

	EXCEPTION64(0xff,t64_preempt)
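/*
 * Layout note: vectors 0x00-0x1f are the architecturally defined
 * exceptions; most of the remaining vectors are laid down as INTERRUPT64
 * entries for external interrupts.  Vectors 0x80-0x83 depart from that
 * pattern: they are the software-interrupt system call gates, declared
 * with U_INTR_GATE so that user mode may raise them directly, while
 * 0x7f (t64_dtrace_ret) serves dtrace's user return probes.
 */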
/*
 * Trap/interrupt entry points.
 *
 * All traps must create the following 32-bit save area on the PCB "stack"
 * - this is identical to the legacy mode 32-bit case:
 *
 *	gs
 *	fs
 *	es
 *	ds
 *	edi
 *	esi
 *	ebp
 *	cr2 (defined only for page fault)
 *	ebx
 *	edx
 *	ecx
 *	eax
 *	trap number
 *	error code
 *	eip
 *	cs
 *	eflags
 *	user esp - if from user
 *	user ss  - if from user
 *
 * Above this is the trap number and compatibility mode handler address
 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
 *
 *	(trapno, trapfn)
 *	err
 *	rip
 *	cs
 *	rflags
 *	rsp
 *	ss
 */
/*
 * Control is passed here to return to the compatibility mode user.
 * At this stage we're in kernel space in compatibility mode
 * but we need to switch into 64-bit mode in the 4G-based trampoline
 * space before performing the iret.
 */
Entry(lo64_ret_to_user)
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	movl	ACT_PCB_IDS(%ecx),%eax	/* Obtain this thread's debug state */
	cmpl	$0,%eax			/* Is there a debug register context? */
	je	2f			/* branch if not */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
	jne	1f
	movl	DS_DR0(%eax), %ecx	/* If so, load the 32 bit DRs */
	movl	%ecx, %db0
	movl	DS_DR1(%eax), %ecx
	movl	%ecx, %db1
	movl	DS_DR2(%eax), %ecx
	movl	%ecx, %db2
	movl	DS_DR3(%eax), %ecx
	movl	%ecx, %db3
	movl	DS_DR7(%eax), %ecx
	movl	%ecx, %gs:CPU_DR7
	movl	$0, %gs:CPU_DR7 + 4
	jmp	2f
1:
	ENTER_64BIT_MODE()		/* Enter long mode */
	mov	DS64_DR0(%eax), %rcx	/* Load the full width DRs */
	mov	%rcx, %dr0
	mov	DS64_DR1(%eax), %rcx
	mov	%rcx, %dr1
	mov	DS64_DR2(%eax), %rcx
	mov	%rcx, %dr2
	mov	DS64_DR3(%eax), %rcx
	mov	%rcx, %dr3
	mov	DS64_DR7(%eax), %rcx
	mov	%rcx, %gs:CPU_DR7
	jmp	3f			/* Enter uberspace */
2:
	ENTER_64BIT_MODE()
3:
/*
 * Now switch %cr3, if necessary.
 */
	swapgs				/* switch back to uber-kernel gs base */
	mov	%gs:CPU_TASK_CR3,%rcx
	mov	%rcx,%gs:CPU_ACTIVE_CR3
	mov	%cr3, %rax
	cmp	%rcx, %rax
	je	1f
	/* flag the copyio engine state as WINDOWS_CLEAN */
	mov	%gs:CPU_ACTIVE_THREAD,%eax
	movl	$(WINDOWS_CLEAN),ACT_COPYIO_STATE(%eax)
	mov	%rcx,%cr3		/* switch to user's address space */
1:
	mov	%gs:CPU_DR7, %rax	/* Is there a debug control register? */
	cmp	$0, %rax
	je	1f
	mov	%rax, %dr7		/* Set DR7 */
1:
/*
 * Adjust stack to use uber-space.
 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp			/* relocate into uber-space */

	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	jmp	L_32bit_return
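/*
 * The idiom above relocates the stack pointer without changing its low
 * 32 bits: shifting %rsp left by 32 and then double-shifting right with
 * shrd pulls KERNEL_UBER_BASE_HI32 from %rax into the upper half, so
 * the same PCB stack is now addressed through its uber-space alias.
 */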
Entry(lo64_ret_to_kernel)
	ENTER_64BIT_MODE()
	swapgs				/* switch back to uber-kernel gs base */

/*
 * Adjust stack to use uber-space.
 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp			/* relocate into uber-space */

	/* Check for return to 64-bit kernel space (EFI today) */
	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	/* fall through for 32-bit return */

L_32bit_return:
/*
 * Restore registers into the machine state for iret.
 */
	movl	R_EIP(%rsp), %eax
	movl	%eax, ISC32_RIP(%rsp)
	movl	R_EFLAGS(%rsp), %eax
	movl	%eax, ISC32_RFLAGS(%rsp)
	movl	R_CS(%rsp), %eax
	movl	%eax, ISC32_CS(%rsp)
	movl	R_UESP(%rsp), %eax
	movl	%eax, ISC32_RSP(%rsp)
	movl	R_SS(%rsp), %eax
	movl	%eax, ISC32_SS(%rsp)
/*
 * Restore general 32-bit registers
 */
	movl	R_EAX(%rsp), %eax
	movl	R_EBX(%rsp), %ebx
	movl	R_ECX(%rsp), %ecx
	movl	R_EDX(%rsp), %edx
	movl	R_EBP(%rsp), %ebp
	movl	R_ESI(%rsp), %esi
	movl	R_EDI(%rsp), %edi
/*
 * Restore segment registers. We may take an exception here but
 * we've got enough space left in the save frame area to absorb
 * a hardware frame plus the trapfn and trapno.
 */
	swapgs
EXT(ret32_set_ds):
	movw	R_DS(%rsp), %ds
EXT(ret32_set_es):
	movw	R_ES(%rsp), %es
EXT(ret32_set_fs):
	movw	R_FS(%rsp), %fs
EXT(ret32_set_gs):
	movw	R_GS(%rsp), %gs

	add	$(ISC32_OFFSET)+8+8, %rsp	/* pop compat frame +
						   trapno/trapfn and error */
	cmp	$(SYSENTER_CS),ISF64_CS-8-8(%rsp)
						/* test for fast entry/exit */
	je	L_fast_exit
EXT(ret32_iret):
	iretq					/* return from interrupt */

L_fast_exit:
	pop	%rdx				/* user return eip */
	pop	%rcx				/* pop and toss cs */
	andl	$(~EFL_IF), (%rsp)		/* clear interrupt enable, sti below */
	popf					/* flags - carry denotes failure */
	pop	%rcx				/* user return esp */
	sti					/* interrupts enabled after sysexit */
	sysexit					/* 32-bit sysexit */
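/*
 * sysexit resumes user mode with %edx supplying the return eip and %ecx
 * the user esp, which is why those registers are staged from the frame
 * above.  Flags are restored with IF forced clear so that no interrupt
 * can be taken between the popf and the closing sti/sysexit pair.
 */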
L_64bit_return:
/*
 * Set the GS Base MSR with the user's gs base.
 */
	movl	%gs:CPU_UBER_USER_GS_BASE, %eax
	movl	%gs:CPU_UBER_USER_GS_BASE+4, %edx
	movl	$(MSR_IA32_GS_BASE), %ecx
	swapgs
	testb	$3, R64_CS(%rsp)		/* returning to user-space? */
	jz	1f
	wrmsr					/* set 64-bit base */
1:
/*
 * Restore general 64-bit registers
 */
	mov	R64_R15(%rsp), %r15
	mov	R64_R14(%rsp), %r14
	mov	R64_R13(%rsp), %r13
	mov	R64_R12(%rsp), %r12
	mov	R64_R11(%rsp), %r11
	mov	R64_R10(%rsp), %r10
	mov	R64_R9(%rsp), %r9
	mov	R64_R8(%rsp), %r8
	mov	R64_RSI(%rsp), %rsi
	mov	R64_RDI(%rsp), %rdi
	mov	R64_RBP(%rsp), %rbp
	mov	R64_RDX(%rsp), %rdx
	mov	R64_RBX(%rsp), %rbx
	mov	R64_RCX(%rsp), %rcx
	mov	R64_RAX(%rsp), %rax
	add	$(ISS64_OFFSET)+8+8, %rsp	/* pop saved state frame +
						   trapno/trapfn and error */
	cmpl	$(SYSCALL_CS),ISF64_CS-8-8(%rsp)
						/* test for fast entry/exit */
	je	L_sysret
EXT(ret64_iret):
	iretq					/* return from interrupt */

L_sysret:
/*
 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
 *	rcx	user rip
 *	r11	user rflags
 *	rsp	user stack pointer
 */
	mov	ISF64_RIP-16(%rsp), %rcx
	mov	ISF64_RFLAGS-16(%rsp), %r11
	mov	ISF64_RSP-16(%rsp), %rsp
	sysretq					/* return from system call */
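/*
 * sysretq architecturally reloads %rip from %rcx and %rflags from %r11,
 * so only the stack pointer needs restoring by hand; the -16 offsets
 * account for the trapno/trapfn and error words already popped from the
 * frame by the add above.
 */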
/*
 * Common path to enter locore handlers.
 */
L_enter_lohandler:
	swapgs					/* switch to kernel gs (cpu_data) */
L_enter_lohandler_continue:
	cmpl	$(USER64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter			/* this is a 64-bit user task */
	cmpl	$(KERNEL64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter			/* we're in 64-bit (EFI) code */
	jmp	L_32bit_enter
/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */
Entry(hi64_unix_scall)
	swapgs					/* switch to kernel gs (cpu_data) */
L_unix_scall_continue:
	push	%rax				/* save system call number */
	movl	$(LO_UNIX_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check

Entry(hi64_mach_scall)
	swapgs					/* switch to kernel gs (cpu_data) */
L_mach_scall_continue:
	push	%rax				/* save system call number */
	movl	$(LO_MACH_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check

Entry(hi64_mdep_scall)
	swapgs					/* switch to kernel gs (cpu_data) */
L_mdep_scall_continue:
	push	%rax				/* save system call number */
	movl	$(LO_MDEP_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check

Entry(hi64_diag_scall)
	swapgs					/* switch to kernel gs (cpu_data) */
L_diag_scall_continue:
	push	%rax				/* save system call number */
	movl	$(LO_DIAG_SCALL), 4(%rsp)
	jmp	L_32bit_enter_check
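/*
 * Note the packing trick shared by these trampolines: pushing %rax puts
 * the 32-bit syscall number in the low half of an 8-byte slot, and the
 * movl to 4(%rsp) drops the low-memory handler address into the high
 * half - a single push builds the packed (number, handler) pair that
 * the common entry code expects.
 */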
Entry(hi64_syscall)
	swapgs					/* Kapow! get per-cpu data area */
L_syscall_continue:
	mov	%rsp, %gs:CPU_UBER_TMP		/* save user stack */
	mov	%gs:CPU_UBER_ISF, %rsp		/* switch stack to pcb */

/*
 * Save values in the ISF frame in the PCB
 * to cons up the saved machine state.
 */
	movl	$(USER_DS), ISF64_SS(%rsp)
	movl	$(SYSCALL_CS), ISF64_CS(%rsp)	/* cs - a pseudo-segment */
	mov	%r11, ISF64_RFLAGS(%rsp)	/* rflags */
	mov	%rcx, ISF64_RIP(%rsp)		/* rip */
	mov	%gs:CPU_UBER_TMP, %rcx
	mov	%rcx, ISF64_RSP(%rsp)		/* user stack */
	mov	%rax, ISF64_ERR(%rsp)		/* err/rax - syscall code */
	movl	$(0), ISF64_TRAPNO(%rsp)	/* trapno */
	movl	$(LO_SYSCALL), ISF64_TRAPFN(%rsp)
	jmp	L_64bit_enter			/* this can only be a 64-bit task */
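/*
 * Unlike an interrupt gate, the syscall instruction pushes no hardware
 * frame and switches neither stack nor address space; it only moves rip
 * into %rcx and rflags into %r11.  That is why the code above saves the
 * user %rsp in a scratch slot, adopts the PCB stack, and builds the
 * interrupt-stack frame by hand before joining the common 64-bit entry
 * path.
 */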
L_32bit_enter_check:
	/*
	 * Check we're not a confused 64-bit user.
	 */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
	jmp	L_32bit_enter
/*
 * sysenter entry point
 * Requires user code to set up:
 *	edx: user instruction pointer (return address)
 *	ecx: user stack pointer
 *		on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *       or requiring ecx to be preserved.
 */
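/*
 * For illustration only, a user-mode stub honoring the contract above
 * might look roughly like this (a hypothetical sketch, not code from
 * this file or any particular libc):
 *
 *	push	%ebx		// per the contract: save ebx on the stack
 *	mov	%esp, %ecx	// ecx: user stack pointer
 *	mov	$1f, %edx	// edx: where sysexit should resume
 *	sysenter
 * 1:	pop	%ebx		// restore ebx after return
 */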
Entry(hi64_sysenter)
	mov	(%rsp), %rsp		/* switch from temporary stack to pcb */
/*
 * Push values on to the PCB stack
 * to cons up the saved machine state.
 */
	push	$(USER_DS)			/* ss */
	push	%rcx				/* uesp */
	pushf					/* flags */
	/*
	 * Clear, among others, the Nested Task (NT) flag bit;
	 * this is zeroed by INT, but not by SYSENTER.
	 */
	push	$0
	popf
	push	$(SYSENTER_CS)			/* cs */
	swapgs					/* switch to kernel gs (cpu_data) */
L_sysenter_continue:
	push	%rdx				/* eip */
	push	%rax				/* err/eax - syscall code */
	push	$(0)				/* trapno */
	orl	$(EFL_IF), ISF64_RFLAGS(%rsp)
	movl	$(LO_MACH_SCALL), ISF64_TRAPFN(%rsp)
	testl	%eax, %eax
	js	L_32bit_enter_check		/* mach syscall numbers are negative */
	movl	$(LO_UNIX_SCALL), ISF64_TRAPFN(%rsp)
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
/* If the caller (typically LibSystem) has recorded the cumulative size of
 * the arguments in EAX, copy them over from the user stack directly.
 * We recover from exceptions inline--if the copy loop doesn't complete
 * due to an exception, we fall back to copyin from compatibility mode.
 * We can potentially extend this mechanism to mach traps as well (DRK).
 */
L_sysenter_copy_args:
	testl	$(I386_SYSCALL_ARG_BYTES_MASK), %eax
	jz	L_32bit_enter
	xor	%r10, %r10			/* dword index */
	mov	%gs:CPU_UBER_ARG_STORE, %r8	/* kernel-side arg store */
	movl	%eax, %r9d
	mov	%gs:CPU_UBER_ARG_STORE_VALID, %r12
	shrl	$(I386_SYSCALL_ARG_DWORDS_SHIFT), %r9d
	andl	$(I386_SYSCALL_ARG_DWORDS_MASK), %r9d	/* dword count */
	movl	$0, (%r12)			/* invalidate the arg store */
EXT(hi64_sysenter_user_arg_copy):
0:
	movl	4(%rcx, %r10, 4), %r11d		/* copy from the user stack... */
	movl	%r11d, (%r8, %r10, 4)		/* ...into the arg store */
	incl	%r10d
	decl	%r9d
	jnz	0b
	movl	$1, (%r12)			/* mark the arg store valid */
	/* Fall through to 32-bit handler */
L_32bit_enter:
	/*
	 * Make space for the compatibility save area.
	 */
	sub	$(ISC32_OFFSET), %rsp
	movl	$(SS_32), SS_FLAVOR(%rsp)

	cld
	/* Save segment regs */
	mov	%ds, R_DS(%rsp)
	mov	%es, R_ES(%rsp)
	mov	%fs, R_FS(%rsp)
	mov	%gs, R_GS(%rsp)
/*
 * Save general 32-bit registers
 */
	mov	%eax, R_EAX(%rsp)
	mov	%ebx, R_EBX(%rsp)
	mov	%ecx, R_ECX(%rsp)
	mov	%edx, R_EDX(%rsp)
	mov	%ebp, R_EBP(%rsp)
	mov	%esi, R_ESI(%rsp)
	mov	%edi, R_EDI(%rsp)

	/* Unconditionally save cr2; only meaningful on page faults */
	mov	%cr2, %rax
	mov	%eax, R_CR2(%rsp)
/*
 * Copy registers already saved in the machine state
 * (in the interrupt stack frame) into the compat save area.
 */
	mov	ISC32_RIP(%rsp), %eax
	mov	%eax, R_EIP(%rsp)
	mov	ISC32_RFLAGS(%rsp), %eax
	mov	%eax, R_EFLAGS(%rsp)
	mov	ISC32_CS(%rsp), %eax
	mov	%eax, R_CS(%rsp)
	mov	ISC32_RSP(%rsp), %eax
	mov	%eax, R_UESP(%rsp)
	mov	ISC32_SS(%rsp), %eax
	mov	%eax, R_SS(%rsp)
L_32bit_enter_after_fault:
	mov	ISC32_TRAPNO(%rsp), %ebx	/* %ebx := trapno for later */
	mov	%ebx, R_TRAPNO(%rsp)
	mov	ISC32_ERR(%rsp), %eax
	mov	%eax, R_ERR(%rsp)
	mov	ISC32_TRAPFN(%rsp), %edx
/*
 * Common point to enter lo_handler in compatibility mode:
 *	%ebx	trapno
 *	%edx	locore handler address
 */
L_enter_lohandler2:
/*
 * Switch address space to kernel
 * if not shared space and not already mapped.
 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
 */
	mov	%cr3, %rax
	mov	%gs:CPU_TASK_CR3, %rcx
	cmp	%rax, %rcx			/* is the task's cr3 loaded? */
	jne	1f
	cmpl	$(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
	je	2f
1:
	mov	%gs:CPU_KERNEL_CR3, %rcx
	mov	%rcx, %cr3
	mov	%rcx, %gs:CPU_ACTIVE_CR3
2:
/*
 * Switch to compatibility mode.
 * Then establish kernel segments.
 */
	swapgs					/* Done with uber-kernel gs */
	ENTER_COMPAT_MODE()

/*
 * Now in compatibility mode and running in compatibility space,
 * prepare to enter the locore handler.
 *	%ebx	trapno
 *	%edx	lo_handler pointer
 * Note: the stack pointer (now 32-bit) is now directly addressing the
 * kernel below 4G and therefore is automagically re-based.
 */
	mov	$(KERNEL_DS), %eax
	mov	%eax, %ss
	mov	%eax, %ds
	mov	%eax, %es
	mov	$(CPU_DATA_GS), %eax
	mov	%eax, %gs

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* Get the active thread */
	cmpl	$0, ACT_PCB_IDS(%ecx)		/* Is there a debug register state? */
	je	1f
	movl	$0, %ecx			/* If so, reset DR7 (the control) */
	movl	%ecx, %db7
1:
	addl	$1,%gs:hwIntCnt(,%ebx,4)	/* Bump the trap/intr count */

	/* Dispatch the designated lo handler */
	jmp	*%edx
L_64bit_entry_reject:
	/*
	 * Here for a 64-bit user attempting an invalid kernel entry.
	 */
	movl	$(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
	movl	$(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
	/* Fall through... */
L_64bit_enter:
	/*
	 * Here for a 64-bit user task, or special 64-bit kernel code.
	 * Make space for the save area.
	 */
	sub	$(ISS64_OFFSET), %rsp
	movl	$(SS_64), SS_FLAVOR(%rsp)

	cld
	/* Save segment regs */
	mov	%fs, R64_FS(%rsp)
	mov	%gs, R64_GS(%rsp)
	/* Save general-purpose registers */
	mov	%rax, R64_RAX(%rsp)
	mov	%rcx, R64_RCX(%rsp)
	mov	%rbx, R64_RBX(%rsp)
	mov	%rbp, R64_RBP(%rsp)
	mov	%r11, R64_R11(%rsp)
	mov	%r12, R64_R12(%rsp)
	mov	%r13, R64_R13(%rsp)
	mov	%r14, R64_R14(%rsp)
	mov	%r15, R64_R15(%rsp)

	/* cr2 is significant only for page-faults */
	mov	%cr2, %rax
	mov	%rax, R64_CR2(%rsp)

	/* Other registers (which may contain syscall args) */
	mov	%rdi, R64_RDI(%rsp)	/* arg0 .. */
	mov	%rsi, R64_RSI(%rsp)
	mov	%rdx, R64_RDX(%rsp)
	mov	%r10, R64_R10(%rsp)
	mov	%r8, R64_R8(%rsp)
	mov	%r9, R64_R9(%rsp)	/* .. arg5 */
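/*
 * Note: in the 64-bit syscall convention the fourth argument travels in
 * %r10 rather than %rcx, because the syscall instruction itself clobbers
 * %rcx with the return rip; hence %r10 sits among the saved argument
 * registers here while %rcx was captured earlier with the machine state.
 */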
L_64bit_enter_after_fault:
	/*
	 * At this point we're almost ready to join the common lo-entry code.
	 */
	mov	R64_TRAPNO(%rsp), %ebx
	mov	R64_TRAPFN(%rsp), %edx
	jmp	L_enter_lohandler2
Entry(hi64_page_fault)
	push	$(T_PAGE_FAULT)
	movl	$(LO_ALLTRAPS), 4(%rsp)
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler
	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler
	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_32bit_enter
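/*
 * This is the recovery half of the sysenter argument-copy optimization:
 * a page fault whose faulting rip (matched against the uber-space upper
 * half first) is hi64_sysenter_user_arg_copy means the inline copy from
 * the user stack failed.  We abandon it, recover the pre-fault stack
 * pointer from the fault frame, and take the ordinary 32-bit path,
 * which performs a copyin instead.
 */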
/*
 * Debug trap.  Check for single-stepping across system call into
 * kernel.  If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
Entry(hi64_debug)
	swapgs					/* set %gs for cpu data */
	push	$0				/* error code */
	push	$(T_DEBUG)
	movl	$(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)

	testb	$3, ISF64_CS(%rsp)
	jnz	L_enter_lohandler_continue
/*
 * trap came from kernel mode
 */
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
	jne	6f
	add	$(ISF64_SIZE),%rsp		/* remove entire intr stack frame */
	jmp	L_mach_scall_continue		/* continue system call entry */
6:
	cmpl	$(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
	jne	5f
	add	$(ISF64_SIZE),%rsp		/* remove entire intr stack frame */
	jmp	L_mdep_scall_continue		/* continue system call entry */
5:
	cmpl	$(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
	jne	4f
	add	$(ISF64_SIZE),%rsp		/* remove entire intr stack frame */
	jmp	L_unix_scall_continue		/* continue system call entry */
4:
	cmpl	$(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue
/*
 * Interrupt stack frame has been pushed on the temporary stack.
 * We have to switch to pcb stack and copy eflags.
 */
	add	$32,%rsp			/* remove trapno/trapfn/err/rip/cs */
	push	%rcx				/* save %rcx - user stack pointer */
	mov	32(%rsp),%rcx			/* top of intr stack -> pcb stack */
	xchg	%rcx,%rsp			/* switch to pcb stack */
	push	$(USER_DS)			/* ss */
	push	(%rcx)				/* saved %rcx into rsp slot */
	push	8(%rcx)				/* rflags */
	mov	(%rcx),%rcx			/* restore %rcx */
	push	$(SYSENTER_TF_CS)		/* cs - not SYSENTER_CS for iret path */
	jmp	L_sysenter_continue		/* continue sysenter entry */
Entry(hi64_double_fault)
	swapgs					/* set %gs for cpu data */
	push	$(T_DOUBLE_FAULT)
	movl	$(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)

	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_syscall)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue

	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_syscall_continue
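/*
 * A double fault whose saved rip is exactly EXT(hi64_syscall) was raised
 * on the first instruction of the syscall gate, before any kernel state
 * had been established; since syscall itself switches neither stack nor
 * address space, the entry can be resumed safely at L_syscall_continue
 * with the stack pointer recovered from the fault frame.
 */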
/*
 * General protection or segment-not-present fault.
 * Check for a GP/NP fault in the kernel_return
 * sequence; if there, report it as a GP/NP fault on the user's instruction.
 *
 * rsp->	 0:	trap code (NP or GP) and trap function
 *		 8:	segment number in error (error code)
 *		16:	rip
 *		24:	cs
 *		32:	rflags
 *		40:	rsp
 *		48:	ss
 *		56:	old registers (trap is from kernel)
 */
Entry(hi64_gen_prot)
	push	$(T_GENERAL_PROTECTION)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(hi64_stack_fault)
	push	$(T_STACK_FAULT)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */
Entry(hi64_segnp)
	push	$(T_SEGMENT_NOT_PRESENT)
						/* indicate fault type */
trap_check_kernel_exit:
	movl	$(LO_ALLTRAPS), 4(%rsp)
	testb	$3, 24(%rsp)
	jnz	hi64_take_trap
						/* trap was from kernel mode, so */
						/* check for the kernel exit sequence */
	cmpl	$(KERNEL_UBER_BASE_HI32), 16+4(%rsp)
	jne	hi64_take_trap			/* trap not in uber-space */

	cmpl	$(EXT(ret32_iret)), 16(%rsp)
	je	L_fault_iret32
	cmpl	$(EXT(ret32_set_ds)), 16(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_es)), 16(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_fs)), 16(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_gs)), 16(%rsp)
	je	L_32bit_fault_set_seg

	cmpl	$(EXT(ret64_iret)), 16(%rsp)
	je	L_fault_iret64

	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	jne	hi64_take_trap
	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_32bit_enter
hi64_take_trap:
	jmp	L_enter_lohandler
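/*
 * The recovery scheme above turns a fault taken inside the kernel's own
 * return sequence into a fault charged to the user: the faulting rip is
 * matched against each known exit instruction (the iret itself or one of
 * the segment-register reloads), and control rejoins the normal trap
 * path with the user's saved state, as laid out case by case below.
 */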
/*
 * GP/NP fault on IRET: CS or SS is in error.
 * All registers contain the user's values.
 *
 * on SP is
 *	 0	trap number/function
 *	 8	errcode
 *	16	rip
 *	24	cs
 *	32	rflags
 *	40	rsp
 *	48	ss			--> new trapno/trapfn
 *	56	(16-byte padding)	--> new errcode
 */
L_fault_iret32:
	mov	%rax, 16(%rsp)		/* save rax (we don't need saved rip) */
	mov	0(%rsp), %rax		/* get trap number */
	mov	%rax, 48(%rsp)		/* put in user trap number */
	mov	8(%rsp), %rax		/* get error code */
	mov	%rax, 56(%rsp)		/* put in user errcode */
	mov	16(%rsp), %rax		/* restore rax */
	add	$48, %rsp		/* reset to original frame */
					/* now treat as fault from user */
	swapgs
	jmp	L_32bit_enter
L_fault_iret64:
	mov	%rax, 16(%rsp)		/* save rax (we don't need saved rip) */
	mov	0(%rsp), %rax		/* get trap number */
	mov	%rax, 48(%rsp)		/* put in user trap number */
	mov	8(%rsp), %rax		/* get error code */
	mov	%rax, 56(%rsp)		/* put in user errcode */
	mov	16(%rsp), %rax		/* restore rax */
	add	$48, %rsp		/* reset to original frame */
					/* now treat as fault from user */
	swapgs
	jmp	L_64bit_enter
/*
 * Fault restoring a segment register.  All of the saved state is still
 * on the stack untouched since we didn't move the stack pointer.
 */
L_32bit_fault_set_seg:
	mov	0(%rsp), %rax		/* get trap number/function */
	mov	8(%rsp), %rdx		/* get error code */
	mov	40(%rsp), %rsp		/* reload stack prior to fault */
	mov	%rax,ISC32_TRAPNO(%rsp)
	mov	%rdx,ISC32_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	swapgs
	jmp	L_32bit_enter_after_fault
/*
 * Fatal exception handlers:
 */
Entry(db_task_dbl_fault64)
	push	$(T_DOUBLE_FAULT)
	movl	$(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
	jmp	L_enter_lohandler

Entry(db_task_stk_fault64)
	push	$(T_STACK_FAULT)
	movl	$(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
	jmp	L_enter_lohandler

Entry(mc64)
	push	$(0)			/* Error */
	push	$(T_MACHINE_CHECK)
	movl	$(LO_MACHINE_CHECK), ISF64_TRAPFN(%rsp)
	jmp	L_enter_lohandler