/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm64.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/rtclock_asm.h>
#define _ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <mach/exception_types.h>
/*
 * Low-memory compatibility-mode handlers.
 */
#define	LO_ALLINTRS		EXT(lo_allintrs)
#define	LO_ALLTRAPS		EXT(lo_alltraps)
#define	LO_SYSCALL		EXT(lo_syscall)
#define	LO_UNIX_SCALL		EXT(lo_unix_scall)
#define	LO_MACH_SCALL		EXT(lo_mach_scall)
#define	LO_MDEP_SCALL		EXT(lo_mdep_scall)
#define	LO_DIAG_SCALL		EXT(lo_diag_scall)
#define	LO_DOUBLE_FAULT		EXT(lo_df64)
#define	LO_MACHINE_CHECK	EXT(lo_mc64)
/*
 * Interrupt descriptor table and code vectors for it.
 *
 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
 * reformatted ("fixed") before use.
 * All vectors are rebased in uber-space.
 * Special vectors (e.g. double-fault) use a non-0 IST.
 */
#define	IDT64_BASE_ENTRY(vec,seg,ist,type)		 \
	.data						;\
	.long	vec					;\
	.long	KERNEL_UBER_BASE_HI32			;\
	.word	seg					;\
	.byte	ist*16					;\
	.byte	type					;\
	.long	0					;\
	.text
#define	IDT64_ENTRY(vec,ist,type)			 \
	IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
#define	IDT64_ENTRY_LOCAL(vec,ist,type)			 \
	IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
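/*
 * Added note (not in the original source): a hardware long-mode IDT gate
 * is 16 bytes - offset[15:0], selector, IST (low 3 bits of byte 4),
 * type/attributes, offset[31:16], offset[63:32], then 4 reserved bytes.
 * The fake entry laid down above instead stores the full 64-bit uber-space
 * handler address in its first 8 bytes (.long vec / .long
 * KERNEL_UBER_BASE_HI32), which is why it must be "fixed" into the
 * hardware layout before the IDT is loaded.
 */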
/*
 * Push trap number and address of compatibility mode handler,
 * then branch to common trampoline. Error already pushed.
 */
#define	EXCEP64_ERR(n,name)				 \
	IDT64_ENTRY(name,0,K_INTR_GATE)			;\
Entry(name)						;\
	push	$(LO_ALLTRAPS)				;\
	push	$(n)					;\
	jmp	L_enter_lohandler
/*
 * Push error(0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	EXCEPTION64(n,name)				 \
	IDT64_ENTRY(name,0,K_INTR_GATE)			;\
Entry(name)						;\
	push	$0					;\
	push	$(LO_ALLTRAPS)				;\
	push	$(n)					;\
	jmp	L_enter_lohandler
/*
 * Interrupt from user.
 * Push error (0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	EXCEP64_USR(n,name)				 \
	IDT64_ENTRY(name,0,U_INTR_GATE)			;\
Entry(name)						;\
	push	$0					;\
	push	$(LO_ALLTRAPS)				;\
	push	$(n)					;\
	jmp	L_enter_lohandler
/*
 * Special interrupt code from user.
 */
#define	EXCEP64_SPC_USR(n,name)				 \
	IDT64_ENTRY(name,0,U_INTR_GATE)
/*
 * Special interrupt code.
 * In 64-bit mode we may use an IST slot instead of task gates.
 */
#define	EXCEP64_IST(n,name,ist)				 \
	IDT64_ENTRY(name,ist,K_INTR_GATE)
#define	EXCEP64_SPC(n,name)				 \
	IDT64_ENTRY(name,0,K_INTR_GATE)
/*
 * Push zero err, interrupt vector and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	INTERRUPT64(n)					 \
	IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE)	;\
L_ ## n:						;\
	push	$0					;\
	push	$(LO_ALLINTRS)				;\
	push	$(n)					;\
	jmp	L_enter_lohandler
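/*
 * Added note: each macro above leaves the stack in the ISF64 layout the
 * rest of this file assumes (offsets repeated from the fault-handler
 * comments further down):
 *	 0	ISF64_TRAPNO	pushed here
 *	 8	ISF64_TRAPFN	pushed here
 *	16	ISF64_ERR	pushed here, or by hardware
 *	24	ISF64_RIP	pushed by hardware
 *	32	ISF64_CS
 *	40	ISF64_RFLAGS
 *	48	ISF64_RSP
 *	56	ISF64_SS
 */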
	.data
Entry(hi64_data_base)

	.text
Entry(hi64_text_base)
EXCEPTION64(0x00,t64_zero_div)
EXCEP64_SPC(0x01,hi64_debug)
INTERRUPT64(0x02)			/* NMI */
EXCEP64_USR(0x03,t64_int3)
EXCEP64_USR(0x04,t64_into)
EXCEP64_USR(0x05,t64_bounds)
EXCEPTION64(0x06,t64_invop)
EXCEPTION64(0x07,t64_nofpu)
#if	MACH_KDB
EXCEP64_IST(0x08,db_task_dbl_fault64,1)
#else
EXCEP64_IST(0x08,hi64_double_fault,1)
#endif
EXCEPTION64(0x09,a64_fpu_over)
EXCEPTION64(0x0a,a64_inv_tss)
EXCEP64_SPC(0x0b,hi64_segnp)
#if	MACH_KDB
EXCEP64_IST(0x0c,db_task_stk_fault64,1)
#else
EXCEP64_SPC(0x0c,hi64_stack_fault)
#endif
EXCEP64_SPC(0x0d,hi64_gen_prot)
EXCEP64_SPC(0x0e,hi64_page_fault)
EXCEPTION64(0x0f,t64_trap_0f)
EXCEPTION64(0x10,t64_fpu_err)
EXCEPTION64(0x11,t64_trap_11)
EXCEP64_IST(0x12,mc64,1)
EXCEPTION64(0x13,t64_sse_err)
EXCEPTION64(0x14,t64_trap_14)
EXCEPTION64(0x15,t64_trap_15)
EXCEPTION64(0x16,t64_trap_16)
EXCEPTION64(0x17,t64_trap_17)
EXCEPTION64(0x18,t64_trap_18)
EXCEPTION64(0x19,t64_trap_19)
EXCEPTION64(0x1a,t64_trap_1a)
EXCEPTION64(0x1b,t64_trap_1b)
EXCEPTION64(0x1c,t64_trap_1c)
EXCEPTION64(0x1d,t64_trap_1d)
EXCEPTION64(0x1e,t64_trap_1e)
EXCEPTION64(0x1f,t64_trap_1f)

EXCEP64_USR(0x7f,t64_dtrace_ret)

EXCEP64_SPC_USR(0x80,hi64_unix_scall)
EXCEP64_SPC_USR(0x81,hi64_mach_scall)
EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
EXCEP64_SPC_USR(0x83,hi64_diag_scall)

EXCEPTION64(0xff,t64_preempt)
/*
 * Trap/interrupt entry points.
 *
 * All traps must create the following 32-bit save area on the PCB "stack"
 * - this is identical to the legacy mode 32-bit case:
 *
 *	gs
 *	fs
 *	es
 *	ds
 *	edi
 *	esi
 *	ebp
 *	cr2 (defined only for page fault)
 *	ebx
 *	edx
 *	ecx
 *	eax
 *	trap number
 *	error code
 *	eip
 *	cs
 *	eflags
 *	user esp - if from user
 *	user ss	 - if from user
 *
 * Above this are the trap number and compatibility mode handler address
 * (each an 8-byte stack entry) and the 64-bit interrupt stack frame:
 *
 *	err
 *	rip
 *	cs
 *	rflags
 *	rsp
 *	ss
 */
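/*
 * Added note: this save area corresponds to the kernel's x86_saved_state32
 * structure; the R32_* offsets used below are assumed to be the
 * genassym-generated offsets of its fields, with SS_FLAVOR/SS_32/SS_64
 * tagging which flavor a given save area holds.
 */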
/*
 * Control is passed here to return to the compatibility mode user.
 * At this stage we're in kernel space in compatibility mode
 * but we need to switch into 64-bit mode in the 4G-based trampoline
 * space before performing the iret.
 */
return_to_user:
ret_to_user:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	movl	TH_PCB_IDS(%ecx),%eax	/* Obtain this thread's debug state */
	cmpl	$0,%eax			/* Is there a debug register context? */
	je	2f			/* branch if not */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
	jne	1f
	movl	DS_DR0(%eax), %ecx	/* If so, load the 32 bit DRs */
	movl	%ecx, %db0
	movl	DS_DR1(%eax), %ecx
	movl	%ecx, %db1
	movl	DS_DR2(%eax), %ecx
	movl	%ecx, %db2
	movl	DS_DR3(%eax), %ecx
	movl	%ecx, %db3
	movl	DS_DR7(%eax), %ecx
	movl	%ecx, %gs:CPU_DR7
	movl	$0, %gs:CPU_DR7 + 4
	jmp	2f
1:
	ENTER_64BIT_MODE()		/* Enter long mode */
	mov	DS64_DR0(%eax), %rcx	/* Load the full width DRs */
	mov	%rcx, %dr0
	mov	DS64_DR1(%eax), %rcx
	mov	%rcx, %dr1
	mov	DS64_DR2(%eax), %rcx
	mov	%rcx, %dr2
	mov	DS64_DR3(%eax), %rcx
	mov	%rcx, %dr3
	mov	DS64_DR7(%eax), %rcx
	mov	%rcx, %gs:CPU_DR7
	jmp	3f			/* Enter uberspace */
2:
	ENTER_64BIT_MODE()
3:
	/*
	 * Now switch %cr3, if necessary.
	 */
	swapgs				/* switch back to uber-kernel gs base */
	mov	%gs:CPU_TASK_CR3,%rcx
	mov	%rcx,%gs:CPU_ACTIVE_CR3
	mov	%cr3, %rax
	cmp	%rcx, %rax
	je	1f
	/* flag the copyio engine state as WINDOWS_CLEAN */
	mov	%gs:CPU_ACTIVE_THREAD,%eax
	movl	$(WINDOWS_CLEAN),TH_COPYIO_STATE(%eax)
	mov	%rcx,%cr3		/* switch to user's address space */
1:
	mov	%gs:CPU_DR7, %rax	/* Is there a debug control register? */
	cmp	$0, %rax
	je	1f
	mov	%rax, %dr7		/* Set DR7 */
1:
	/*
	 * Adjust stack to use uber-space.
	 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp		/* relocate into uber-space */

	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	je	L_32bit_return
	jmp	L_64bit_return

ret_to_kernel:
	ENTER_64BIT_MODE()
	swapgs				/* switch back to uber-kernel gs base */

	/*
	 * Adjust stack to use uber-space.
	 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp		/* relocate into uber-space */
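/*
 * Added worked example of the shl/shrd relocation above: with a
 * compatibility-mode stack pointer of 0x1234f000, the shl leaves
 * 0x1234f000_00000000 in %rsp and the shrd shifts the low 32 bits of
 * %rax (KERNEL_UBER_BASE_HI32) in from the left, producing
 * KERNEL_UBER_BASE_HI32:1234f000 - the same stack, now addressed
 * through its 64-bit uber-space alias.
 */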
	/* Check for return to 64-bit kernel space (EFI today) */
	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	/* fall through for 32-bit return */

L_32bit_return:
	/*
	 * Restore registers into the machine state for iret.
	 */
	movl	R32_EIP(%rsp), %eax
	movl	%eax, ISC32_RIP(%rsp)
	movl	R32_EFLAGS(%rsp), %eax
	movl	%eax, ISC32_RFLAGS(%rsp)
	movl	R32_CS(%rsp), %eax
	movl	%eax, ISC32_CS(%rsp)
	movl	R32_UESP(%rsp), %eax
	movl	%eax, ISC32_RSP(%rsp)
	movl	R32_SS(%rsp), %eax
	movl	%eax, ISC32_SS(%rsp)
	/*
	 * Restore general 32-bit registers
	 */
	movl	R32_EAX(%rsp), %eax
	movl	R32_EBX(%rsp), %ebx
	movl	R32_ECX(%rsp), %ecx
	movl	R32_EDX(%rsp), %edx
	movl	R32_EBP(%rsp), %ebp
	movl	R32_ESI(%rsp), %esi
	movl	R32_EDI(%rsp), %edi
	/*
	 * Restore segment registers. We may take an exception here but
	 * we've got enough space left in the save frame area to absorb
	 * a hardware frame plus the trapfn and trapno
	 */
LEXT(ret32_set_ds)
	movw	R32_DS(%rsp), %ds
LEXT(ret32_set_es)
	movw	R32_ES(%rsp), %es
LEXT(ret32_set_fs)
	movw	R32_FS(%rsp), %fs
LEXT(ret32_set_gs)
	movw	R32_GS(%rsp), %gs

	add	$(ISC32_OFFSET)+8+8+8, %rsp	/* pop compat frame +
						   trapno, trapfn and error */
	cmp	$(SYSENTER_CS),ISF64_CS-8-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_fast_exit
LEXT(ret32_iret)
	iretq				/* return from interrupt */

L_fast_exit:
	pop	%rdx			/* user return eip */
	pop	%rcx			/* pop and toss cs */
	andl	$(~EFL_IF), (%rsp)	/* clear interrupt enable; sti below */
	popf				/* flags - carry denotes failure */
	pop	%rcx			/* user return esp */

	sti				/* interrupts enabled after sysexit */
	sysexit				/* 32-bit sysexit */
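/*
 * Added note (architectural, not XNU-specific): sysexit reloads EIP from
 * %edx and ESP from %ecx, deriving CS/SS from the IA32_SYSENTER_CS MSR,
 * which is why the pops above stage the user eip/esp into those registers.
 */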
L_64bit_return:
	/*
	 * Set the GS Base MSR with the user's gs base.
	 */
	movl	%gs:CPU_UBER_USER_GS_BASE, %eax
	movl	%gs:CPU_UBER_USER_GS_BASE+4, %edx
	movl	$(MSR_IA32_GS_BASE), %ecx
	swapgs
	testb	$3, R64_CS(%rsp)	/* returning to user-space? */
	jz	1f
	wrmsr				/* set 64-bit base */
1:
	/*
	 * Restore general 64-bit registers
	 */
	mov	R64_R15(%rsp), %r15
	mov	R64_R14(%rsp), %r14
	mov	R64_R13(%rsp), %r13
	mov	R64_R12(%rsp), %r12
	mov	R64_R11(%rsp), %r11
	mov	R64_R10(%rsp), %r10
	mov	R64_R9(%rsp), %r9
	mov	R64_R8(%rsp), %r8
	mov	R64_RSI(%rsp), %rsi
	mov	R64_RDI(%rsp), %rdi
	mov	R64_RBP(%rsp), %rbp
	mov	R64_RDX(%rsp), %rdx
	mov	R64_RBX(%rsp), %rbx
	mov	R64_RCX(%rsp), %rcx
	mov	R64_RAX(%rsp), %rax
	add	$(ISS64_OFFSET)+8+8+8, %rsp	/* pop saved state frame +
						   trapno, trapfn and error */
	cmpl	$(SYSCALL_CS),ISF64_CS-8-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_sysret
LEXT(ret64_iret)
	iretq				/* return from interrupt */
L_sysret:
	/*
	 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
	 *	rcx	user rip
	 *	r11	user rflags
	 *	rsp	user stack pointer
	 */
	mov	ISF64_RIP-8-8-8(%rsp), %rcx
	mov	ISF64_RFLAGS-8-8-8(%rsp), %r11
	mov	ISF64_RSP-8-8-8(%rsp), %rsp
	sysretq				/* return from system call */
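/*
 * Added note (architectural): sysretq restores RIP from %rcx and RFLAGS
 * from %r11 - the same registers syscall saved them in - and leaves %rsp
 * untouched, hence the explicit reload of the user stack pointer above.
 */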
/*
 * Common path to enter locore handlers.
 */
L_enter_lohandler:
	swapgs				/* switch to kernel gs (cpu_data) */
L_enter_lohandler_continue:
	cmpl	$(USER64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter		/* this is a 64-bit user task */
	cmpl	$(KERNEL64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter		/* we're in 64-bit (EFI) code */
	jmp	L_32bit_enter
/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */

Entry(hi64_unix_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_unix_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_UNIX_SCALL)
	push	$(UNIX_INT)
	jmp	L_32bit_enter_check

Entry(hi64_mach_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_mach_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_MACH_SCALL)
	push	$(MACH_INT)
	jmp	L_32bit_enter_check

Entry(hi64_mdep_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_mdep_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_MDEP_SCALL)
	push	$(MACHDEP_INT)
	jmp	L_32bit_enter_check

Entry(hi64_diag_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_diag_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_DIAG_SCALL)
	push	$(DIAG_INT)
	jmp	L_32bit_enter_check
Entry(hi64_syscall)
	swapgs				/* Kapow! get per-cpu data area */
L_syscall_continue:
	mov	%rsp, %gs:CPU_UBER_TMP	/* save user stack */
	mov	%gs:CPU_UBER_ISF, %rsp	/* switch stack to pcb */

	/*
	 * Save values in the ISF frame in the PCB
	 * to cons up the saved machine state.
	 */
	movl	$(USER_DS), ISF64_SS(%rsp)
	movl	$(SYSCALL_CS), ISF64_CS(%rsp)	/* cs - a pseudo-segment */
	mov	%r11, ISF64_RFLAGS(%rsp)	/* rflags */
	mov	%rcx, ISF64_RIP(%rsp)		/* rip */
	mov	%gs:CPU_UBER_TMP, %rcx
	mov	%rcx, ISF64_RSP(%rsp)		/* user stack */
	mov	%rax, ISF64_ERR(%rsp)		/* err/rax - syscall code */
	movl	$(T_SYSCALL), ISF64_TRAPNO(%rsp)	/* trapno */
	movl	$(LO_SYSCALL), ISF64_TRAPFN(%rsp)
	jmp	L_64bit_enter		/* this can only be a 64-bit task */
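/*
 * Added note (architectural): syscall stashes the user RIP in %rcx and
 * RFLAGS in %r11, masks RFLAGS with the IA32_FMASK MSR, and does not
 * switch stacks; that is why the user %rsp is parked in CPU_UBER_TMP and
 * a complete ISF is hand-built on the PCB stack above.
 */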
L_32bit_enter_check:
	/*
	 * Check we're not a confused 64-bit user.
	 */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
	jmp	L_32bit_enter
/*
 * sysenter entry point
 * Requires user code to set up:
 *	edx: user instruction pointer (return address)
 *	ecx: user stack pointer
 *		on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *	 or requiring ecx to be preserved.
 */
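/*
 * Added illustration - a sketch of the user-side sequence implied by the
 * contract above (not the actual libc/commpage stub):
 *
 *	popl	%edx		# return address -> %edx
 *	movl	%esp, %ecx	# user stack -> %ecx
 *	sysenter
 *
 * The kernel below reconstructs the user eip/esp from %edx/%ecx.
 */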
Entry(hi64_sysenter)
	mov	(%rsp), %rsp		/* switch from temporary stack to pcb */
	/*
	 * Push values on to the PCB stack
	 * to cons up the saved machine state.
	 */
	push	$(USER_DS)		/* ss */
	push	%rcx			/* uesp */
	pushf				/* flags */
	/*
	 * Clear, among others, the Nested Task (NT) flags bit;
	 * this is zeroed by INT, but not by SYSENTER.
	 */
	push	$0
	popf
	push	$(SYSENTER_CS)		/* cs */
	swapgs				/* switch to kernel gs (cpu_data) */
L_sysenter_continue:
	push	%rdx			/* eip */
	push	%rax			/* err/eax - syscall code */
	push	$0			/* trapfn slot (set below) */
	push	$0			/* trapno slot */
	orl	$(EFL_IF), ISF64_RFLAGS(%rsp)	/* re-enable IF on return */
	movl	$(LO_MACH_SCALL), ISF64_TRAPFN(%rsp)
	testl	%eax, %eax		/* mach syscalls are negative */
	js	L_32bit_enter_check
	movl	$(LO_UNIX_SCALL), ISF64_TRAPFN(%rsp)
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
/* If the caller (typically LibSystem) has recorded the cumulative size of
 * the arguments in EAX, copy them over from the user stack directly.
 * We recover from exceptions inline--if the copy loop doesn't complete
 * due to an exception, we fall back to copyin from compatibility mode.
 * We can potentially extend this mechanism to mach traps as well (DRK).
 */
L_sysenter_copy_args:
	testl	$(I386_SYSCALL_ARG_BYTES_MASK), %eax
	jz	L_32bit_enter		/* no arg size recorded - skip copy */
	mov	%gs:CPU_UBER_ARG_STORE, %r8
	mov	%gs:CPU_UBER_ARG_STORE_VALID, %r12
	movl	%eax, %r9d
	shrl	$(I386_SYSCALL_ARG_DWORDS_SHIFT), %r9d
	andl	$(I386_SYSCALL_ARG_DWORDS_MASK), %r9d
	xor	%r10, %r10		/* dword index */
	movl	$0, (%r12)		/* invalidate the arg store */
EXT(hi64_sysenter_user_arg_copy):
0:
	movl	4(%rcx, %r10, 4), %r11d
	movl	%r11d, (%r8, %r10, 4)
	incl	%r10d
	decl	%r9d
	jnz	0b
	movl	$1, (%r12)		/* mark the arg store valid */
	/* Fall through to 32-bit handler */
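/*
 * Added restatement of the copy loop in C (illustrative; names assumed):
 *
 *	for (i = 0; i < nwords; i++)
 *		arg_store[i] = user_stack[i + 1];  // slot 0 holds the ret addr
 *
 * with the CPU_UBER_ARG_STORE_VALID flag cleared before and set after, so
 * a partial copy (fault mid-loop) is never trusted.
 */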
L_32bit_enter:
	/*
	 * Make space for the compatibility save area.
	 */
	sub	$(ISC32_OFFSET), %rsp
	movl	$(SS_32), SS_FLAVOR(%rsp)

	/*
	 * Save segment registers
	 */
	mov	%ds, R32_DS(%rsp)
	mov	%es, R32_ES(%rsp)
	mov	%fs, R32_FS(%rsp)
	mov	%gs, R32_GS(%rsp)

	/*
	 * Save general 32-bit registers
	 */
	mov	%eax, R32_EAX(%rsp)
	mov	%ebx, R32_EBX(%rsp)
	mov	%ecx, R32_ECX(%rsp)
	mov	%edx, R32_EDX(%rsp)
	mov	%ebp, R32_EBP(%rsp)
	mov	%esi, R32_ESI(%rsp)
	mov	%edi, R32_EDI(%rsp)

	/* Unconditionally save cr2; only meaningful on page faults */
	mov	%cr2, %rax
	mov	%eax, R32_CR2(%rsp)
	/*
	 * Copy registers already saved in the machine state
	 * (in the interrupt stack frame) into the compat save area.
	 */
	mov	ISC32_RIP(%rsp), %eax
	mov	%eax, R32_EIP(%rsp)
	mov	ISC32_RFLAGS(%rsp), %eax
	mov	%eax, R32_EFLAGS(%rsp)
	mov	ISC32_CS(%rsp), %eax
	mov	%eax, R32_CS(%rsp)
	mov	ISC32_RSP(%rsp), %eax
	mov	%eax, R32_UESP(%rsp)
	mov	ISC32_SS(%rsp), %eax
	mov	%eax, R32_SS(%rsp)
L_32bit_enter_after_fault:
	mov	ISC32_TRAPNO(%rsp), %ebx	/* %ebx := trapno for later */
	mov	%ebx, R32_TRAPNO(%rsp)
	mov	ISC32_ERR(%rsp), %eax
	mov	%eax, R32_ERR(%rsp)
	mov	ISC32_TRAPFN(%rsp), %edx
L_enter_lohandler2:
	/*
	 * Common point to enter lo_handler in compatibility mode:
	 *	%ebx	trap number
	 *	%edx	locore handler address
	 */

	/*
	 * Switch address space to kernel
	 * if not shared space and not already mapped.
	 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
	 */
	mov	%cr3, %rax
	mov	%gs:CPU_TASK_CR3, %rcx
	cmp	%rax, %rcx		/* is the task's cr3 loaded? */
	jne	1f
	cmpl	$(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
	je	2f
1:
	mov	%gs:CPU_KERNEL_CR3, %rcx
	cmp	%rax, %rcx
	je	2f
	mov	%rcx, %cr3
	mov	%rcx, %gs:CPU_ACTIVE_CR3
2:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* Get the active thread */
	cmpl	$0, TH_PCB_IDS(%ecx)	/* Is there a debug register state? */
	je	3f
	xor	%ecx, %ecx		/* If so, reset DR7 (the control) */
	mov	%rcx, %dr7
3:
	/*
	 * Switch to compatibility mode.
	 * Then establish kernel segments.
	 */
	swapgs				/* Done with uber-kernel gs */
	ENTER_COMPAT_MODE()
	/*
	 * Now in compatibility mode and running in compatibility space,
	 * prepare to enter the locore handler.
	 *	%ebx	trap number
	 *	%edx	lo_handler pointer
	 * Note: the stack pointer (now 32-bit) is directly addressing the
	 * kernel below 4G and therefore is automagically re-based.
	 */
	mov	$(KERNEL_DS), %eax
	mov	%eax, %ss
	mov	%eax, %ds
	mov	%eax, %es
	mov	%eax, %fs
	mov	$(CPU_DATA_GS), %eax
	mov	%eax, %gs

	incl	%gs:hwIntCnt(,%ebx,4)	/* Bump the trap/intr count */
	/* Dispatch the designated lo handler */
	jmp	*%edx

	.code64
L_64bit_entry_reject:
	/*
	 * Here for a 64-bit user attempting an invalid kernel entry.
	 */
	movl	$(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
	movl	$(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
	/* Fall through... */
L_64bit_enter:
	/*
	 * Here for a 64-bit user task, or special 64-bit kernel code.
	 * Make space for the save area.
	 */
	sub	$(ISS64_OFFSET), %rsp
	movl	$(SS_64), SS_FLAVOR(%rsp)

	/*
	 * Save segment registers
	 */
	mov	%fs, R64_FS(%rsp)
	mov	%gs, R64_GS(%rsp)
	/* Save general-purpose registers */
	mov	%rax, R64_RAX(%rsp)
	mov	%rcx, R64_RCX(%rsp)
	mov	%rbx, R64_RBX(%rsp)
	mov	%rbp, R64_RBP(%rsp)
	mov	%r11, R64_R11(%rsp)
	mov	%r12, R64_R12(%rsp)
	mov	%r13, R64_R13(%rsp)
	mov	%r14, R64_R14(%rsp)
	mov	%r15, R64_R15(%rsp)
	/* cr2 is significant only for page-faults */
	mov	%cr2, %rax
	mov	%rax, R64_CR2(%rsp)

	/* Other registers (which may contain syscall args) */
	mov	%rdi, R64_RDI(%rsp)	/* arg0 .. */
	mov	%rsi, R64_RSI(%rsp)
	mov	%rdx, R64_RDX(%rsp)
	mov	%r10, R64_R10(%rsp)
	mov	%r8, R64_R8(%rsp)
	mov	%r9, R64_R9(%rsp)	/* .. arg5 */
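/*
 * Added note: rdi/rsi/rdx/r10/r8/r9 are the x86-64 syscall argument
 * registers (arg0..arg5); %r10 stands in for the C ABI's %rcx because
 * syscall itself clobbers %rcx with the return address.
 */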
L_64bit_enter_after_fault:
	/*
	 * At this point we're almost ready to join the common lo-entry code.
	 */
	mov	R64_TRAPNO(%rsp), %ebx
	mov	R64_TRAPFN(%rsp), %edx

	testb	$3, ISF64_CS+ISS64_OFFSET(%rsp)
	jz	1f
	xor	%rbp, %rbp		/* clear framepointer if from user */
1:
	jmp	L_enter_lohandler2
Entry(hi64_page_fault)
	push	$(LO_ALLTRAPS)
	push	$(T_PAGE_FAULT)
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler
	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	jne	hi64_kernel_trap
	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_32bit_enter
/*
 * Debug trap.  Check for single-stepping across system call into
 * kernel.  If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * trap bit set.
 */
Entry(hi64_debug)
	swapgs				/* set %gs for cpu data */
	push	$0			/* error code */
	push	$(LO_ALLTRAPS)
	push	$(T_DEBUG)

	testb	$3, ISF64_CS(%rsp)
	jnz	L_enter_lohandler_continue
	/*
	 * trap came from kernel mode
	 */
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
	jne	1f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_mach_scall_continue	/* continue system call entry */
1:
	cmpl	$(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
	jne	2f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_mdep_scall_continue	/* continue system call entry */
2:
	cmpl	$(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
	jne	3f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_unix_scall_continue	/* continue system call entry */
3:
	cmpl	$(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue
	/*
	 * Interrupt stack frame has been pushed on the temporary stack.
	 * We have to switch to pcb stack and copy eflags.
	 */
	add	$40,%rsp		/* remove trapno/trapfn/err/rip/cs */
	push	%rcx			/* save %rcx - user stack pointer */
	mov	32(%rsp),%rcx		/* top of intr stack -> pcb stack */
	xchg	%rcx,%rsp		/* switch to pcb stack */
	push	$(USER_DS)		/* ss */
	push	(%rcx)			/* saved %rcx into rsp slot */
	push	8(%rcx)			/* rflags */
	mov	(%rcx),%rcx		/* restore %rcx */
	push	$(SYSENTER_TF_CS)	/* cs - not SYSENTER_CS for iret path */
	jmp	L_sysenter_continue	/* continue sysenter entry */
Entry(hi64_double_fault)
	swapgs				/* set %gs for cpu data */
	push	$(LO_DOUBLE_FAULT)
	push	$(T_DOUBLE_FAULT)

	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_syscall)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue

	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_syscall_continue
/*
 * General protection or segment-not-present fault.
 * Check for a GP/NP fault in the kernel_return
 * sequence; if there, report it as a GP/NP fault on the user's instruction.
 *
 * rsp->	 0	ISF64_TRAPNO:	trap code (NP or GP)
 *		 8	ISF64_TRAPFN:	trap function
 *		16	ISF64_ERR:	segment number in error (error code)
 *		24	ISF64_RIP:	rip
 *		32	ISF64_CS:	cs
 *		40	ISF64_RFLAGS:	rflags
 *		48	ISF64_RSP:	rsp
 *		56	ISF64_SS:	ss
 *		64	old registers (trap is from kernel)
 */
Entry(hi64_gen_prot)
	push	$(LO_ALLTRAPS)
	push	$(T_GENERAL_PROTECTION)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */
Entry(hi64_stack_fault)
	push	$(LO_ALLTRAPS)
	push	$(T_STACK_FAULT)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(hi64_segnp)
	push	$(LO_ALLTRAPS)
	push	$(T_SEGMENT_NOT_PRESENT)
					/* indicate fault type */
trap_check_kernel_exit:
	testb	$3,ISF64_CS(%rsp)
	jnz	L_enter_lohandler
					/* trap was from kernel mode, so */
					/* check for the kernel exit sequence */
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(ret32_iret)), ISF64_RIP(%rsp)
	je	L_fault_iret
	cmpl	$(EXT(ret32_set_ds)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_es)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_fs)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_gs)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg

	cmpl	$(EXT(ret64_iret)), ISF64_RIP(%rsp)
	je	L_64bit_fault_iret

	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	cmove	ISF64_RSP(%rsp), %rsp
	jmp	L_enter_lohandler_continue
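/*
 * Added note on the cmove above: if the faulting RIP is the sysenter
 * argument-copy instruction, %rsp is conditionally reloaded from the
 * frame's saved stack pointer, abandoning the partial copy; the slower
 * compatibility-mode copyin path then recovers the arguments (see the
 * comment at L_sysenter_copy_args).
 */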
/*
 * Here after taking an unexpected trap from kernel mode - perhaps
 * while running in the trampolines hereabouts.
 * Make sure we're not on the PCB stack, if so move to the kernel stack.
 * This is likely a fatal condition.
 * But first, try to be sure we have the kernel gs base active...
 */
hi64_kernel_trap:
	cmpq	$0, %gs:CPU_THIS	/* test gs_base */
	js	1f			/* -ve kernel addr, no swap */
	swapgs				/* +ve user addr, swap */
1:
	movq	%rax, %gs:CPU_UBER_TMP	/* save %rax */
	movq	%gs:CPU_UBER_ISF, %rax	/* PCB stack addr */
	subq	%rsp, %rax
	cmpq	$(PAGE_SIZE), %rax	/* current stack in PCB? */
	movq	%gs:CPU_UBER_TMP, %rax	/* restore %rax */
	ja	L_enter_lohandler_continue	/* stack not in PCB */

	/*
	 * Here if %rsp is in the PCB
	 * Copy the interrupt stack frame from PCB stack to kernel stack
	 */
	movq	%gs:CPU_KERNEL_STACK, %rax	/* note: %rax restored below */
	xchgq	%rax, %rsp
	pushq	ISF64_SS(%rax)
	pushq	ISF64_RSP(%rax)
	pushq	ISF64_RFLAGS(%rax)
	pushq	ISF64_CS(%rax)
	pushq	ISF64_RIP(%rax)
	pushq	ISF64_ERR(%rax)
	pushq	ISF64_TRAPFN(%rax)
	pushq	ISF64_TRAPNO(%rax)
	movq	%gs:CPU_UBER_TMP, %rax	/* restore %rax */
	jmp	L_enter_lohandler_continue
/*
 * GP/NP fault on IRET: CS or SS is in error.
 * All registers contain the user's values.
 *
 * on SP is
 *   0	ISF64_TRAPNO:	trap code (NP or GP)
 *   8	ISF64_TRAPFN:	trap function
 *  16	ISF64_ERR:	segment number in error (error code)
 *  24	ISF64_RIP:	rip
 *  32	ISF64_CS:	cs
 *  40	ISF64_RFLAGS:	rflags
 *  48	ISF64_RSP:	rsp
 *  56	ISF64_SS:	ss  --> new trapno/trapfn
 *  64	pad	    --> new errcode
 *  72	user rip
 *  80	user cs
 *  88	user rflags
 *  96	user rsp
 * 104	user ss (16-byte aligned)
 */
L_fault_iret:
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don't need saved rip) */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	%rax, ISF64_SS(%rsp)	/* put in user trap number */
	mov	ISF64_ERR(%rsp), %rax
	mov	%rax, 8+ISF64_SS(%rsp)	/* put in user errcode */
	mov	ISF64_RIP(%rsp), %rax	/* restore rax */
	add	$(ISF64_SS), %rsp	/* reset to original frame */
					/* now treat as fault from user */
	jmp	L_enter_lohandler
L_64bit_fault_iret:
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don't need saved rip) */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	%rax, ISF64_SS(%rsp)	/* put in user trap number */
	mov	ISF64_ERR(%rsp), %rax
	mov	%rax, 8+ISF64_SS(%rsp)	/* put in user errcode */
	mov	ISF64_RIP(%rsp), %rax	/* restore rax */
	add	$(ISF64_SS), %rsp	/* reset to original frame */
					/* now treat as fault from user */
	jmp	L_enter_lohandler
/*
 * Fault restoring a segment register.  All of the saved state is still
 * on the stack untouched since we didn't move the stack pointer.
 */
L_32bit_fault_set_seg:
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	ISF64_ERR(%rsp), %rdx
	mov	ISF64_RSP(%rsp), %rsp	/* reload stack prior to fault */
	mov	%rax,ISC32_TRAPNO(%rsp)
	mov	%rdx,ISC32_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	swapgs
	jmp	L_32bit_enter_after_fault
/*
 * Fatal exception handlers:
 */
Entry(db_task_dbl_fault64)
	push	$(LO_DOUBLE_FAULT)
	push	$(T_DOUBLE_FAULT)
	jmp	L_enter_lohandler

Entry(db_task_stk_fault64)
	push	$(LO_DOUBLE_FAULT)
	push	$(T_STACK_FAULT)
	jmp	L_enter_lohandler

Entry(mc64)
	push	$(0)			/* Error */
	push	$(LO_MACHINE_CHECK)
	push	$(T_MACHINE_CHECK)
	jmp	L_enter_lohandler
/*
 * All task 'exceptions' enter lo_alltraps:
 *	esp	-> x86_saved_state_t
 *
 * The rest of the state is set up as:
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_alltraps)
	movl	R32_CS(%esp),%eax	/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
	jne	1f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
1:
	testb	$3,%al
	jz	trap_from_kernel
					/* user mode trap */
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	TH_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	CCALL1(user_trap, %ebx)		/* call user trap routine */
					/* user_trap() unmasks interrupts */
	cli				/* hold off intrs - critical section */
	xorl	%ecx,%ecx		/* don't check if we're in the PFZ */
/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled
 */
Entry(return_from_trap)
	movl	%gs:CPU_ACTIVE_THREAD, %esp
	movl	TH_PCB_ISS(%esp),%esp	/* switch back to PCB stack */
	movl	%gs:CPU_PENDING_AST, %eax
	testl	%eax, %eax		/* any pending ASTs? */
	je	return_to_user		/* branch if no AST */
LEXT(return_from_trap_with_ast)
	movl	%gs:CPU_KERNEL_STACK, %ebx
	xchgl	%ebx, %esp		/* switch to kernel stack */

	testl	%ecx, %ecx		/* see if we need to check for an EIP in the PFZ */
	je	2f			/* no, go handle the AST */
	cmpl	$(SS_64), SS_FLAVOR(%ebx)	/* are we a 64-bit task? */
	je	1f
					/* no... 32-bit user mode */
	movl	R32_EIP(%ebx), %eax
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear frame pointer */
	CCALL1(commpage_is_in_pfz32, %eax)
	popl	%ebx			/* retrieve pointer to PCB stack */
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R32_EBX(%ebx)	/* let the PFZ know we've pended an AST */
	xchgl	%ebx, %esp		/* switch back to PCB stack */
	jmp	ret_to_user
1:					/* 64-bit user mode */
	movl	R64_RIP(%ebx), %ecx
	movl	R64_RIP+4(%ebx), %eax
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear frame pointer */
	CCALL2(commpage_is_in_pfz64, %ecx, %eax)
	popl	%ebx			/* retrieve pointer to PCB stack */
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R64_RBX(%ebx)	/* let the PFZ know we've pended an AST */
	xchgl	%ebx, %esp		/* switch back to PCB stack */
	jmp	ret_to_user
2:
	sti				/* interrupts always enabled on return to user mode */
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* Clear framepointer */
	CCALL1(i386_astintr, $0)	/* take the AST */
	cli

	popl	%esp			/* switch back to PCB stack (w/exc link) */

	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)	/* and check again (rare) */
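/*
 * Added background: the PFZ (preemption-free zone) is a commpage code
 * range that must not have an AST serviced while a thread's EIP/RIP is
 * inside it.  commpage_is_in_pfz32/64 is assumed to return non-zero in
 * that case, and the value parked in the saved EBX/RBX above tells the
 * PFZ code to come back for the pended AST when it exits the zone.
 */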
/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will set them to the state at the time
 * of trap as soon as it's safe for us to do so and not recurse doing
 * preemption.
 */
trap_from_kernel:
	movl	%esp, %eax		/* saved state addr */
	pushl	R32_EIP(%esp)		/* Simulate a CALL from fault point */
	pushl	%ebp			/* Extend framepointer chain */
	movl	%esp, %ebp
	CCALL1WITHSP(kernel_trap, %eax)	/* Call kernel trap handler */
	popl	%ebp
	addl	$4, %esp

	cli
	movl	%gs:CPU_PENDING_AST,%eax	/* get pending asts */
	testl	$(AST_URGENT),%eax	/* any urgent preemption? */
	je	ret_to_kernel		/* no, nothing to do */
	cmpl	$(T_PREEMPT),R32_TRAPNO(%esp)
	je	ret_to_kernel		/* T_PREEMPT handled in kernel_trap() */
	testl	$(EFL_IF),R32_EFLAGS(%esp)	/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	and	EXT(kernel_stack_mask),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	CCALL1(i386_astintr, $1)	/* take the AST */
	jmp	ret_to_kernel
/*
 * All interrupts on all tasks enter here with:
 *	esp	-> x86_saved_state_t
 *
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movl	%gs:CPU_INT_STACK_TOP,%ecx
	cmpl	%esp,%ecx
	jb	1f
	leal	-INTSTACK_SIZE(%ecx),%edx
	cmpl	%esp,%edx
	jb	int_from_intstack
1:
	xchgl	%ecx,%esp		/* switch to interrupt stack */

	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
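/*
 * Added note: setting CR0.TS here arms lazy FPU handling for interrupt
 * context - any FP/SSE use while on the interrupt stack traps with
 * device-not-available instead of silently clobbering the interrupted
 * thread's FP state; the TS/FP_VALID dance on exit below undoes this.
 */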
	subl	$8, %esp		/* for 16-byte stack alignment */
	pushl	%ecx			/* save pointer to old stack */
	movl	%ecx,%gs:CPU_INT_STATE	/* save intr state */

	TIME_INT_ENTRY			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	TH_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%gs:CPU_INT_STATE, %eax
	CCALL1(interrupt, %eax)		/* call generic interrupt routine */
	cli				/* just in case we returned with intrs enabled */
	xorl	%eax,%eax
	movl	%eax,%gs:CPU_INT_STATE	/* clear intr state pointer */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */
	movl	%gs:CPU_ACTIVE_THREAD,%eax
	movl	TH_PCB_FPS(%eax),%eax	/* get pcb's ifps */
	testl	%eax, %eax		/* Is there a context */
	je	1f			/* Branch if not */
	cmpl	$0, FP_VALID(%eax)	/* Check fp_valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
2:
	popl	%esp			/* switch back to old stack */
	/* Load interrupted code segment into %eax */
	movl	R32_CS(%esp),%eax	/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
	jne	1f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
1:
	testb	$3,%al			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	$(AST_URGENT),%eax	/* any urgent requests? */
	je	ret_to_kernel		/* no, nothing to do */

	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */

	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	and	EXT(kernel_stack_mask),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */
	/*
	 * Take an AST from kernel space.  We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL1(i386_astintr, $1)

	jmp	ret_to_kernel
/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL
	incl	%gs:CPU_NESTED_ISTACK

	movl	%esp, %edx		/* x86_saved_state */
	CCALL1(interrupt, %edx)

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL
	decl	%gs:CPU_NESTED_ISTACK

	jmp	ret_to_kernel
/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* pending ASTs? */
	je	ret_to_user		/* no, nothing to do */

	TIME_TRAP_UENTRY

	movl	$1, %ecx		/* check if we're in the PFZ */
	jmp	EXT(return_from_trap_with_ast)	/* return */
/*
 * System call entries via INTR_GATE or sysenter:
 *	esp	 -> x86_saved_state32_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_unix_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */
	incl	TH_SYSCALLS_UNIX(%ecx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti

	CCALL1(unix_syscall, %edi)
	/*
	 * always returns through thread_exception_return
	 */

Entry(lo_mach_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */
	incl	TH_SYSCALLS_MACH(%ecx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti

	CCALL1(mach_call_munger, %edi)
	/*
	 * always returns through thread_exception_return
	 */
1586 * always returns through thread_exception_return
1590 Entry(lo_mdep_scall)
1593 movl %gs:CPU_KERNEL_STACK,%edi
1594 xchgl %edi,%esp /* switch to kernel stack */
1595 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
1596 movl TH_TASK(%ecx),%ebx /* point to current task */
1598 /* Check for active vtimers in the current task */
1599 TASK_VTIMER_CHECK(%ebx, %ecx)
1603 CCALL1(machdep_syscall, %edi)
1605 * always returns through thread_exception_return
1609 Entry(lo_diag_scall)
1612 movl %gs:CPU_KERNEL_STACK,%edi
1613 xchgl %edi,%esp /* switch to kernel stack */
1614 movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
1615 movl TH_TASK(%ecx),%ebx /* point to current task */
1617 /* Check for active vtimers in the current task */
1618 TASK_VTIMER_CHECK(%ebx, %ecx)
1620 pushl %edi /* push pbc stack for later */
1622 CCALL1(diagCall, %edi) // Call diagnostics
1624 cli // Disable interruptions just in case
1625 cmpl $0,%eax // What kind of return is this?
1626 je 1f // - branch if bad (zero)
1627 popl %esp // Get back the original stack
1628 jmp return_to_user // Normal return, do not check asts...
1630 CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
1631 // pass what would be the diag syscall
1632 // error return - cause an exception
/*
 * System call entries via syscall only:
 *	esp	 -> x86_saved_state64_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_syscall)
	TIME_TRAP_UENTRY
	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
	movl	R64_RAX(%edi), %eax	/* syscall number/class */
	movl	%eax, %edx
	andl	$(SYSCALL_CLASS_MASK), %edx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_mach_scall)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_unix_scall)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_mdep_scall)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_diag_scall)
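/*
 * Added worked example (constants from mach/i386/syscall_sw.h, included
 * above, with SYSCALL_CLASS_SHIFT assumed to be 24): the class rides in
 * the top byte of the syscall number, so a unix call such as write(2)
 * arrives as 0x2000004 and (0x2000004 & SYSCALL_CLASS_MASK) selects
 * SYSCALL_CLASS_UNIX, while a mach trap number carries SYSCALL_CLASS_MACH
 * in that byte instead.
 */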
	/* Syscall class unknown */
	CCALL5(i386_exception, $(EXC_SYSCALL), %eax, $0, $1, $0)

Entry(lo64_unix_scall)
	incl	TH_SYSCALLS_UNIX(%ecx)	/* increment call count */

	sti

	CCALL1(unix_syscall64, %edi)
	/*
	 * always returns through thread_exception_return
	 */

Entry(lo64_mach_scall)
	incl	TH_SYSCALLS_MACH(%ecx)	/* increment call count */

	sti

	CCALL1(mach_call_munger64, %edi)
	/*
	 * always returns through thread_exception_return
	 */

Entry(lo64_mdep_scall)
	sti

	CCALL1(machdep_syscall64, %edi)
	/*
	 * always returns through thread_exception_return
	 */
Entry(lo64_diag_scall)
	CCALL1(diagCall64, %edi)	// Call diagnostics

	cli				// Disable interruptions just in case
	cmpl	$0,%eax			// What kind of return is this?
	je	1f			// - branch if bad (zero)
	movl	%edi, %esp		// Get back the original stack
	jmp	return_to_user		// Normal return, do not check asts...
1:
	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
					// pass what would be the diag syscall
					// error return - cause an exception
/*
 * Compatibility mode's last gasp...
 */
Entry(lo_df64)
	movl	%esp, %eax
	CCALL1(panic_double_fault64, %eax)

Entry(lo_mc64)
	movl	%esp, %eax
	CCALL1(panic_machine_check64, %eax)