*/
#include <platforms.h>
-#include <mach_kdb.h>
-
#include <i386/asm.h>
#include <i386/asm64.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
+#include <i386/vmx/vmx_asm.h>
#include <assym.s>
.data
ret
-Entry(ml_64bit_wrmsr64)
- /* (uint32_t msr, uint64_t value) */
- /* (uint32_t msr, uint32_t lo, uint32_t hi) */
-
- FRAME
-
- ENTER_64BIT_MODE()
-
- movl B_ARG0, %ecx
- movl B_ARG1, %eax
- movl B_ARG2, %edx
- wrmsr
-
- ENTER_COMPAT_MODE()
-
- EMARF
- ret
-
-
Entry(ml_64bit_lldt)
/* (int32_t selector) */
EMARF
ret
+/*
+ * set64_cr3 -- load %cr3 with a full 64-bit value from the
+ * compatibility-mode (32-bit) kernel.  The value arrives on the stack as
+ * a lo/hi pair (B_ARG0/B_ARG1), is fetched while still in 32-bit mode,
+ * and the actual %cr3 write is done from 64-bit mode so no bits are lost.
+ */
+Entry(set64_cr3)
+
+ FRAME
+
+ movl B_ARG0, %eax /* low 32 bits of new cr3 value */
+ movl B_ARG1, %edx /* high 32 bits of new cr3 value */
+
+ ENTER_64BIT_MODE()
+
+ /* %rax = %edx:%eax */
+ shl $32, %rax /* old %eax now occupies bits 63:32 */
+ shrd $32, %rdx, %rax /* shift %edx in on top; %eax back to bits 31:0 */
+
+ mov %rax, %cr3
+
+ ENTER_COMPAT_MODE()
+
+ EMARF
+ ret
+
+/*
+ * get64_cr3 -- read the full 64-bit %cr3 from compatibility mode.
+ * Returns the value split across %edx:%eax for the 32-bit caller.
+ */
+Entry(get64_cr3)
+
+ FRAME
+
+ ENTER_64BIT_MODE()
+
+ mov %cr3, %rax
+ mov %rax, %rdx
+ shr $32, %rdx // %edx:%eax = %cr3
+
+ ENTER_COMPAT_MODE()
+
+ EMARF
+ ret
+
+/*
+ * cpuid64 -- execute CPUID from 64-bit mode.
+ * No frame is built and no stack arguments are read: the bare CPUID takes
+ * its leaf in %eax (and subleaf in %ecx) and returns results in
+ * %eax/%ebx/%ecx/%edx.  NOTE(review): this register contract is inferred
+ * from the instruction itself -- confirm the mode-switch macros preserve
+ * those registers for the caller.
+ */
+Entry(cpuid64)
+ ENTER_64BIT_MODE()
+ cpuid
+ ENTER_COMPAT_MODE()
+ ret
+
+
/* FXSAVE and FXRSTOR operate in a mode dependent fashion, hence these variants.
-* Must be called with interrupts disabled.
-* We clear pending x87 exceptions here; this is technically incorrect, since we should
-* propagate those to the user, but the compatibility mode kernel is currently not
-* prepared to handle exceptions originating in 64-bit kernel mode. However, it may be possible
-* to work around this should it prove necessary.
-*/
+ * Must be called with interrupts disabled.
+ */
Entry(fxsave64)
movl S_ARG0,%eax
ENTER_64BIT_MODE()
- fnclex
- fxsave 0(%eax)
+ /* movl above zero-extended the pointer, so (%eax) addresses the same
+ * save area as (%rax); FXSAVE here uses the 64-bit-mode layout. */
+ fxsave (%eax)
ENTER_COMPAT_MODE()
ret
Entry(fxrstor64)
movl S_ARG0,%eax
ENTER_64BIT_MODE()
- fnclex
- fxrstor 0(%rax)
+ /* restore from the 64-bit-mode FXSAVE layout; %rax holds the
+ * zero-extended save-area pointer loaded above */
+ fxrstor (%rax)
+ ENTER_COMPAT_MODE()
+ ret
+
+/*
+ * xsave64o -- XSAVE executed in 64-bit mode, hand-assembled because the
+ * assembler lacks the mnemonic.  .short 0xAE0F emits the bytes 0F AE
+ * (little-endian), and ModRM 0x21 = mod 00, reg /4 (XSAVE), r/m 001,
+ * i.e. "xsave (%rcx)".
+ */
+Entry(xsave64o)
+ ENTER_64BIT_MODE()
+ .short 0xAE0F
+ /* MOD 0x4, ECX, 0x1 */
+ .byte 0x21
+ ENTER_COMPAT_MODE()
+ ret
+
+/*
+ * xrstor64o -- XRSTOR executed in 64-bit mode, hand-assembled like
+ * xsave64o above: bytes 0F AE with ModRM 0x29 = mod 00, reg /5 (XRSTOR),
+ * r/m 001, i.e. "xrstor (%rcx)".
+ */
+Entry(xrstor64o)
+ ENTER_64BIT_MODE()
+ .short 0xAE0F
+ /* MOD 0x5, ECX, 0x1 */
+ .byte 0x29
+ ENTER_COMPAT_MODE()
+ ret
+
+#if CONFIG_VMX
+
+/*
+ * __vmxon -- Enter VMX Operation
+ * int __vmxon(addr64_t v);
+ * Returns VMX_SUCCEED in %eax, or VMX_FAIL_INVALID (CF=1) /
+ * VMX_FAIL_VALID (ZF=1) per the VMX error-reporting convention.
+ * The back-to-back cmovs are safe: cmov reads but never writes flags,
+ * so both still see the flags produced by vmxon.
+ */
+Entry(__vmxon)
+ FRAME
+
+ ENTER_64BIT_MODE()
+ mov $(VMX_FAIL_INVALID), %ecx
+ mov $(VMX_FAIL_VALID), %edx
+ mov $(VMX_SUCCEED), %eax
+ vmxon 8(%rbp) /* physical addr passed on stack */
+ cmovcl %ecx, %eax /* CF = 1, ZF = 0 */
+ cmovzl %edx, %eax /* CF = 0, ZF = 1 */
+ ENTER_COMPAT_MODE()
+
+ EMARF
+ ret
+
+/*
+ * __vmxoff -- Leave VMX Operation
+ * int __vmxoff(void);
+ * Returns VMX_SUCCEED in %eax, or VMX_FAIL_INVALID (CF=1) /
+ * VMX_FAIL_VALID (ZF=1) based on the flags vmxoff sets; as in __vmxon,
+ * cmov does not alter flags so the two conditional moves compose safely.
+ */
+Entry(__vmxoff)
+ FRAME
+
+ ENTER_64BIT_MODE()
+ mov $(VMX_FAIL_INVALID), %ecx
+ mov $(VMX_FAIL_VALID), %edx
+ mov $(VMX_SUCCEED), %eax
+ vmxoff
+ cmovcl %ecx, %eax /* CF = 1, ZF = 0 */
+ cmovzl %edx, %eax /* CF = 0, ZF = 1 */
+ ENTER_COMPAT_MODE()
+
+ EMARF
+ ret
+
+#endif /* CONFIG_VMX */