/*
* Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the
- * License may not be used to create, or enable the creation or
- * redistribution of, unlawful or unlicensed copies of an Apple operating
- * system, or to circumvent, violate, or enable the circumvention or
- * violation of, any terms of an Apple operating system software license
- * agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
* limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <platforms.h>
-#include <mach_kdb.h>
-
#include <i386/asm.h>
#include <i386/asm64.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
+#include <i386/vmx/vmx_asm.h>
#include <assym.s>
.data
ret
-Entry(ml_64bit_wrmsr64)
- /* (uint32_t msr, uint64_t value) */
- /* (uint32_t msr, uint32_t lo, uint32_t hi) */
-
- FRAME
-
- ENTER_64BIT_MODE()
-
- movl B_ARG0, %ecx
- movl B_ARG1, %eax
- movl B_ARG2, %edx
- wrmsr
-
- ENTER_COMPAT_MODE()
-
- EMARF
- ret
-
-
Entry(ml_64bit_lldt)
/* (int32_t selector) */
EMARF
ret
+/*
+ * set64_cr3 -- load %cr3 with a 64-bit value from compat-mode code.
+ * Presumed C signature: void set64_cr3(uint64_t value) -- TODO confirm
+ * against caller; the value arrives as two 32-bit stack words
+ * (B_ARG0 = low half, B_ARG1 = high half).
+ */
+Entry(set64_cr3)
+
+	FRAME
+
+	movl	B_ARG0, %eax		/* low 32 bits of new CR3 */
+	movl	B_ARG1, %edx		/* high 32 bits of new CR3 */
+
+	ENTER_64BIT_MODE()
+
+	/* %rax = %edx:%eax */
+	shl	$32, %rax		/* rax = lo << 32 */
+	shrd	$32, %rdx, %rax		/* rax = (hi << 32) | lo */
+
+	mov	%rax, %cr3
+
+	ENTER_COMPAT_MODE()
+
+	EMARF
+	ret
+
+/*
+ * get64_cr3 -- read the full 64-bit %cr3 from compat-mode code.
+ * Presumed C signature: uint64_t get64_cr3(void) -- TODO confirm;
+ * result is returned split across %edx:%eax per the 32-bit convention.
+ */
+Entry(get64_cr3)
+
+	FRAME
+
+	ENTER_64BIT_MODE()
+
+	mov	%cr3, %rax
+	mov	%rax, %rdx
+	shr	$32, %rdx		// %edx:%eax = %cr3
+
+	ENTER_COMPAT_MODE()
+
+	EMARF
+	ret
+
+/*
+ * cpuid64 -- execute CPUID while in 64-bit mode.
+ * No argument marshalling is done here: the leaf/subleaf are whatever the
+ * caller left in %eax/%ecx, and the results remain in %eax/%ebx/%ecx/%edx
+ * on return (i.e. all four registers are clobbered).
+ */
+Entry(cpuid64)
+	ENTER_64BIT_MODE()
+	cpuid
+	ENTER_COMPAT_MODE()
+	ret
+
+
/* FXSAVE and FXRSTOR operate in a mode dependent fashion, hence these variants.
-* Must be called with interrupts disabled.
-* We clear pending x87 exceptions here; this is technically incorrect, since we should
-* propagate those to the user, but the compatibility mode kernel is currently not
-* prepared to handle exceptions originating in 64-bit kernel mode. However, it may be possible
-* to work around this should it prove necessary.
-*/
+ * Must be called with interrupts disabled.
+ */
+/*
+ * fxsave64 -- FXSAVE executed in 64-bit mode so the 64-bit forms of the
+ * instruction/operand pointer fields are captured (see comment above).
+ * S_ARG0 = pointer to the save area.  Note the fnclex is dropped:
+ * pending x87 exceptions are no longer cleared before saving.
+ */
Entry(fxsave64)
	movl S_ARG0,%eax
	ENTER_64BIT_MODE()
-	fnclex
-	fxsave 0(%eax)
+	fxsave (%eax)			/* save x87/SSE state to buffer at %eax */
	ENTER_COMPAT_MODE()
	ret
+/*
+ * fxrstor64 -- FXRSTOR executed in 64-bit mode.
+ * S_ARG0 = pointer to the save area; the movl below zero-extends it,
+ * so %rax is a valid 64-bit pointer.  fnclex is dropped: pending x87
+ * exceptions are no longer cleared before restoring.
+ */
Entry(fxrstor64)
	movl S_ARG0,%eax
	ENTER_64BIT_MODE()
-	fnclex
-	fxrstor 0(%rax)
+	fxrstor (%rax)			/* %rax = zero-extended S_ARG0 */
	ENTER_COMPAT_MODE()
	ret
+
+/*
+ * xsave64o -- XSAVE executed in 64-bit mode, hand-assembled as raw bytes
+ * (0F AE with ModRM 0x21 = /4, base %rcx, i.e. "xsave (%rcx)").
+ * NOTE(review): this therefore expects the save-area pointer in %rcx,
+ * with EDX:EAX as the ISA-defined state-component mask -- confirm callers
+ * set these up before the trampoline.
+ */
+Entry(xsave64o)
+	ENTER_64BIT_MODE()
+	.short 0xAE0F
+	/* MOD 0x4, ECX, 0x1 */
+	.byte 0x21
+	ENTER_COMPAT_MODE()
+	ret
+
+/*
+ * xrstor64o -- XRSTOR executed in 64-bit mode, hand-assembled as raw bytes
+ * (0F AE with ModRM 0x29 = /5, base %rcx, i.e. "xrstor (%rcx)").
+ * NOTE(review): expects the save-area pointer in %rcx and the mask in
+ * EDX:EAX, mirroring xsave64o -- confirm callers.
+ */
+Entry(xrstor64o)
+	ENTER_64BIT_MODE()
+	.short 0xAE0F
+	/* MOD 0x5, ECX 0x1 */
+	.byte 0x29
+	ENTER_COMPAT_MODE()
+	ret
+
+#if CONFIG_VMX
+
+/*
+ * __vmxon -- Enter VMX Operation
+ * int __vmxon(addr64_t v);
+ *
+ * Returns one of VMX_SUCCEED / VMX_FAIL_INVALID / VMX_FAIL_VALID per the
+ * VMX instruction status convention: CF=1 => VMfailInvalid,
+ * ZF=1 => VMfailValid, neither => VMsucceed.  Candidate results are
+ * preloaded and selected with conditional moves after vmxon.
+ */
+Entry(__vmxon)
+	FRAME
+
+	ENTER_64BIT_MODE()
+	mov	$(VMX_FAIL_INVALID), %ecx
+	mov	$(VMX_FAIL_VALID), %edx
+	mov	$(VMX_SUCCEED), %eax
+	vmxon	8(%rbp)		/* physical addr passed on stack */
+	cmovcl	%ecx, %eax	/* CF = 1, ZF = 0 */
+	cmovzl	%edx, %eax	/* CF = 0, ZF = 1 */
+	ENTER_COMPAT_MODE()
+
+	EMARF
+	ret
+
+/*
+ * __vmxoff -- Leave VMX Operation
+ * int __vmxoff(void);
+ *
+ * Returns one of VMX_SUCCEED / VMX_FAIL_INVALID / VMX_FAIL_VALID per the
+ * VMX instruction status convention: CF=1 => VMfailInvalid,
+ * ZF=1 => VMfailValid, neither => VMsucceed.  Same cmov-selection
+ * pattern as __vmxon above.
+ */
+Entry(__vmxoff)
+	FRAME
+
+	ENTER_64BIT_MODE()
+	mov	$(VMX_FAIL_INVALID), %ecx
+	mov	$(VMX_FAIL_VALID), %edx
+	mov	$(VMX_SUCCEED), %eax
+	vmxoff
+	cmovcl	%ecx, %eax	/* CF = 1, ZF = 0 */
+	cmovzl	%edx, %eax	/* CF = 0, ZF = 1 */
+	ENTER_COMPAT_MODE()
+
+	EMARF
+	ret
+
+#endif /* CONFIG_VMX */