]> git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/i386/vmx/vmx_cpu.c
xnu-3789.70.16.tar.gz
[apple/xnu.git] / osfmk / i386 / vmx / vmx_cpu.c
index 31c16ca9f6c85b1a0d9e7210601c9737d50c0580..49b36f8468b46ac4b60f5ad1b627ccc3ffe857c9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2006-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 #include <pexpert/pexpert.h>
 #include <i386/cpuid.h>
 #include <i386/cpu_data.h>
+#include <i386/mp.h>
 #include <i386/proc_reg.h>
 #include <i386/vmx.h>
 #include <i386/vmx/vmx_asm.h>
 #include <i386/vmx/vmx_shims.h>
 #include <i386/vmx/vmx_cpu.h>
-#include <i386/mtrr.h>
 #include <mach/mach_host.h>             /* for host_info() */
-#include <i386/mp.h>
 
 #define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */
 
 int vmx_use_count = 0;
-int vmx_exclusive = 0;
-decl_simple_lock_data(static,vmx_use_count_lock)
+boolean_t vmx_exclusive = FALSE;
+
+lck_grp_t *vmx_lck_grp = NULL;
+lck_mtx_t *vmx_lck_mtx = NULL;
 
 /* -----------------------------------------------------------------------------
    vmx_is_available()
@@ -65,6 +66,7 @@ vmxon_is_enabled(void)
                (rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON));
 }
 
+#if MACH_ASSERT
 /* -----------------------------------------------------------------------------
    vmx_is_cr0_valid()
        Is CR0 valid for executing VMXON on this CPU?
@@ -72,7 +74,7 @@ vmxon_is_enabled(void)
 static inline boolean_t
 vmx_is_cr0_valid(vmx_specs_t *specs)
 {
-       uint32_t cr0 = get_cr0();
+       uintptr_t cr0 = get_cr0();
        return (0 == ((~cr0 & specs->cr0_fixed_0)|(cr0 & ~specs->cr0_fixed_1)));
 }
 
@@ -83,12 +85,14 @@ vmx_is_cr0_valid(vmx_specs_t *specs)
 static inline boolean_t
 vmx_is_cr4_valid(vmx_specs_t *specs)
 {
-       uint32_t cr4 = get_cr4();
+       uintptr_t cr4 = get_cr4();
        return (0 == ((~cr4 & specs->cr4_fixed_0)|(cr4 & ~specs->cr4_fixed_1)));
 }
 
+#endif
+
 static void
-vmx_init(void)
+vmx_enable(void)
 {
        uint64_t msr_image;
 
@@ -105,6 +109,18 @@ vmx_init(void)
                        (msr_image |
                         MSR_IA32_FEATCTL_VMXON |
                         MSR_IA32_FEATCTL_LOCK));
+
+       set_cr4(get_cr4() | CR4_VMXE);
+}
+
+void
+vmx_init()
+{
+       vmx_lck_grp = lck_grp_alloc_init("vmx", LCK_GRP_ATTR_NULL);
+       assert(vmx_lck_grp);
+
+       vmx_lck_mtx = lck_mtx_alloc_init(vmx_lck_grp, LCK_ATTR_NULL);
+       assert(vmx_lck_mtx);
 }
 
 /* -----------------------------------------------------------------------------
@@ -115,20 +131,16 @@ vmx_init(void)
        the remainder of the vmx_specs_t uninitialized. 
    -------------------------------------------------------------------------- */
 void
-vmx_get_specs()
+vmx_cpu_init()
 {
        vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;
-       uint64_t msr_image;
-       
-       /* this is called once for every CPU, but the lock doesn't care :-) */
-       simple_lock_init(&vmx_use_count_lock, 0);
 
-       vmx_init();
+       vmx_enable();
 
-       /*
-        * if we have read the data on boot, we won't read it
-        *  again on wakeup, otherwise *bad* things will happen
-        */
+       VMX_KPRINTF("[%d]vmx_cpu_init() initialized: %d\n",
+                   cpu_number(), specs->initialized);
+
+       /* if we have read the data on boot, we won't read it again on wakeup */
        if (specs->initialized)
                return;
        else
@@ -136,53 +148,21 @@ vmx_get_specs()
 
        /* See if VMX is present, return if it is not */
        specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
+       VMX_KPRINTF("[%d]vmx_cpu_init() vmx_present: %d\n",
+                   cpu_number(), specs->vmx_present);
        if (!specs->vmx_present)
                return;
 
-#define bitfield(x,f)  ((x >> f##_BIT) & f##_MASK)
-       /* Obtain and decode VMX general capabilities */        
-       msr_image = rdmsr64(MSR_IA32_VMX_BASIC);
-       specs->vmcs_id       = msr_image & VMX_VCR_VMCS_REV_ID;
-       specs->vmcs_mem_type = bitfield(msr_image, VMX_VCR_VMCS_MEM_TYPE) != 0;
-       specs->vmcs_size = bitfield(msr_image, VMX_VCR_VMCS_SIZE);
-                                                         
-       /* Obtain allowed settings for pin-based execution controls */
-       msr_image = rdmsr64(MSR_IA32_VMXPINBASED_CTLS);
-       specs->pin_exctls_0 = msr_image & 0xFFFFFFFF;
-       specs->pin_exctls_1 = msr_image >> 32;
-       
-       /* Obtain allowed settings for processor-based execution controls */
-       msr_image = rdmsr64(MSR_IA32_PROCBASED_CTLS);
-       specs->proc_exctls_0 = msr_image & 0xFFFFFFFF;
-       specs->proc_exctls_1 = msr_image >> 32;
-       
-       /* Obtain allowed settings for VM-exit controls */
-       msr_image = rdmsr64(MSR_IA32_VMX_EXIT_CTLS);
-       specs->exit_ctls_0 = msr_image & 0xFFFFFFFF;
-       specs->exit_ctls_1 = msr_image >> 32;
-       
-       /* Obtain allowed settings for VM-entry controls */
-       msr_image = rdmsr64(MSR_IA32_VMX_ENTRY_CTLS);
-       specs->enter_ctls_0 = msr_image & 0xFFFFFFFF;
-       specs->enter_ctls_0 = msr_image >> 32;
-       
-       /* Obtain and decode miscellaneous capabilities */
-       msr_image = rdmsr64(MSR_IA32_VMX_MISC);
-       specs->act_halt     = bitfield(msr_image, VMX_VCR_ACT_HLT) != 0;
-       specs->act_shutdown = bitfield(msr_image, VMX_VCR_ACT_SHUTDOWN) != 0;
-       specs->act_SIPI     = bitfield(msr_image, VMX_VCR_ACT_SIPI) != 0;
-       specs->act_CSTATE   = bitfield(msr_image, VMX_VCR_ACT_CSTATE) != 0;
-       specs->cr3_targs    = bitfield(msr_image, VMX_VCR_CR3_TARGS);
-       specs->max_msrs     = 512 * (1 + bitfield(msr_image, VMX_VCR_MAX_MSRS));
-       specs->mseg_id      = bitfield(msr_image, VMX_VCR_MSEG_ID);
-       
+#define rdmsr_mask(msr, mask) (uint32_t)(rdmsr64(msr) & (mask))
+       specs->vmcs_id = rdmsr_mask(MSR_IA32_VMX_BASIC, VMX_VCR_VMCS_REV_ID);
+
        /* Obtain VMX-fixed bits in CR0 */
-       specs->cr0_fixed_0 = rdmsr64(MSR_IA32_VMX_CR0_FIXED0) & 0xFFFFFFFF;
-       specs->cr0_fixed_1 = rdmsr64(MSR_IA32_VMX_CR0_FIXED1) & 0xFFFFFFFF;
+       specs->cr0_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED0, 0xFFFFFFFF);
+       specs->cr0_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED1, 0xFFFFFFFF);
        
        /* Obtain VMX-fixed bits in CR4 */
-       specs->cr4_fixed_0 = rdmsr64(MSR_IA32_VMX_CR4_FIXED0) & 0xFFFFFFFF;
-       specs->cr4_fixed_1 = rdmsr64(MSR_IA32_VMX_CR4_FIXED1) & 0xFFFFFFFF;
+       specs->cr4_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED0, 0xFFFFFFFF);
+       specs->cr4_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED1, 0xFFFFFFFF);
 }
 
 /* -----------------------------------------------------------------------------
@@ -190,14 +170,15 @@ vmx_get_specs()
        Enter VMX root operation on this CPU.
    -------------------------------------------------------------------------- */
 static void
-vmx_on(void)
+vmx_on(void *arg __unused)
 {
        vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
        addr64_t vmxon_region_paddr;
        int result;
 
-       vmx_init();
-       
+       VMX_KPRINTF("[%d]vmx_on() entry state: %d\n",
+                   cpu_number(), cpu->specs.vmx_on);
+
        assert(cpu->specs.vmx_present);
 
        if (NULL == cpu->vmxon_region)
@@ -207,14 +188,20 @@ vmx_on(void)
        /*
         * Enable VMX operation.
         */
-       set_cr4(get_cr4() | CR4_VMXE);
-       
-       assert(vmx_is_cr0_valid(&cpu->specs));
-       assert(vmx_is_cr4_valid(&cpu->specs));
+       if (FALSE == cpu->specs.vmx_on) {
+               assert(vmx_is_cr0_valid(&cpu->specs));
+               assert(vmx_is_cr4_valid(&cpu->specs));
        
-       if ((result = __vmxon(&vmxon_region_paddr)) != VMX_SUCCEED) {
-               panic("vmx_on: unexpected return %d from __vmxon()", result);
+               result = __vmxon(vmxon_region_paddr);
+
+               if (result != VMX_SUCCEED) {
+                       panic("vmx_on: unexpected return %d from __vmxon()", result);
+               }
+
+               cpu->specs.vmx_on = TRUE;
        }
+       VMX_KPRINTF("[%d]vmx_on() return state: %d\n",
+                   cpu_number(), cpu->specs.vmx_on);
 }
 
 /* -----------------------------------------------------------------------------
@@ -222,14 +209,27 @@ vmx_on(void)
        Leave VMX root operation on this CPU.
    -------------------------------------------------------------------------- */
 static void
-vmx_off(void)
+vmx_off(void *arg __unused)
 {
+       vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
        int result;
        
-       /* Tell the CPU to release the VMXON region */
-       if ((result = __vmxoff()) != VMX_SUCCEED) {
-               panic("vmx_off: unexpected return %d from __vmxoff()", result);
+       VMX_KPRINTF("[%d]vmx_off() entry state: %d\n",
+                   cpu_number(), cpu->specs.vmx_on);
+
+       if (TRUE == cpu->specs.vmx_on) {
+               /* Tell the CPU to release the VMXON region */
+               result = __vmxoff();
+
+               if (result != VMX_SUCCEED) {
+                       panic("vmx_off: unexpected return %d from __vmxoff()", result);
+               }
+       
+               cpu->specs.vmx_on = FALSE;
        }
+
+       VMX_KPRINTF("[%d]vmx_off() return state: %d\n",
+                   cpu_number(), cpu->specs.vmx_on);
 }
 
 /* -----------------------------------------------------------------------------
@@ -277,10 +277,10 @@ static boolean_t
 vmx_globally_available(void)
 {
        unsigned int i;
-       
+       unsigned int ncpus = ml_get_max_cpus();
        boolean_t available = TRUE;
 
-       for (i=0; i<real_ncpus; i++) {
+       for (i=0; i<ncpus; i++) {
                vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;
 
                if (!cpu->specs.vmx_present)
@@ -300,26 +300,31 @@ host_vmxon(boolean_t exclusive)
 {
        int error;
 
+       assert(0 == get_preemption_level());
+
        if (!vmx_globally_available())
                return VMX_UNSUPPORTED;
 
-       simple_lock(&vmx_use_count_lock);
+       lck_mtx_lock(vmx_lck_mtx);
 
-       if (vmx_exclusive) {
+       if (vmx_exclusive || (exclusive && vmx_use_count)) {
                error = VMX_INUSE;
-               goto out;
-       }
-       vmx_use_count++;
-       if (vmx_use_count == 1) { /* was turned off before */
-               vmx_allocate_vmxon_regions();
-               mp_rendezvous(NULL, (void (*)(void *))vmx_on, NULL, NULL);
+       } else {
+               if (0 == vmx_use_count) {
+                       vmx_allocate_vmxon_regions();
+                       vmx_exclusive = exclusive;
+                       vmx_use_count = 1;
+                       mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_on, NULL);
+
+               } else {
+                       vmx_use_count++;
+               }
+
+               VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
+               error = VMX_OK;
        }
-       vmx_exclusive = exclusive;
 
-       VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
-       error = VMX_OK;
-out:
-       simple_unlock(&vmx_use_count_lock);
+       lck_mtx_unlock(vmx_lck_mtx);
 
        return error;
 }
@@ -331,18 +336,20 @@ out:
 void
 host_vmxoff()
 {
-       simple_lock(&vmx_use_count_lock);
+       assert(0 == get_preemption_level());
 
-       if (vmx_use_count) {
+       lck_mtx_lock(vmx_lck_mtx);
+
+       if (1 == vmx_use_count) {
+               vmx_exclusive = FALSE;
+               vmx_use_count = 0;
+               mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_off, NULL);
+               vmx_free_vmxon_regions();
+       } else {
                vmx_use_count--;
-               vmx_exclusive = 0;
-               if (!vmx_use_count) {
-                       mp_rendezvous(NULL, (void (*)(void *))vmx_off, NULL, NULL);
-                       vmx_free_vmxon_regions();
-               }
        }
 
-       simple_unlock(&vmx_use_count_lock);
+       lck_mtx_unlock(vmx_lck_mtx);
 
        VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
 }
@@ -356,8 +363,9 @@ void
 vmx_suspend()
 {
        VMX_KPRINTF("vmx_suspend\n");
+
        if (vmx_use_count)
-               vmx_off();
+               vmx_off(NULL);
 }
 
 /* -----------------------------------------------------------------------------
@@ -365,9 +373,49 @@ vmx_suspend()
        Restore the previous VT state. Called when CPU comes back online.
    -------------------------------------------------------------------------- */
 void
-vmx_resume()
+vmx_resume(boolean_t is_wake_from_hibernate)
 {
        VMX_KPRINTF("vmx_resume\n");
-       if (vmx_use_count)
-               vmx_on();
+
+       vmx_enable();
+
+       if (vmx_use_count == 0)
+               return;
+
+       /*
+        * When resuming from hibernate on the boot cpu,
+        * we must mark VMX as off since that's the state at wake-up
+        * because the restored state in memory records otherwise.
+        * This results in vmx_on() doing the right thing.
+        */
+       if (is_wake_from_hibernate) {
+               vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
+               cpu->specs.vmx_on = FALSE;
+       }
+
+       vmx_on(NULL);
+}
+
+/* -----------------------------------------------------------------------------
+   vmx_hv_support()
+       Determine if the VMX feature set is sufficient for kernel HV support.
+   -------------------------------------------------------------------------- */
+boolean_t
+vmx_hv_support()
+{
+       if (!vmx_is_available())
+               return FALSE;
+
+#define CHK(msr, shift, mask) if (!VMX_CAP(msr, shift, mask)) return FALSE;
+
+       /* 'EPT' and 'Unrestricted Mode' are part of the secondary processor-based
+        * VM-execution controls */
+       CHK(MSR_IA32_VMX_BASIC, 0, VMX_BASIC_TRUE_CTLS)
+       CHK(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 32, VMX_TRUE_PROCBASED_SECONDARY_CTLS)
+
+       /* if we have these, check for 'EPT' and 'Unrestricted Mode' */
+       CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_EPT)
+       CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_UNRESTRICTED)
+
+       return TRUE;
 }