/*
 * Copyright (c) 2006-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/pexpert.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/vmx.h>
#include <i386/vmx/vmx_asm.h>
#include <i386/vmx/vmx_shims.h>
#include <i386/vmx/vmx_cpu.h>
#include <mach/mach_host.h>	/* for host_info() */

#define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */

int vmx_use_count = 0;
boolean_t vmx_exclusive = FALSE;

lck_grp_t *vmx_lck_grp = NULL;
lck_mtx_t *vmx_lck_mtx = NULL;

/* -----------------------------------------------------------------------------
   vmx_is_available()
	Is the VMX facility available on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_available(void)
{
	return (0 != (cpuid_features() & CPUID_FEATURE_VMX));
}

/* -----------------------------------------------------------------------------
   vmxon_is_enabled()
	Is the VMXON instruction enabled on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmxon_is_enabled(void)
{
	return (vmx_is_available() &&
		(rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON));
}

#if MACH_ASSERT
/* -----------------------------------------------------------------------------
   vmx_is_cr0_valid()
	Is CR0 valid for executing VMXON on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr0_valid(vmx_specs_t *specs)
{
	uintptr_t cr0 = get_cr0();
	return (0 == ((~cr0 & specs->cr0_fixed_0)|(cr0 & ~specs->cr0_fixed_1)));
}

/* -----------------------------------------------------------------------------
   vmx_is_cr4_valid()
	Is CR4 valid for executing VMXON on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr4_valid(vmx_specs_t *specs)
{
	uintptr_t cr4 = get_cr4();
	return (0 == ((~cr4 & specs->cr4_fixed_0)|(cr4 & ~specs->cr4_fixed_1)));
}

#endif

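/*
 * Illustrative sketch, not part of the original file: per the Intel SDM,
 * the IA32_VMX_CR{0,4}_FIXED0 MSRs report bits that must be 1 and the
 * FIXED1 MSRs report bits that are allowed to be 1 while in VMX operation,
 * which is what the two checks above test. A hypothetical helper that
 * forces a register value into compliant form would combine them as:
 */
static inline uintptr_t
vmx_make_compliant(uintptr_t value, uint64_t fixed_0, uint64_t fixed_1)	/* hypothetical */
{
	/* set every must-be-1 bit, then clear every must-be-0 bit */
	return (uintptr_t)((value | fixed_0) & fixed_1);
}
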
static void
vmx_enable(void)
{
	uint64_t msr_image;

	if (!vmx_is_available())
		return;

	/*
	 * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL
	 * and turning VMXON on and locking the bit, so we do that now.
	 */
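	/*
	 * If firmware already set MSR_IA32_FEATCTL_LOCK but left VMXON
	 * disabled, the MSR cannot be rewritten until reset; in that case
	 * vmxon_is_enabled() reports FALSE and vmx_cpu_init() records
	 * vmx_present as FALSE.
	 */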
	msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL);
	if (0 == ((msr_image & MSR_IA32_FEATCTL_LOCK)))
		wrmsr64(MSR_IA32_FEATURE_CONTROL,
			(msr_image |
			 MSR_IA32_FEATCTL_VMXON |
			 MSR_IA32_FEATCTL_LOCK));

	set_cr4(get_cr4() | CR4_VMXE);
}

void
vmx_init()
{
	vmx_lck_grp = lck_grp_alloc_init("vmx", LCK_GRP_ATTR_NULL);
	assert(vmx_lck_grp);

	vmx_lck_mtx = lck_mtx_alloc_init(vmx_lck_grp, LCK_ATTR_NULL);
	assert(vmx_lck_mtx);
}

/* -----------------------------------------------------------------------------
   vmx_cpu_init()
	Obtain VMX facility specifications for this CPU and
	enter them into the vmx_specs_t structure. If VMX is not available or
	disabled on this CPU, set vmx_present to false and return leaving
	the remainder of the vmx_specs_t uninitialized.
   -------------------------------------------------------------------------- */
void
vmx_cpu_init()
{
	vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;

	vmx_enable();

	VMX_KPRINTF("[%d]vmx_cpu_init() initialized: %d\n",
	    cpu_number(), specs->initialized);

	/* if we have read the data on boot, we won't read it again on wakeup */
	if (specs->initialized)
		return;
	else
		specs->initialized = TRUE;

	/* See if VMX is present, return if it is not */
	specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
	VMX_KPRINTF("[%d]vmx_cpu_init() vmx_present: %d\n",
	    cpu_number(), specs->vmx_present);
	if (!specs->vmx_present)
		return;

#define rdmsr_mask(msr, mask) (uint32_t)(rdmsr64(msr) & (mask))
	specs->vmcs_id = rdmsr_mask(MSR_IA32_VMX_BASIC, VMX_VCR_VMCS_REV_ID);

	/* Obtain VMX-fixed bits in CR0 */
	specs->cr0_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED0, 0xFFFFFFFF);
	specs->cr0_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED1, 0xFFFFFFFF);

	/* Obtain VMX-fixed bits in CR4 */
	specs->cr4_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED0, 0xFFFFFFFF);
	specs->cr4_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED1, 0xFFFFFFFF);
}

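/*
 * Note: the values captured above are consumed elsewhere in this file:
 * vmcs_id seeds each CPU's VMXON region in vmx_allocate_vmxon_regions(),
 * and the CR0/CR4 fixed masks back the MACH_ASSERT-only validity checks
 * performed in vmx_on().
 */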
/* -----------------------------------------------------------------------------
   vmx_on()
	Enter VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_on(void *arg __unused)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	addr64_t vmxon_region_paddr;
	int result;

	VMX_KPRINTF("[%d]vmx_on() entry state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);

	assert(cpu->specs.vmx_present);

	if (NULL == cpu->vmxon_region)
		panic("vmx_on: VMXON region not allocated");
	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

	/*
	 * Enable VMX operation.
	 */
	if (FALSE == cpu->specs.vmx_on) {
		assert(vmx_is_cr0_valid(&cpu->specs));
		assert(vmx_is_cr4_valid(&cpu->specs));

		result = __vmxon(vmxon_region_paddr);

		if (result != VMX_SUCCEED) {
			panic("vmx_on: unexpected return %d from __vmxon()", result);
		}

		cpu->specs.vmx_on = TRUE;
	}
	VMX_KPRINTF("[%d]vmx_on() return state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);
}

/* -----------------------------------------------------------------------------
   vmx_off()
	Leave VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_off(void *arg __unused)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	int result;

	VMX_KPRINTF("[%d]vmx_off() entry state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);

	if (TRUE == cpu->specs.vmx_on) {
		/* Tell the CPU to release the VMXON region */
		result = __vmxoff();

		if (result != VMX_SUCCEED) {
			panic("vmx_off: unexpected return %d from __vmxoff()", result);
		}

		cpu->specs.vmx_on = FALSE;
	}

	VMX_KPRINTF("[%d]vmx_off() return state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);
}

/* -----------------------------------------------------------------------------
   vmx_allocate_vmxon_regions()
	Allocate, clear and init VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_allocate_vmxon_regions(void)
{
	unsigned int i;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		/* The size is defined to be always <= 4K, so we just allocate a page */
		cpu->vmxon_region = vmx_pcalloc();
		if (NULL == cpu->vmxon_region)
			panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
		/* The first 32 bits of the region must hold the VMCS revision identifier */
		*(uint32_t*)(cpu->vmxon_region) = cpu->specs.vmcs_id;
	}
}

/* -----------------------------------------------------------------------------
   vmx_free_vmxon_regions()
	Free VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_free_vmxon_regions(void)
{
	unsigned int i;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		vmx_pfree(cpu->vmxon_region);
		cpu->vmxon_region = NULL;
	}
}

/* -----------------------------------------------------------------------------
   vmx_globally_available()
	Checks whether VT can be turned on for all CPUs.
   -------------------------------------------------------------------------- */
static boolean_t
vmx_globally_available(void)
{
	unsigned int i;
	unsigned int ncpus = ml_get_max_cpus();
	boolean_t available = TRUE;

	for (i=0; i<ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		if (!cpu->specs.vmx_present)
			available = FALSE;
	}
	VMX_KPRINTF("VMX available: %d\n", available);
	return available;
}


/* -----------------------------------------------------------------------------
   host_vmxon()
	Turn on VT operation on all CPUs.
   -------------------------------------------------------------------------- */
int
host_vmxon(boolean_t exclusive)
{
	int error;

	assert(0 == get_preemption_level());

	if (!vmx_globally_available())
		return VMX_UNSUPPORTED;

	lck_mtx_lock(vmx_lck_mtx);

	if (vmx_exclusive || (exclusive && vmx_use_count)) {
		error = VMX_INUSE;
	} else {
		if (0 == vmx_use_count) {
			vmx_allocate_vmxon_regions();
			vmx_exclusive = exclusive;
			vmx_use_count = 1;
			mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_on, NULL);

		} else {
			vmx_use_count++;
		}

		VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
		error = VMX_OK;
	}

	lck_mtx_unlock(vmx_lck_mtx);

	return error;
}

/* -----------------------------------------------------------------------------
   host_vmxoff()
	Turn off VT operation on all CPUs.
   -------------------------------------------------------------------------- */
void
host_vmxoff()
{
	assert(0 == get_preemption_level());

	lck_mtx_lock(vmx_lck_mtx);

	if (1 == vmx_use_count) {
		vmx_exclusive = FALSE;
		vmx_use_count = 0;
		mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_off, NULL);
		vmx_free_vmxon_regions();
	} else {
		vmx_use_count--;
	}

	lck_mtx_unlock(vmx_lck_mtx);

	VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
}

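/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * in-kernel client of the interface above would bracket its use of VT-x
 * with the reference-counted host_vmxon()/host_vmxoff() pair. Only
 * host_vmxon(), host_vmxoff() and the VMX_* return codes are real; the
 * helper name is an assumption for illustration.
 */
static inline int
example_vmx_client_session(void)	/* hypothetical helper */
{
	int err = host_vmxon(FALSE);	/* shared (non-exclusive) use */

	if (err != VMX_OK)
		return err;		/* VMX_UNSUPPORTED or VMX_INUSE */

	/* ... run guests in VMX non-root operation via a VMCS ... */

	host_vmxoff();			/* drop this client's reference */
	return VMX_OK;
}
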
/* -----------------------------------------------------------------------------
   vmx_suspend()
	Turn off VT operation on this CPU if it was on.
	Called when a CPU goes offline.
   -------------------------------------------------------------------------- */
void
vmx_suspend()
{
	VMX_KPRINTF("vmx_suspend\n");

	if (vmx_use_count)
		vmx_off(NULL);
}

/* -----------------------------------------------------------------------------
   vmx_resume()
	Restore the previous VT state. Called when CPU comes back online.
   -------------------------------------------------------------------------- */
void
vmx_resume(boolean_t is_wake_from_hibernate)
{
	VMX_KPRINTF("vmx_resume\n");

	vmx_enable();

	if (vmx_use_count == 0)
		return;

	/*
	 * When resuming from hibernation on the boot cpu, VMX is actually
	 * off at wake-up even though the state restored from memory says
	 * otherwise, so mark it off here. This lets vmx_on() below do the
	 * right thing.
	 */
	if (is_wake_from_hibernate) {
		vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
		cpu->specs.vmx_on = FALSE;
	}

	vmx_on(NULL);
}

/* -----------------------------------------------------------------------------
   vmx_hv_support()
	Determine if the VMX feature set is sufficient for kernel HV support.
   -------------------------------------------------------------------------- */
boolean_t
vmx_hv_support()
{
	if (!vmx_is_available())
		return FALSE;

#define CHK(msr, shift, mask) if (!VMX_CAP(msr, shift, mask)) return FALSE;

	/* 'EPT' and 'Unrestricted Mode' are part of the secondary processor-based
	 * VM-execution controls */
	CHK(MSR_IA32_VMX_BASIC, 0, VMX_BASIC_TRUE_CTLS)
	CHK(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 32, VMX_TRUE_PROCBASED_SECONDARY_CTLS)

	/* if we have these, check for 'EPT' and 'Unrestricted Mode' */
	CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_EPT)
	CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_UNRESTRICTED)

	return TRUE;
}
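
/*
 * Illustrative sketch, not part of the original file: the capability MSRs
 * consulted above report their "allowed-1" control settings in bits 63:32,
 * which is why the secondary-control checks pass a shift of 32 to VMX_CAP().
 * A hypothetical standalone test for EPT support could read the MSR directly;
 * the helper name is an assumption for illustration.
 */
static inline boolean_t
example_vmx_ept_allowed(void)	/* hypothetical helper */
{
	/* allowed-1 settings live in the high 32 bits of this MSR */
	uint32_t allowed_1 = (uint32_t)(rdmsr64(MSR_IA32_VMX_PROCBASED_CTLS2) >> 32);

	return (0 != (allowed_1 & VMX_PROCBASED_CTLS2_EPT));
}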