]>
Commit | Line | Data |
---|---|---|
2d21ac55 | 1 | /* |
39236c6e | 2 | * Copyright (c) 2006-2012 Apple Inc. All rights reserved. |
2d21ac55 A |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
0a7de745 | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
0a7de745 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
0a7de745 | 17 | * |
2d21ac55 A |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
0a7de745 | 25 | * |
2d21ac55 A |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ | |
28 | ||
29 | #include <pexpert/pexpert.h> | |
30 | #include <i386/cpuid.h> | |
31 | #include <i386/cpu_data.h> | |
b0d623f7 | 32 | #include <i386/mp.h> |
2d21ac55 A |
33 | #include <i386/proc_reg.h> |
34 | #include <i386/vmx.h> | |
35 | #include <i386/vmx/vmx_asm.h> | |
36 | #include <i386/vmx/vmx_shims.h> | |
37 | #include <i386/vmx/vmx_cpu.h> | |
2d21ac55 | 38 | #include <mach/mach_host.h> /* for host_info() */ |
2d21ac55 A |
39 | |
/* Debug trace macro; compiled out unless the kprintf call is uncommented. */
#define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */

/* Number of active host_vmxon() clients; guarded by vmx_lck_mtx. */
int vmx_use_count = 0;
/* TRUE while the single current client holds VMX exclusively; guarded by vmx_lck_mtx. */
boolean_t vmx_exclusive = FALSE;

static LCK_GRP_DECLARE(vmx_lck_grp, "vmx");
static LCK_MTX_DECLARE(vmx_lck_mtx, &vmx_lck_grp);
2d21ac55 A |
47 | |
48 | /* ----------------------------------------------------------------------------- | |
0a7de745 A |
49 | * vmx_is_available() |
50 | * Is the VMX facility available on this CPU? | |
51 | * -------------------------------------------------------------------------- */ | |
2d21ac55 A |
52 | static inline boolean_t |
53 | vmx_is_available(void) | |
54 | { | |
0a7de745 | 55 | return 0 != (cpuid_features() & CPUID_FEATURE_VMX); |
2d21ac55 A |
56 | } |
57 | ||
58 | /* ----------------------------------------------------------------------------- | |
0a7de745 A |
59 | * vmxon_is_enabled() |
60 | * Is the VMXON instruction enabled on this CPU? | |
61 | * -------------------------------------------------------------------------- */ | |
2d21ac55 A |
62 | static inline boolean_t |
63 | vmxon_is_enabled(void) | |
64 | { | |
0a7de745 A |
65 | return vmx_is_available() && |
66 | (rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON); | |
2d21ac55 A |
67 | } |
68 | ||
fe8ab488 | 69 | #if MACH_ASSERT |
2d21ac55 | 70 | /* ----------------------------------------------------------------------------- |
0a7de745 A |
71 | * vmx_is_cr0_valid() |
72 | * Is CR0 valid for executing VMXON on this CPU? | |
73 | * -------------------------------------------------------------------------- */ | |
2d21ac55 A |
74 | static inline boolean_t |
75 | vmx_is_cr0_valid(vmx_specs_t *specs) | |
76 | { | |
b0d623f7 | 77 | uintptr_t cr0 = get_cr0(); |
0a7de745 | 78 | return 0 == ((~cr0 & specs->cr0_fixed_0) | (cr0 & ~specs->cr0_fixed_1)); |
2d21ac55 A |
79 | } |
80 | ||
81 | /* ----------------------------------------------------------------------------- | |
0a7de745 A |
82 | * vmx_is_cr4_valid() |
83 | * Is CR4 valid for executing VMXON on this CPU? | |
84 | * -------------------------------------------------------------------------- */ | |
2d21ac55 A |
85 | static inline boolean_t |
86 | vmx_is_cr4_valid(vmx_specs_t *specs) | |
87 | { | |
b0d623f7 | 88 | uintptr_t cr4 = get_cr4(); |
0a7de745 | 89 | return 0 == ((~cr4 & specs->cr4_fixed_0) | (cr4 & ~specs->cr4_fixed_1)); |
2d21ac55 A |
90 | } |
91 | ||
fe8ab488 A |
92 | #endif |
93 | ||
/* -----------------------------------------------------------------------------
 *  vmx_enable()
 *	Enable VMX on this CPU: set the VMXON-enable and lock bits in
 *	IA32_FEATURE_CONTROL (if not already locked by firmware) and set
 *	CR4.VMXE. Silently does nothing if the CPU lacks VMX support.
 * -------------------------------------------------------------------------- */
static void
vmx_enable(void)
{
	uint64_t msr_image;

	if (!vmx_is_available()) {
		return;
	}

	/*
	 * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL
	 * and turning VMXON on and locking the bit, so we do that now.
	 * Once the lock bit is set the MSR can no longer be written, so
	 * if firmware already locked it we must leave it as-is.
	 */
	msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL);
	if (0 == ((msr_image & MSR_IA32_FEATCTL_LOCK))) {
		wrmsr64(MSR_IA32_FEATURE_CONTROL,
		    (msr_image |
		    MSR_IA32_FEATCTL_VMXON |
		    MSR_IA32_FEATCTL_LOCK));
	}

	/* CR4.VMXE must be set before VMXON can be executed. */
	set_cr4(get_cr4() | CR4_VMXE);
}
117 | ||
/* -----------------------------------------------------------------------------
 *  vmx_cpu_init()
 *	Enable VMX and obtain VMX facility specifications for this CPU,
 *	entering them into the per-cpu vmx_specs_t structure. If VMX is not
 *	available or disabled on this CPU, set vmx_present to false and return
 *	leaving the remainder of the vmx_specs_t uninitialized.
 * -------------------------------------------------------------------------- */
void
vmx_cpu_init()
{
	vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;

	vmx_enable();

	VMX_KPRINTF("[%d]vmx_cpu_init() initialized: %d\n",
	    cpu_number(), specs->initialized);

	/* if we have read the data on boot, we won't read it again on wakeup */
	if (specs->initialized) {
		return;
	} else {
		specs->initialized = TRUE;
	}

	/* See if VMX is present, return if it is not */
	specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
	VMX_KPRINTF("[%d]vmx_cpu_init() vmx_present: %d\n",
	    cpu_number(), specs->vmx_present);
	if (!specs->vmx_present) {
		return;
	}

	/* Read an MSR and keep only the masked (low 32) bits. */
#define rdmsr_mask(msr, mask) (uint32_t)(rdmsr64(msr) & (mask))
	specs->vmcs_id = rdmsr_mask(MSR_IA32_VMX_BASIC, VMX_VCR_VMCS_REV_ID);

	/* Obtain VMX-fixed bits in CR0 */
	specs->cr0_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED0, 0xFFFFFFFF);
	specs->cr0_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED1, 0xFFFFFFFF);

	/* Obtain VMX-fixed bits in CR4 */
	specs->cr4_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED0, 0xFFFFFFFF);
	specs->cr4_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED1, 0xFFFFFFFF);
}
161 | ||
/* -----------------------------------------------------------------------------
 *  vmx_on()
 *	Enter VMX root operation on this CPU.
 *	Runs on each CPU via mp_cpus_call(); panics if the per-cpu VMXON
 *	region was not allocated or if the VMXON instruction fails.
 * -------------------------------------------------------------------------- */
static void
vmx_on(void *arg __unused)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	addr64_t vmxon_region_paddr;
	int result;

	VMX_KPRINTF("[%d]vmx_on() entry state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);

	assert(cpu->specs.vmx_present);

	if (NULL == cpu->vmxon_region) {
		panic("vmx_on: VMXON region not allocated");
	}
	/* VMXON takes the physical address of the per-cpu region. */
	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

	/*
	 * Enable VMX operation.
	 */
	if (FALSE == cpu->specs.vmx_on) {
		/* CR0/CR4 must satisfy the VMX fixed-bit constraints first. */
		assert(vmx_is_cr0_valid(&cpu->specs));
		assert(vmx_is_cr4_valid(&cpu->specs));

		result = __vmxon(vmxon_region_paddr);

		if (result != VMX_SUCCEED) {
			panic("vmx_on: unexpected return %d from __vmxon()", result);
		}

		cpu->specs.vmx_on = TRUE;
	}
	VMX_KPRINTF("[%d]vmx_on() return state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);
}
201 | ||
/* -----------------------------------------------------------------------------
 *  vmx_off()
 *	Leave VMX root operation on this CPU.
 *	No-op if this CPU is not currently in VMX root operation; panics if
 *	the VMXOFF instruction fails.
 * -------------------------------------------------------------------------- */
static void
vmx_off(void *arg __unused)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	int result;

	VMX_KPRINTF("[%d]vmx_off() entry state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);

	if (TRUE == cpu->specs.vmx_on) {
		/* Tell the CPU to release the VMXON region */
		result = __vmxoff();

		if (result != VMX_SUCCEED) {
			panic("vmx_off: unexpected return %d from __vmxoff()", result);
		}

		cpu->specs.vmx_on = FALSE;
	}

	VMX_KPRINTF("[%d]vmx_off() return state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);
}
229 | ||
/* -----------------------------------------------------------------------------
 *  vmx_allocate_vmxon_regions()
 *	Allocate, clear and init VMXON regions for all CPUs.
 *	Each region's first word is stamped with the VMCS revision id read
 *	earlier by vmx_cpu_init(). Panics on allocation failure.
 * -------------------------------------------------------------------------- */
static void
vmx_allocate_vmxon_regions(void)
{
	unsigned int i;

	for (i = 0; i < real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		/* The size is defined to be always <= 4K, so we just allocate a page */
		cpu->vmxon_region = vmx_pcalloc();
		if (NULL == cpu->vmxon_region) {
			panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
		}
		*(uint32_t*)(cpu->vmxon_region) = cpu->specs.vmcs_id;
	}
}
250 | ||
/* -----------------------------------------------------------------------------
 *  vmx_free_vmxon_regions()
 *	Free VMXON regions for all CPUs and NULL the per-cpu pointers so a
 *	stale region cannot be reused.
 * -------------------------------------------------------------------------- */
static void
vmx_free_vmxon_regions(void)
{
	unsigned int i;

	for (i = 0; i < real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		vmx_pfree(cpu->vmxon_region);
		cpu->vmxon_region = NULL;
	}
}
267 | ||
268 | /* ----------------------------------------------------------------------------- | |
0a7de745 A |
269 | * vmx_globally_available() |
270 | * Checks whether VT can be turned on for all CPUs. | |
271 | * -------------------------------------------------------------------------- */ | |
2d21ac55 A |
272 | static boolean_t |
273 | vmx_globally_available(void) | |
274 | { | |
275 | unsigned int i; | |
f427ee49 | 276 | unsigned int ncpus = ml_wait_max_cpus(); |
2d21ac55 A |
277 | boolean_t available = TRUE; |
278 | ||
0a7de745 | 279 | for (i = 0; i < ncpus; i++) { |
2d21ac55 A |
280 | vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx; |
281 | ||
0a7de745 | 282 | if (!cpu->specs.vmx_present) { |
2d21ac55 | 283 | available = FALSE; |
0a7de745 | 284 | } |
2d21ac55 A |
285 | } |
286 | VMX_KPRINTF("VMX available: %d\n", available); | |
287 | return available; | |
288 | } | |
289 | ||
290 | ||
/* -----------------------------------------------------------------------------
 *  host_vmxon()
 *	Turn on VT operation on all CPUs.
 *	Reference-counted: the first caller allocates the VMXON regions and
 *	broadcasts vmx_on to every CPU; later callers just bump the count.
 *	Returns VMX_UNSUPPORTED if any CPU lacks VMX, VMX_INUSE if an
 *	exclusive claim conflicts with existing use, else VMX_OK.
 * -------------------------------------------------------------------------- */
int
host_vmxon(boolean_t exclusive)
{
	int error;

	/* Must be preemptible: we take a mutex and cross-call all CPUs. */
	assert(0 == get_preemption_level());

	if (!vmx_globally_available()) {
		return VMX_UNSUPPORTED;
	}

	lck_mtx_lock(&vmx_lck_mtx);

	/* An existing exclusive holder, or asking for exclusivity while in use, fails. */
	if (vmx_exclusive || (exclusive && vmx_use_count)) {
		error = VMX_INUSE;
	} else {
		if (0 == vmx_use_count) {
			/* First user: allocate regions and enter VMX root on all CPUs. */
			vmx_allocate_vmxon_regions();
			vmx_exclusive = exclusive;
			vmx_use_count = 1;
			mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_on, NULL);
		} else {
			vmx_use_count++;
		}

		VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
		error = VMX_OK;
	}

	lck_mtx_unlock(&vmx_lck_mtx);

	return error;
}
328 | ||
/* -----------------------------------------------------------------------------
 *  host_vmxoff()
 *	Turn off VT operation on all CPUs.
 *	Drops one reference; the last reference broadcasts vmx_off to every
 *	CPU and frees the VMXON regions.
 *	NOTE(review): assumes callers balance host_vmxon()/host_vmxoff();
 *	an unbalanced call with use count 0 would decrement it to -1 — confirm
 *	the caller contract guarantees balance.
 * -------------------------------------------------------------------------- */
void
host_vmxoff()
{
	/* Must be preemptible: we take a mutex and may cross-call all CPUs. */
	assert(0 == get_preemption_level());

	lck_mtx_lock(&vmx_lck_mtx);

	if (1 == vmx_use_count) {
		/* Last user: leave VMX root everywhere, then release the regions. */
		vmx_exclusive = FALSE;
		vmx_use_count = 0;
		mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_off, NULL);
		vmx_free_vmxon_regions();
	} else {
		vmx_use_count--;
	}

	lck_mtx_unlock(&vmx_lck_mtx);

	VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
}
353 | ||
/* -----------------------------------------------------------------------------
 *  vmx_suspend()
 *	Turn off VT operation on this CPU if it was on.
 *	Called when a CPU goes offline.
 * -------------------------------------------------------------------------- */
void
vmx_suspend()
{
	VMX_KPRINTF("vmx_suspend\n");

	/* Only leave VMX root if some client actually has VMX turned on. */
	if (vmx_use_count) {
		vmx_off(NULL);
	}
}
368 | ||
/* -----------------------------------------------------------------------------
 *  vmx_resume()
 *	Restore the previous VT state. Called when CPU comes back online.
 *	(Header previously mislabeled this as vmx_suspend().)
 * -------------------------------------------------------------------------- */
void
vmx_resume(boolean_t is_wake_from_hibernate)
{
	VMX_KPRINTF("vmx_resume\n");

	/* Re-arm FEATURE_CONTROL and CR4.VMXE, which are lost across sleep. */
	vmx_enable();

	if (vmx_use_count == 0) {
		return;
	}

	/*
	 * When resuming from hiberate on the boot cpu,
	 * we must mark VMX as off since that's the state at wake-up
	 * because the restored state in memory records otherwise.
	 * This results in vmx_on() doing the right thing.
	 */
	if (is_wake_from_hibernate) {
		vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
		cpu->specs.vmx_on = FALSE;
	}

	vmx_on(NULL);
}
fe8ab488 A |
397 | |
/* -----------------------------------------------------------------------------
 *  vmx_hv_support()
 *	Determine if the VMX feature set is sufficient for kernel HV support.
 *	Requires the TRUE-controls MSRs plus EPT and Unrestricted Mode.
 * -------------------------------------------------------------------------- */
boolean_t
vmx_hv_support()
{
	if (!vmx_is_available()) {
		return FALSE;
	}

	/* Bail out FALSE as soon as any required capability bit is missing. */
#define CHK(msr, shift, mask) if (!VMX_CAP(msr, shift, mask)) return FALSE;

	/* 'EPT' and 'Unrestricted Mode' are part of the secondary processor-based
	 * VM-execution controls */
	CHK(MSR_IA32_VMX_BASIC, 0, VMX_BASIC_TRUE_CTLS)
	CHK(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 32, VMX_TRUE_PROCBASED_SECONDARY_CTLS)

	/* if we have these, check for 'EPT' and 'Unrestricted Mode' */
	CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_EPT)
	CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_UNRESTRICTED)

	return TRUE;
}