/*
 * Copyright (c) 2006-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/pexpert.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/vmx.h>
#include <i386/vmx/vmx_asm.h>
#include <i386/vmx/vmx_shims.h>
#include <i386/vmx/vmx_cpu.h>
#include <mach/mach_host.h>             /* for host_info() */

#define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */

int vmx_use_count = 0;
boolean_t vmx_exclusive = FALSE;

lck_grp_t *vmx_lck_grp = NULL;
lck_mtx_t *vmx_lck_mtx = NULL;

/* -----------------------------------------------------------------------------
 *  vmx_is_available()
 *	Is the VMX facility available on this CPU?
 * -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_available(void)
{
	return 0 != (cpuid_features() & CPUID_FEATURE_VMX);
}

/* -----------------------------------------------------------------------------
 *  vmxon_is_enabled()
 *	Is the VMXON instruction enabled on this CPU?
 * -------------------------------------------------------------------------- */
static inline boolean_t
vmxon_is_enabled(void)
{
	return vmx_is_available() &&
	       (rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON);
}

#if MACH_ASSERT
/* -----------------------------------------------------------------------------
 *  vmx_is_cr0_valid()
 *	Is CR0 valid for executing VMXON on this CPU?
 * -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr0_valid(vmx_specs_t *specs)
{
	uintptr_t cr0 = get_cr0();
	return 0 == ((~cr0 & specs->cr0_fixed_0) | (cr0 & ~specs->cr0_fixed_1));
}

/* -----------------------------------------------------------------------------
 *  vmx_is_cr4_valid()
 *	Is CR4 valid for executing VMXON on this CPU?
 * -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr4_valid(vmx_specs_t *specs)
{
	uintptr_t cr4 = get_cr4();
	return 0 == ((~cr4 & specs->cr4_fixed_0) | (cr4 & ~specs->cr4_fixed_1));
}
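
/*
 * Per the Intel SDM, IA32_VMX_CR0_FIXED0/CR4_FIXED0 report bits that must be
 * 1 in CR0/CR4 while VMX is enabled, and IA32_VMX_CR0_FIXED1/CR4_FIXED1
 * report bits that are allowed to be 1.  Each check above therefore fails if
 * either (~crN & fixed_0) is non-zero (a required-1 bit is clear) or
 * (crN & ~fixed_1) is non-zero (a bit that must stay 0 is set).
 */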

#endif

static void
vmx_enable(void)
{
	uint64_t msr_image;

	if (!vmx_is_available()) {
		return;
	}

	/*
	 * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL
	 * and turning VMXON on and locking the bit, so we do that now.
	 */
	msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL);
	if (0 == ((msr_image & MSR_IA32_FEATCTL_LOCK))) {
		wrmsr64(MSR_IA32_FEATURE_CONTROL,
		    (msr_image |
		    MSR_IA32_FEATCTL_VMXON |
		    MSR_IA32_FEATCTL_LOCK));
	}

	set_cr4(get_cr4() | CR4_VMXE);
}
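
/*
 * Note: per the Intel SDM, once MSR_IA32_FEATCTL_LOCK is set the feature
 * control MSR cannot be rewritten until the next reset, which is why the
 * VMXON-enable bit and the lock bit are written together above.  If firmware
 * has already locked the MSR with VMXON disabled, vmx_enable() leaves it
 * alone and vmxon_is_enabled() will later report VMX as unavailable.
 */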

void
vmx_init()
{
	vmx_lck_grp = lck_grp_alloc_init("vmx", LCK_GRP_ATTR_NULL);
	assert(vmx_lck_grp);

	vmx_lck_mtx = lck_mtx_alloc_init(vmx_lck_grp, LCK_ATTR_NULL);
	assert(vmx_lck_mtx);
}

/* -----------------------------------------------------------------------------
 *  vmx_cpu_init()
 *	Obtain the VMX facility specifications for this CPU and
 *	enter them into the vmx_specs_t structure. If VMX is not available or
 *	disabled on this CPU, set vmx_present to false and return, leaving
 *	the remainder of the vmx_specs_t uninitialized.
 * -------------------------------------------------------------------------- */
void
vmx_cpu_init()
{
	vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;

	vmx_enable();

	VMX_KPRINTF("[%d]vmx_cpu_init() initialized: %d\n",
	    cpu_number(), specs->initialized);

	/* if we have read the data on boot, we won't read it again on wakeup */
	if (specs->initialized) {
		return;
	} else {
		specs->initialized = TRUE;
	}

	/* See if VMX is present, return if it is not */
	specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
	VMX_KPRINTF("[%d]vmx_cpu_init() vmx_present: %d\n",
	    cpu_number(), specs->vmx_present);
	if (!specs->vmx_present) {
		return;
	}

#define rdmsr_mask(msr, mask) (uint32_t)(rdmsr64(msr) & (mask))
	specs->vmcs_id = rdmsr_mask(MSR_IA32_VMX_BASIC, VMX_VCR_VMCS_REV_ID);

	/* Obtain VMX-fixed bits in CR0 */
	specs->cr0_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED0, 0xFFFFFFFF);
	specs->cr0_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED1, 0xFFFFFFFF);

	/* Obtain VMX-fixed bits in CR4 */
	specs->cr4_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED0, 0xFFFFFFFF);
	specs->cr4_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED1, 0xFFFFFFFF);
}

/* -----------------------------------------------------------------------------
 *  vmx_on()
 *	Enter VMX root operation on this CPU.
 * -------------------------------------------------------------------------- */
static void
vmx_on(void *arg __unused)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	addr64_t vmxon_region_paddr;
	int result;

	VMX_KPRINTF("[%d]vmx_on() entry state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);

	assert(cpu->specs.vmx_present);

	if (NULL == cpu->vmxon_region) {
		panic("vmx_on: VMXON region not allocated");
	}
	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

	/*
	 * Enable VMX operation.
	 */
	if (FALSE == cpu->specs.vmx_on) {
		assert(vmx_is_cr0_valid(&cpu->specs));
		assert(vmx_is_cr4_valid(&cpu->specs));

		result = __vmxon(vmxon_region_paddr);

		if (result != VMX_SUCCEED) {
			panic("vmx_on: unexpected return %d from __vmxon()", result);
		}

		cpu->specs.vmx_on = TRUE;
	}
	VMX_KPRINTF("[%d]vmx_on() return state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);
}

/* -----------------------------------------------------------------------------
 *  vmx_off()
 *	Leave VMX root operation on this CPU.
 * -------------------------------------------------------------------------- */
static void
vmx_off(void *arg __unused)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	int result;

	VMX_KPRINTF("[%d]vmx_off() entry state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);

	if (TRUE == cpu->specs.vmx_on) {
		/* Tell the CPU to release the VMXON region */
		result = __vmxoff();

		if (result != VMX_SUCCEED) {
			panic("vmx_off: unexpected return %d from __vmxoff()", result);
		}

		cpu->specs.vmx_on = FALSE;
	}

	VMX_KPRINTF("[%d]vmx_off() return state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);
}
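
/*
 * vmx_on()/vmx_off() are broadcast to every CPU via mp_cpus_call() from
 * host_vmxon()/host_vmxoff() below, and are also invoked directly on the
 * local CPU from vmx_resume()/vmx_suspend().  Both check cpu->specs.vmx_on
 * first, so calling them when the CPU is already in the requested state is
 * a harmless no-op.
 */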

/* -----------------------------------------------------------------------------
 *  vmx_allocate_vmxon_regions()
 *	Allocate, clear and init VMXON regions for all CPUs.
 * -------------------------------------------------------------------------- */
static void
vmx_allocate_vmxon_regions(void)
{
	unsigned int i;

	for (i = 0; i < real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		/* The size is defined to be always <= 4K, so we just allocate a page */
		cpu->vmxon_region = vmx_pcalloc();
		if (NULL == cpu->vmxon_region) {
			panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
		}
		*(uint32_t*)(cpu->vmxon_region) = cpu->specs.vmcs_id;
	}
}
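
/*
 * The first four bytes of a VMXON region must hold the VMCS revision
 * identifier reported by IA32_VMX_BASIC (per the Intel SDM), which is why
 * the vmcs_id captured in vmx_cpu_init() is written into each freshly
 * allocated page above before it is handed to VMXON.
 */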

/* -----------------------------------------------------------------------------
 *  vmx_free_vmxon_regions()
 *	Free VMXON regions for all CPUs.
 * -------------------------------------------------------------------------- */
static void
vmx_free_vmxon_regions(void)
{
	unsigned int i;

	for (i = 0; i < real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		vmx_pfree(cpu->vmxon_region);
		cpu->vmxon_region = NULL;
	}
}

/* -----------------------------------------------------------------------------
 *  vmx_globally_available()
 *	Checks whether VT can be turned on for all CPUs.
 * -------------------------------------------------------------------------- */
static boolean_t
vmx_globally_available(void)
{
	unsigned int i;
	unsigned int ncpus = ml_get_max_cpus();
	boolean_t available = TRUE;

	for (i = 0; i < ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		if (!cpu->specs.vmx_present) {
			available = FALSE;
		}
	}
	VMX_KPRINTF("VMX available: %d\n", available);
	return available;
}


/* -----------------------------------------------------------------------------
 *  host_vmxon()
 *	Turn on VT operation on all CPUs.
 * -------------------------------------------------------------------------- */
int
host_vmxon(boolean_t exclusive)
{
	int error;

	assert(0 == get_preemption_level());

	if (!vmx_globally_available()) {
		return VMX_UNSUPPORTED;
	}

	lck_mtx_lock(vmx_lck_mtx);

	if (vmx_exclusive || (exclusive && vmx_use_count)) {
		error = VMX_INUSE;
	} else {
		if (0 == vmx_use_count) {
			vmx_allocate_vmxon_regions();
			vmx_exclusive = exclusive;
			vmx_use_count = 1;
			mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_on, NULL);
		} else {
			vmx_use_count++;
		}

		VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
		error = VMX_OK;
	}

	lck_mtx_unlock(vmx_lck_mtx);

	return error;
}
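
/*
 * Illustrative sketch (hypothetical caller, not part of this file): an
 * in-kernel client is expected to pair host_vmxon() with host_vmxoff() and
 * to check the return value before relying on VMX root operation, e.g.:
 *
 *	if (host_vmxon(FALSE) == VMX_OK) {
 *		// ... use VMX; the use count keeps it enabled on all CPUs ...
 *		host_vmxoff();
 *	}
 *
 * Passing TRUE requests exclusive use and fails with VMX_INUSE if another
 * client already holds VMX, as implemented above.
 */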

/* -----------------------------------------------------------------------------
 *  host_vmxoff()
 *	Turn off VT operation on all CPUs.
 * -------------------------------------------------------------------------- */
void
host_vmxoff()
{
	assert(0 == get_preemption_level());

	lck_mtx_lock(vmx_lck_mtx);

	if (1 == vmx_use_count) {
		vmx_exclusive = FALSE;
		vmx_use_count = 0;
		mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_off, NULL);
		vmx_free_vmxon_regions();
	} else {
		vmx_use_count--;
	}

	lck_mtx_unlock(vmx_lck_mtx);

	VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
}

/* -----------------------------------------------------------------------------
 *  vmx_suspend()
 *	Turn off VT operation on this CPU if it was on.
 *	Called when a CPU goes offline.
 * -------------------------------------------------------------------------- */
void
vmx_suspend()
{
	VMX_KPRINTF("vmx_suspend\n");

	if (vmx_use_count) {
		vmx_off(NULL);
	}
}

/* -----------------------------------------------------------------------------
 *  vmx_resume()
 *	Restore the previous VT state. Called when a CPU comes back online.
 * -------------------------------------------------------------------------- */
void
vmx_resume(boolean_t is_wake_from_hibernate)
{
	VMX_KPRINTF("vmx_resume\n");

	vmx_enable();

	if (vmx_use_count == 0) {
		return;
	}

	/*
	 * When resuming from hibernate on the boot cpu,
	 * we must mark VMX as off, since off is the true state at wake-up
	 * even though the state restored from the hibernation image says
	 * otherwise.  This lets vmx_on() below do the right thing.
	 */
	if (is_wake_from_hibernate) {
		vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
		cpu->specs.vmx_on = FALSE;
	}

	vmx_on(NULL);
}

/* -----------------------------------------------------------------------------
 *  vmx_hv_support()
 *	Determine if the VMX feature set is sufficient for kernel HV support.
 * -------------------------------------------------------------------------- */
boolean_t
vmx_hv_support()
{
	if (!vmx_is_available()) {
		return FALSE;
	}

#define CHK(msr, shift, mask) if (!VMX_CAP(msr, shift, mask)) return FALSE;

	/* 'EPT' and 'Unrestricted Mode' are part of the secondary processor-based
	 * VM-execution controls */
	CHK(MSR_IA32_VMX_BASIC, 0, VMX_BASIC_TRUE_CTLS)
	CHK(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 32, VMX_TRUE_PROCBASED_SECONDARY_CTLS)

	/* if we have these, check for 'EPT' and 'Unrestricted Mode' */
	CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_EPT)
	CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_UNRESTRICTED)

	return TRUE;
}
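
/*
 * Note on the CHK() checks above: per the Intel SDM, the low 32 bits of the
 * VMX controls capability MSRs report the "allowed-0" settings and the high
 * 32 bits the "allowed-1" settings of the corresponding controls, so a shift
 * of 32 selects the allowed-1 half, i.e. whether the control can be enabled
 * at all.  VMX_CAP() is assumed here to test the given bit of the named MSR
 * after shifting and masking, as its use above implies.
 */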