/*
 * Copyright (c) 2006-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/pexpert.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/vmx.h>
#include <i386/vmx/vmx_asm.h>
#include <i386/vmx/vmx_shims.h>
#include <i386/vmx/vmx_cpu.h>
#include <mach/mach_host.h>	/* for host_info() */

#define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */

int vmx_use_count = 0;
boolean_t vmx_exclusive = FALSE;

lck_grp_t *vmx_lck_grp = NULL;
lck_mtx_t *vmx_lck_mtx = NULL;

/* -----------------------------------------------------------------------------
 *  vmx_is_available()
 *	Is the VMX facility available on this CPU?
 * -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_available(void)
{
	return 0 != (cpuid_features() & CPUID_FEATURE_VMX);
}

/* -----------------------------------------------------------------------------
 *  vmxon_is_enabled()
 *	Is the VMXON instruction enabled on this CPU?
 * -------------------------------------------------------------------------- */
static inline boolean_t
vmxon_is_enabled(void)
{
	return vmx_is_available() &&
	       (rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON);
}

#if MACH_ASSERT
/* -----------------------------------------------------------------------------
 *  vmx_is_cr0_valid()
 *	Is CR0 valid for executing VMXON on this CPU?
 * -------------------------------------------------------------------------- */
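/*
 * Per the Intel SDM, every bit set in IA32_VMX_CR0_FIXED0 must be 1 in
 * CR0, and every bit clear in IA32_VMX_CR0_FIXED1 must be 0; the
 * expression below is zero exactly when both conditions hold.  The same
 * rule applies to CR4 in vmx_is_cr4_valid().
 */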
static inline boolean_t
vmx_is_cr0_valid(vmx_specs_t *specs)
{
	uintptr_t cr0 = get_cr0();
	return 0 == ((~cr0 & specs->cr0_fixed_0) | (cr0 & ~specs->cr0_fixed_1));
}

/* -----------------------------------------------------------------------------
 *  vmx_is_cr4_valid()
 *	Is CR4 valid for executing VMXON on this CPU?
 * -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr4_valid(vmx_specs_t *specs)
{
	uintptr_t cr4 = get_cr4();
	return 0 == ((~cr4 & specs->cr4_fixed_0) | (cr4 & ~specs->cr4_fixed_1));
}

#endif

static void
vmx_enable(void)
{
	uint64_t msr_image;

	if (!vmx_is_available()) {
		return;
	}

	/*
	 * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL
	 * and turning VMXON on and locking the bit, so we do that now.
	 */
	msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL);
	if (0 == (msr_image & MSR_IA32_FEATCTL_LOCK)) {
		wrmsr64(MSR_IA32_FEATURE_CONTROL,
		    (msr_image |
		    MSR_IA32_FEATCTL_VMXON |
		    MSR_IA32_FEATCTL_LOCK));
	}

	set_cr4(get_cr4() | CR4_VMXE);
}

void
vmx_init()
{
	vmx_lck_grp = lck_grp_alloc_init("vmx", LCK_GRP_ATTR_NULL);
	assert(vmx_lck_grp);

	vmx_lck_mtx = lck_mtx_alloc_init(vmx_lck_grp, LCK_ATTR_NULL);
	assert(vmx_lck_mtx);
}

/* -----------------------------------------------------------------------------
 *  vmx_cpu_init()
 *	Obtain VMX facility specifications for this CPU and
 *	enter them into the vmx_specs_t structure. If VMX is not available or
 *	disabled on this CPU, set vmx_present to false and return, leaving
 *	the remainder of the vmx_specs_t uninitialized.
 * -------------------------------------------------------------------------- */
void
vmx_cpu_init()
{
	vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;

	vmx_enable();

	VMX_KPRINTF("[%d]vmx_cpu_init() initialized: %d\n",
	    cpu_number(), specs->initialized);

	/* if we have read the data on boot, we won't read it again on wakeup */
	if (specs->initialized) {
		return;
	} else {
		specs->initialized = TRUE;
	}

	/* See if VMX is present, return if it is not */
	specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
	VMX_KPRINTF("[%d]vmx_cpu_init() vmx_present: %d\n",
	    cpu_number(), specs->vmx_present);
	if (!specs->vmx_present) {
		return;
	}

#define rdmsr_mask(msr, mask) (uint32_t)(rdmsr64(msr) & (mask))
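	/*
	 * Bits 30:0 of IA32_VMX_BASIC hold the VMCS revision identifier,
	 * which must also be written into the first word of each VMXON
	 * region (see vmx_allocate_vmxon_regions()).
	 */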
	specs->vmcs_id = rdmsr_mask(MSR_IA32_VMX_BASIC, VMX_VCR_VMCS_REV_ID);

	/* Obtain VMX-fixed bits in CR0 */
	specs->cr0_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED0, 0xFFFFFFFF);
	specs->cr0_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED1, 0xFFFFFFFF);

	/* Obtain VMX-fixed bits in CR4 */
	specs->cr4_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED0, 0xFFFFFFFF);
	specs->cr4_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED1, 0xFFFFFFFF);
}

/* -----------------------------------------------------------------------------
 *  vmx_on()
 *	Enter VMX root operation on this CPU.
 * -------------------------------------------------------------------------- */
static void
vmx_on(void *arg __unused)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	addr64_t vmxon_region_paddr;
	int result;

	VMX_KPRINTF("[%d]vmx_on() entry state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);

	assert(cpu->specs.vmx_present);

	if (NULL == cpu->vmxon_region) {
		panic("vmx_on: VMXON region not allocated");
	}
	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

	/*
	 * Enable VMX operation.
	 */
	if (FALSE == cpu->specs.vmx_on) {
		assert(vmx_is_cr0_valid(&cpu->specs));
		assert(vmx_is_cr4_valid(&cpu->specs));

		result = __vmxon(vmxon_region_paddr);

		if (result != VMX_SUCCEED) {
			panic("vmx_on: unexpected return %d from __vmxon()", result);
		}

		cpu->specs.vmx_on = TRUE;
	}
	VMX_KPRINTF("[%d]vmx_on() return state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);
}

/* -----------------------------------------------------------------------------
 *  vmx_off()
 *	Leave VMX root operation on this CPU.
 * -------------------------------------------------------------------------- */
static void
vmx_off(void *arg __unused)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	int result;

	VMX_KPRINTF("[%d]vmx_off() entry state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);

	if (TRUE == cpu->specs.vmx_on) {
		/* Tell the CPU to release the VMXON region */
		result = __vmxoff();

		if (result != VMX_SUCCEED) {
			panic("vmx_off: unexpected return %d from __vmxoff()", result);
		}

		cpu->specs.vmx_on = FALSE;
	}

	VMX_KPRINTF("[%d]vmx_off() return state: %d\n",
	    cpu_number(), cpu->specs.vmx_on);
}

/* -----------------------------------------------------------------------------
 *  vmx_allocate_vmxon_regions()
 *	Allocate, clear and init VMXON regions for all CPUs.
 * -------------------------------------------------------------------------- */
static void
vmx_allocate_vmxon_regions(void)
{
	unsigned int i;

	for (i = 0; i < real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		/* The size is defined to be always <= 4K, so we just allocate a page */
		cpu->vmxon_region = vmx_pcalloc();
		if (NULL == cpu->vmxon_region) {
			panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
		}
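		/* The first 32 bits of a VMXON region must hold the VMCS revision ID */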
		*(uint32_t*)(cpu->vmxon_region) = cpu->specs.vmcs_id;
	}
}

/* -----------------------------------------------------------------------------
 *  vmx_free_vmxon_regions()
 *	Free VMXON regions for all CPUs.
 * -------------------------------------------------------------------------- */
static void
vmx_free_vmxon_regions(void)
{
	unsigned int i;

	for (i = 0; i < real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		vmx_pfree(cpu->vmxon_region);
		cpu->vmxon_region = NULL;
	}
}

/* -----------------------------------------------------------------------------
 *  vmx_globally_available()
 *	Checks whether VT can be turned on for all CPUs.
 * -------------------------------------------------------------------------- */
static boolean_t
vmx_globally_available(void)
{
	unsigned int i;
	unsigned int ncpus = ml_get_max_cpus();
	boolean_t available = TRUE;

	for (i = 0; i < ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		if (!cpu->specs.vmx_present) {
			available = FALSE;
		}
	}
	VMX_KPRINTF("VMX available: %d\n", available);
	return available;
}


/* -----------------------------------------------------------------------------
 *  host_vmxon()
 *	Turn on VT operation on all CPUs.
 * -------------------------------------------------------------------------- */
int
host_vmxon(boolean_t exclusive)
{
	int error;

	assert(0 == get_preemption_level());

	if (!vmx_globally_available()) {
		return VMX_UNSUPPORTED;
	}

	lck_mtx_lock(vmx_lck_mtx);

	if (vmx_exclusive || (exclusive && vmx_use_count)) {
		error = VMX_INUSE;
	} else {
		if (0 == vmx_use_count) {
			vmx_allocate_vmxon_regions();
			vmx_exclusive = exclusive;
			vmx_use_count = 1;
			mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_on, NULL);
		} else {
			vmx_use_count++;
		}

		VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
		error = VMX_OK;
	}

	lck_mtx_unlock(vmx_lck_mtx);

	return error;
}

/* -----------------------------------------------------------------------------
 *  host_vmxoff()
 *	Turn off VT operation on all CPUs.
 * -------------------------------------------------------------------------- */
void
host_vmxoff()
{
	assert(0 == get_preemption_level());

	lck_mtx_lock(vmx_lck_mtx);

	if (1 == vmx_use_count) {
		vmx_exclusive = FALSE;
		vmx_use_count = 0;
		mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_off, NULL);
		vmx_free_vmxon_regions();
	} else {
		vmx_use_count--;
	}

	lck_mtx_unlock(vmx_lck_mtx);

	VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
}

/* -----------------------------------------------------------------------------
 *  vmx_suspend()
 *	Turn off VT operation on this CPU if it was on.
 *	Called when a CPU goes offline.
 * -------------------------------------------------------------------------- */
void
vmx_suspend()
{
	VMX_KPRINTF("vmx_suspend\n");

	if (vmx_use_count) {
		vmx_off(NULL);
	}
}

/* -----------------------------------------------------------------------------
 *  vmx_resume()
 *	Restore the previous VT state. Called when CPU comes back online.
 * -------------------------------------------------------------------------- */
void
vmx_resume(boolean_t is_wake_from_hibernate)
{
	VMX_KPRINTF("vmx_resume\n");

	vmx_enable();

	if (vmx_use_count == 0) {
		return;
	}

	/*
	 * When resuming from hibernate on the boot cpu,
	 * we must mark VMX as off, since that is the true hardware state at
	 * wake-up, even though the state restored from the hibernation image
	 * records it as on.  This lets vmx_on() do the right thing.
	 */
	if (is_wake_from_hibernate) {
		vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
		cpu->specs.vmx_on = FALSE;
	}

	vmx_on(NULL);
}

/* -----------------------------------------------------------------------------
 *  vmx_hv_support()
 *	Determine if the VMX feature set is sufficient for kernel HV support.
 * -------------------------------------------------------------------------- */
boolean_t
vmx_hv_support()
{
	if (!vmx_is_available()) {
		return FALSE;
	}

#define CHK(msr, shift, mask) if (!VMX_CAP(msr, shift, mask)) return FALSE;

	/* 'EPT' and 'Unrestricted Mode' are part of the secondary processor-based
	 * VM-execution controls */
	CHK(MSR_IA32_VMX_BASIC, 0, VMX_BASIC_TRUE_CTLS)
	CHK(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 32, VMX_TRUE_PROCBASED_SECONDARY_CTLS)

	/* if we have these, check for 'EPT' and 'Unrestricted Mode' */
	CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_EPT)
	CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_UNRESTRICTED)

	return TRUE;
}