/*
 * Copyright (c) 2006-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/pexpert.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/vmx.h>
#include <i386/vmx/vmx_asm.h>
#include <i386/vmx/vmx_shims.h>
#include <i386/vmx/vmx_cpu.h>
#include <mach/mach_host.h>     /* for host_info() */

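/*
 * Debug tracing for this file; the kprintf() body is commented out, so
 * VMX_KPRINTF compiles to nothing in normal builds.
 */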
#define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */

int vmx_use_count = 0;
boolean_t vmx_exclusive = FALSE;

static LCK_GRP_DECLARE(vmx_lck_grp, "vmx");
static LCK_MTX_DECLARE(vmx_lck_mtx, &vmx_lck_grp);

/* -----------------------------------------------------------------------------
 *  vmx_is_available()
 *      Is the VMX facility available on this CPU?
 *  -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_available(void)
{
    return 0 != (cpuid_features() & CPUID_FEATURE_VMX);
}

/* -----------------------------------------------------------------------------
 *  vmxon_is_enabled()
 *      Is the VMXON instruction enabled on this CPU?
 *  -------------------------------------------------------------------------- */
static inline boolean_t
vmxon_is_enabled(void)
{
    return vmx_is_available() &&
           (rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON);
}

#if MACH_ASSERT
/* -----------------------------------------------------------------------------
 *  vmx_is_cr0_valid()
 *      Is CR0 valid for executing VMXON on this CPU?
 *  -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr0_valid(vmx_specs_t *specs)
{
    uintptr_t cr0 = get_cr0();
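
    /*
     * IA32_VMX_CR0_FIXED0/1 define bits that are fixed in CR0 while VMX
     * operation is enabled: a bit set in cr0_fixed_0 must be 1 in CR0,
     * and a bit clear in cr0_fixed_1 must be 0. The expression below is
     * zero exactly when both constraints hold.
     */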
    return 0 == ((~cr0 & specs->cr0_fixed_0) | (cr0 & ~specs->cr0_fixed_1));
}

/* -----------------------------------------------------------------------------
 *  vmx_is_cr4_valid()
 *      Is CR4 valid for executing VMXON on this CPU?
 *  -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr4_valid(vmx_specs_t *specs)
{
    uintptr_t cr4 = get_cr4();
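
    /* Same fixed-bit rule as for CR0 above, using IA32_VMX_CR4_FIXED0/1. */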
    return 0 == ((~cr4 & specs->cr4_fixed_0) | (cr4 & ~specs->cr4_fixed_1));
}

#endif /* MACH_ASSERT */

static void
vmx_enable(void)
{
    uint64_t msr_image;

    if (!vmx_is_available()) {
        return;
    }

    /*
     * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL,
     * enabling VMXON, and locking the bit, so we do that now.
     */
    msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL);
    if (0 == (msr_image & MSR_IA32_FEATCTL_LOCK)) {
        wrmsr64(MSR_IA32_FEATURE_CONTROL,
            (msr_image |
            MSR_IA32_FEATCTL_VMXON |
            MSR_IA32_FEATCTL_LOCK));
    }
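
    /*
     * Executing VMXON with CR4.VMXE clear raises #UD, so set the bit
     * before any CPU attempts to enter VMX root operation.
     */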
    set_cr4(get_cr4() | CR4_VMXE);
}

/* -----------------------------------------------------------------------------
 *  vmx_cpu_init()
 *      Obtain the VMX facility specifications for this CPU and
 *      enter them into the vmx_specs_t structure. If VMX is not available or
 *      disabled on this CPU, set vmx_present to false and return, leaving
 *      the remainder of the vmx_specs_t uninitialized.
 *  -------------------------------------------------------------------------- */
void
vmx_cpu_init()
{
    vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;

    vmx_enable();

    VMX_KPRINTF("[%d]vmx_cpu_init() initialized: %d\n",
        cpu_number(), specs->initialized);

    /* If we read the data on boot, we won't read it again on wakeup. */
    if (specs->initialized) {
        return;
    } else {
        specs->initialized = TRUE;
    }

    /* See if VMX is present; return if it is not. */
    specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
    VMX_KPRINTF("[%d]vmx_cpu_init() vmx_present: %d\n",
        cpu_number(), specs->vmx_present);
    if (!specs->vmx_present) {
        return;
    }

#define rdmsr_mask(msr, mask) (uint32_t)(rdmsr64(msr) & (mask))
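    /* Bits 30:0 of IA32_VMX_BASIC report the VMCS revision identifier. */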
    specs->vmcs_id = rdmsr_mask(MSR_IA32_VMX_BASIC, VMX_VCR_VMCS_REV_ID);

    /* Obtain VMX-fixed bits in CR0 */
    specs->cr0_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED0, 0xFFFFFFFF);
    specs->cr0_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED1, 0xFFFFFFFF);

    /* Obtain VMX-fixed bits in CR4 */
    specs->cr4_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED0, 0xFFFFFFFF);
    specs->cr4_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED1, 0xFFFFFFFF);
}

/* -----------------------------------------------------------------------------
 *  vmx_on()
 *      Enter VMX root operation on this CPU.
 *  -------------------------------------------------------------------------- */
static void
vmx_on(void *arg __unused)
{
    vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
    addr64_t vmxon_region_paddr;
    int result;

    VMX_KPRINTF("[%d]vmx_on() entry state: %d\n",
        cpu_number(), cpu->specs.vmx_on);

    assert(cpu->specs.vmx_present);

    if (NULL == cpu->vmxon_region) {
        panic("vmx_on: VMXON region not allocated");
    }
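
    /*
     * VMXON operates on the physical address of the VMXON region, so
     * translate the kernel virtual address before use.
     */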
    vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

    /*
     * Enable VMX operation.
     */
    if (FALSE == cpu->specs.vmx_on) {
        assert(vmx_is_cr0_valid(&cpu->specs));
        assert(vmx_is_cr4_valid(&cpu->specs));

        result = __vmxon(vmxon_region_paddr);

        if (result != VMX_SUCCEED) {
            panic("vmx_on: unexpected return %d from __vmxon()", result);
        }

        cpu->specs.vmx_on = TRUE;
    }
    VMX_KPRINTF("[%d]vmx_on() return state: %d\n",
        cpu_number(), cpu->specs.vmx_on);
}

/* -----------------------------------------------------------------------------
 *  vmx_off()
 *      Leave VMX root operation on this CPU.
 *  -------------------------------------------------------------------------- */
static void
vmx_off(void *arg __unused)
{
    vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
    int result;

    VMX_KPRINTF("[%d]vmx_off() entry state: %d\n",
        cpu_number(), cpu->specs.vmx_on);

    if (TRUE == cpu->specs.vmx_on) {
        /* Tell the CPU to release the VMXON region */
        result = __vmxoff();

        if (result != VMX_SUCCEED) {
            panic("vmx_off: unexpected return %d from __vmxoff()", result);
        }

        cpu->specs.vmx_on = FALSE;
    }

    VMX_KPRINTF("[%d]vmx_off() return state: %d\n",
        cpu_number(), cpu->specs.vmx_on);
}

/* -----------------------------------------------------------------------------
 *  vmx_allocate_vmxon_regions()
 *      Allocate, clear and init VMXON regions for all CPUs.
 *  -------------------------------------------------------------------------- */
static void
vmx_allocate_vmxon_regions(void)
{
    unsigned int i;

    for (i = 0; i < real_ncpus; i++) {
        vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

        /* The size is defined to be always <= 4K, so we just allocate a page */
        cpu->vmxon_region = vmx_pcalloc();
        if (NULL == cpu->vmxon_region) {
            panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
        }
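        /*
         * Software must write the VMCS revision identifier into the
         * VMXON region before executing VMXON on it.
         */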
        *(uint32_t *)(cpu->vmxon_region) = cpu->specs.vmcs_id;
    }
}

/* -----------------------------------------------------------------------------
 *  vmx_free_vmxon_regions()
 *      Free VMXON regions for all CPUs.
 *  -------------------------------------------------------------------------- */
static void
vmx_free_vmxon_regions(void)
{
    unsigned int i;

    for (i = 0; i < real_ncpus; i++) {
        vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

        vmx_pfree(cpu->vmxon_region);
        cpu->vmxon_region = NULL;
    }
}

/* -----------------------------------------------------------------------------
 *  vmx_globally_available()
 *      Checks whether VT can be turned on for all CPUs.
 *  -------------------------------------------------------------------------- */
static boolean_t
vmx_globally_available(void)
{
    unsigned int i;
    unsigned int ncpus = ml_wait_max_cpus();
    boolean_t available = TRUE;

    for (i = 0; i < ncpus; i++) {
        vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

        if (!cpu->specs.vmx_present) {
            available = FALSE;
        }
    }
    VMX_KPRINTF("VMX available: %d\n", available);
    return available;
}

/* -----------------------------------------------------------------------------
 *  host_vmxon()
 *      Turn on VT operation on all CPUs.
 *  -------------------------------------------------------------------------- */
int
host_vmxon(boolean_t exclusive)
{
    int error;

    assert(0 == get_preemption_level());

    if (!vmx_globally_available()) {
        return VMX_UNSUPPORTED;
    }

    lck_mtx_lock(&vmx_lck_mtx);

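    /*
     * An exclusive request fails if VMX is already in use, and any request
     * fails while an exclusive owner holds it. Otherwise the first user
     * turns VMX on across all CPUs; later users just take a reference.
     */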
    if (vmx_exclusive || (exclusive && vmx_use_count)) {
        error = VMX_INUSE;
    } else {
        if (0 == vmx_use_count) {
            vmx_allocate_vmxon_regions();
            vmx_exclusive = exclusive;
            vmx_use_count = 1;
            mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_on, NULL);
        } else {
            vmx_use_count++;
        }

        VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
        error = VMX_OK;
    }

    lck_mtx_unlock(&vmx_lck_mtx);

    return error;
}

/* -----------------------------------------------------------------------------
 *  host_vmxoff()
 *      Turn off VT operation on all CPUs.
 *  -------------------------------------------------------------------------- */
void
host_vmxoff()
{
    assert(0 == get_preemption_level());

    lck_mtx_lock(&vmx_lck_mtx);

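    /* The last user turns VMX off on all CPUs and frees the VMXON regions. */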
    if (1 == vmx_use_count) {
        vmx_exclusive = FALSE;
        vmx_use_count = 0;
        mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_off, NULL);
        vmx_free_vmxon_regions();
    } else {
        vmx_use_count--;
    }

    lck_mtx_unlock(&vmx_lck_mtx);

    VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
}

/* -----------------------------------------------------------------------------
 *  vmx_suspend()
 *      Turn off VT operation on this CPU if it was on.
 *      Called when a CPU goes offline.
 *  -------------------------------------------------------------------------- */
void
vmx_suspend()
{
    VMX_KPRINTF("vmx_suspend\n");

    if (vmx_use_count) {
        vmx_off(NULL);
    }
}

/* -----------------------------------------------------------------------------
 *  vmx_resume()
 *      Restore the previous VT state. Called when a CPU comes back online.
 *  -------------------------------------------------------------------------- */
void
vmx_resume(boolean_t is_wake_from_hibernate)
{
    VMX_KPRINTF("vmx_resume\n");

    vmx_enable();

    if (vmx_use_count == 0) {
        return;
    }

    /*
     * When resuming from hibernate on the boot CPU, the restored state in
     * memory records VMX as on, but at wake-up it is actually off. Mark it
     * off here so that vmx_on() below re-enters VMX root operation.
     */
    if (is_wake_from_hibernate) {
        vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
        cpu->specs.vmx_on = FALSE;
    }

    vmx_on(NULL);
}

/* -----------------------------------------------------------------------------
 *  vmx_hv_support()
 *      Determine if the VMX feature set is sufficient for kernel HV support.
 *  -------------------------------------------------------------------------- */
boolean_t
vmx_hv_support()
{
    if (!vmx_is_available()) {
        return FALSE;
    }

#define CHK(msr, shift, mask) if (!VMX_CAP(msr, shift, mask)) return FALSE;
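
    /*
     * For the VMX control capability MSRs, the high 32 bits (selected by a
     * shift of 32) report which controls may be set to 1, so these checks
     * test whether a given feature can be enabled at all.
     */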

    /* 'EPT' and 'Unrestricted Mode' are part of the secondary processor-based
     * VM-execution controls */
    CHK(MSR_IA32_VMX_BASIC, 0, VMX_BASIC_TRUE_CTLS)
    CHK(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 32, VMX_TRUE_PROCBASED_SECONDARY_CTLS)

    /* If we have these, check for 'EPT' and 'Unrestricted Mode'. */
    CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_EPT)
    CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_UNRESTRICTED)

    return TRUE;
}