/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/pexpert.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/vmx.h>
#include <i386/vmx/vmx_asm.h>
#include <i386/vmx/vmx_shims.h>
#include <i386/vmx/vmx_cpu.h>
#include <i386/mtrr.h>
#include <mach/mach_host.h>		/* for host_info() */

#define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */

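/*
 * vmx_use_count counts host_vmxon()/host_vmxoff() references; vmx_exclusive
 * records whether the current user asked for exclusive use of the VMX
 * facility. Both are protected by vmx_use_count_lock.
 */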
int vmx_use_count = 0;
boolean_t vmx_exclusive = FALSE;
decl_simple_lock_data(static,vmx_use_count_lock)

/* -----------------------------------------------------------------------------
   vmx_is_available()
	Is the VMX facility available on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_available(void)
{
	return (0 != (cpuid_features() & CPUID_FEATURE_VMX));
}

/* -----------------------------------------------------------------------------
   vmxon_is_enabled()
	Is the VMXON instruction enabled on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmxon_is_enabled(void)
{
	return (vmx_is_available() &&
		(rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON));
}

/* -----------------------------------------------------------------------------
   vmx_is_cr0_valid()
	Is CR0 valid for executing VMXON on this CPU?
   -------------------------------------------------------------------------- */
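/*
 * Per the Intel SDM, IA32_VMX_CR0_FIXED0/FIXED1 constrain CR0 while VMX is
 * enabled: every bit set in FIXED0 must be 1 in CR0, and every bit clear in
 * FIXED1 must be 0 in CR0. The expression below is non-zero iff some bit
 * violates either constraint; vmx_is_cr4_valid() applies the same rule to CR4.
 */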
static inline boolean_t
vmx_is_cr0_valid(vmx_specs_t *specs)
{
	uintptr_t cr0 = get_cr0();
	return (0 == ((~cr0 & specs->cr0_fixed_0)|(cr0 & ~specs->cr0_fixed_1)));
}

/* -----------------------------------------------------------------------------
   vmx_is_cr4_valid()
	Is CR4 valid for executing VMXON on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr4_valid(vmx_specs_t *specs)
{
	uintptr_t cr4 = get_cr4();
	return (0 == ((~cr4 & specs->cr4_fixed_0)|(cr4 & ~specs->cr4_fixed_1)));
}

static void
vmx_init(void)
{
	uint64_t msr_image;

	if (!vmx_is_available())
		return;

	/*
	 * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL
	 * and turning VMXON on and locking the bit, so we do that now.
	 */
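	/*
	 * Once the lock bit is set, the MSR cannot be rewritten until the next
	 * reset; if firmware locked it with VMXON disabled, VMX simply remains
	 * unavailable and vmx_present will end up FALSE.
	 */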
	msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL);
	if (0 == ((msr_image & MSR_IA32_FEATCTL_LOCK)))
		wrmsr64(MSR_IA32_FEATURE_CONTROL,
			(msr_image |
			 MSR_IA32_FEATCTL_VMXON |
			 MSR_IA32_FEATCTL_LOCK));
}

/* -----------------------------------------------------------------------------
   vmx_get_specs()
	Obtain VMX facility specifications for this CPU and
	enter them into the vmx_specs_t structure. If VMX is not available or
	disabled on this CPU, set vmx_present to false and return leaving
	the remainder of the vmx_specs_t uninitialized.
   -------------------------------------------------------------------------- */
void
vmx_get_specs()
{
	vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;
	uint64_t msr_image;

	/* this is called once for every CPU, but the lock doesn't care :-) */
	simple_lock_init(&vmx_use_count_lock, 0);

	vmx_init();

	/*
	 * if we have read the data on boot, we won't read it
	 * again on wakeup, otherwise *bad* things will happen
	 */
	if (specs->initialized)
		return;
	else
		specs->initialized = TRUE;

	/* See if VMX is present, return if it is not */
	specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
	if (!specs->vmx_present)
		return;

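	/*
	 * bitfield(x,f) extracts the field named f from x, using the paired
	 * f##_BIT shift and f##_MASK constants defined for each field.
	 */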
#define bitfield(x,f) ((x >> f##_BIT) & f##_MASK)
	/* Obtain and decode VMX general capabilities */
	msr_image = rdmsr64(MSR_IA32_VMX_BASIC);
	specs->vmcs_id = (uint32_t)(msr_image & VMX_VCR_VMCS_REV_ID);
	specs->vmcs_mem_type = bitfield(msr_image, VMX_VCR_VMCS_MEM_TYPE) != 0;
	specs->vmcs_size = bitfield(msr_image, VMX_VCR_VMCS_SIZE);

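	/*
	 * For each VMX control MSR read below, the low 32 bits report the
	 * allowed 0-settings of the corresponding controls and the high 32 bits
	 * report the allowed 1-settings (Intel SDM, VMX capability reporting).
	 */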
	/* Obtain allowed settings for pin-based execution controls */
	msr_image = rdmsr64(MSR_IA32_VMX_PINBASED_CTLS);
	specs->pin_exctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->pin_exctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain allowed settings for processor-based execution controls */
	msr_image = rdmsr64(MSR_IA32_VMX_PROCBASED_CTLS);
	specs->proc_exctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->proc_exctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain allowed settings for VM-exit controls */
	msr_image = rdmsr64(MSR_IA32_VMX_EXIT_CTLS);
	specs->exit_ctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->exit_ctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain allowed settings for VM-entry controls */
	msr_image = rdmsr64(MSR_IA32_VMX_ENTRY_CTLS);
	specs->enter_ctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->enter_ctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain and decode miscellaneous capabilities */
	msr_image = rdmsr64(MSR_IA32_VMX_MISC);
	specs->act_halt = bitfield(msr_image, VMX_VCR_ACT_HLT) != 0;
	specs->act_shutdown = bitfield(msr_image, VMX_VCR_ACT_SHUTDOWN) != 0;
	specs->act_SIPI = bitfield(msr_image, VMX_VCR_ACT_SIPI) != 0;
	specs->act_CSTATE = bitfield(msr_image, VMX_VCR_ACT_CSTATE) != 0;
	specs->cr3_targs = bitfield(msr_image, VMX_VCR_CR3_TARGS);
	specs->max_msrs = (uint32_t)(512 * (1 + bitfield(msr_image, VMX_VCR_MAX_MSRS)));
	specs->mseg_id = (uint32_t)bitfield(msr_image, VMX_VCR_MSEG_ID);

	/* Obtain VMX-fixed bits in CR0 */
	specs->cr0_fixed_0 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR0_FIXED0) & 0xFFFFFFFF;
	specs->cr0_fixed_1 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR0_FIXED1) & 0xFFFFFFFF;

	/* Obtain VMX-fixed bits in CR4 */
	specs->cr4_fixed_0 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR4_FIXED0) & 0xFFFFFFFF;
	specs->cr4_fixed_1 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR4_FIXED1) & 0xFFFFFFFF;
}

/* -----------------------------------------------------------------------------
   vmx_on()
	Enter VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_on(void)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	addr64_t vmxon_region_paddr;
	int result;

	vmx_init();

	assert(cpu->specs.vmx_present);

	if (NULL == cpu->vmxon_region)
		panic("vmx_on: VMXON region not allocated");
	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

	/*
	 * Enable VMX operation.
	 */
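	/* CR4.VMXE must be set first; executing VMXON with it clear raises #UD. */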
	set_cr4(get_cr4() | CR4_VMXE);

	assert(vmx_is_cr0_valid(&cpu->specs));
	assert(vmx_is_cr4_valid(&cpu->specs));

	if ((result = __vmxon(&vmxon_region_paddr)) != VMX_SUCCEED) {
		panic("vmx_on: unexpected return %d from __vmxon()", result);
	}
}

/* -----------------------------------------------------------------------------
   vmx_off()
	Leave VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_off(void)
{
	int result;

	/* Tell the CPU to release the VMXON region */
	if ((result = __vmxoff()) != VMX_SUCCEED) {
		panic("vmx_off: unexpected return %d from __vmxoff()", result);
	}
}

/* -----------------------------------------------------------------------------
   vmx_allocate_vmxon_regions()
	Allocate, clear and init VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_allocate_vmxon_regions(void)
{
	unsigned int i;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		/* The size is defined to be always <= 4K, so we just allocate a page */
		cpu->vmxon_region = vmx_pcalloc();
		if (NULL == cpu->vmxon_region)
			panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
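		/*
		 * The first 32 bits of a VMXON region must hold the VMCS revision
		 * identifier reported by IA32_VMX_BASIC (Intel SDM).
		 */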
		*(uint32_t*)(cpu->vmxon_region) = cpu->specs.vmcs_id;
	}
}

/* -----------------------------------------------------------------------------
   vmx_free_vmxon_regions()
	Free VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_free_vmxon_regions(void)
{
	unsigned int i;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		vmx_pfree(cpu->vmxon_region);
		cpu->vmxon_region = NULL;
	}
}

/* -----------------------------------------------------------------------------
   vmx_globally_available()
	Checks whether VT can be turned on for all CPUs.
   -------------------------------------------------------------------------- */
static boolean_t
vmx_globally_available(void)
{
	unsigned int i;

	boolean_t available = TRUE;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		if (!cpu->specs.vmx_present)
			available = FALSE;
	}
	VMX_KPRINTF("VMX available: %d\n", available);
	return available;
}


/* -----------------------------------------------------------------------------
   host_vmxon()
	Turn on VT operation on all CPUs.
   -------------------------------------------------------------------------- */
int
host_vmxon(boolean_t exclusive)
{
	int error;
	boolean_t do_it = FALSE;	/* do the cpu sync outside of the area holding the lock */

	if (!vmx_globally_available())
		return VMX_UNSUPPORTED;

	simple_lock(&vmx_use_count_lock);

	if (vmx_exclusive) {
		error = VMX_INUSE;
	} else {
		vmx_use_count++;
		if (vmx_use_count == 1) /* was turned off before */
			do_it = TRUE;
		vmx_exclusive = exclusive;

		VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
		error = VMX_OK;
	}

	simple_unlock(&vmx_use_count_lock);

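	/*
	 * mp_rendezvous() runs vmx_on() on every CPU, so each processor enters
	 * VMX root operation with its own per-CPU VMXON region.
	 */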
	if (do_it) {
		vmx_allocate_vmxon_regions();
		mp_rendezvous(NULL, (void (*)(void *))vmx_on, NULL, NULL);
	}
	return error;
}

/* -----------------------------------------------------------------------------
   host_vmxoff()
	Turn off VT operation on all CPUs.
   -------------------------------------------------------------------------- */
void
host_vmxoff()
{
	boolean_t do_it = FALSE;	/* do the cpu sync outside of the area holding the lock */

	simple_lock(&vmx_use_count_lock);

	if (vmx_use_count) {
		vmx_use_count--;
		vmx_exclusive = FALSE;
		if (!vmx_use_count)
			do_it = TRUE;
	}

	simple_unlock(&vmx_use_count_lock);

	if (do_it) {
		mp_rendezvous(NULL, (void (*)(void *))vmx_off, NULL, NULL);
		vmx_free_vmxon_regions();
	}

	VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
}

/* -----------------------------------------------------------------------------
   vmx_suspend()
	Turn off VT operation on this CPU if it was on.
	Called when a CPU goes offline.
   -------------------------------------------------------------------------- */
void
vmx_suspend()
{
	VMX_KPRINTF("vmx_suspend\n");
	if (vmx_use_count)
		vmx_off();
}

/* -----------------------------------------------------------------------------
   vmx_resume()
	Restore the previous VT state. Called when CPU comes back online.
   -------------------------------------------------------------------------- */
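/*
 * vmx_resume() re-runs vmx_init() before re-entering VMX root operation,
 * presumably because MSR_IA32_FEATURE_CONTROL is not guaranteed to retain
 * its programmed value across a sleep/offline cycle.
 */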
void
vmx_resume()
{
	VMX_KPRINTF("vmx_resume\n");
	vmx_init();	/* init VMX on CPU #0 */
	if (vmx_use_count)
		vmx_on();
}