/* apple/xnu (xnu-1699.22.81): osfmk/i386/vmx/vmx_cpu.c */
/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/pexpert.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/vmx.h>
#include <i386/vmx/vmx_asm.h>
#include <i386/vmx/vmx_shims.h>
#include <i386/vmx/vmx_cpu.h>
#include <mach/mach_host.h>	/* for host_info() */

#define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */

int vmx_use_count = 0;
boolean_t vmx_exclusive = FALSE;
decl_simple_lock_data(static,vmx_use_count_lock)

/* -----------------------------------------------------------------------------
   vmx_is_available()
	Is the VMX facility available on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_available(void)
{
	return (0 != (cpuid_features() & CPUID_FEATURE_VMX));
}

/* -----------------------------------------------------------------------------
   vmxon_is_enabled()
	Is the VMXON instruction enabled on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmxon_is_enabled(void)
{
	return (vmx_is_available() &&
		(rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON));
}

/* -----------------------------------------------------------------------------
   vmx_is_cr0_valid()
	Is CR0 valid for executing VMXON on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr0_valid(vmx_specs_t *specs)
{
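	/*
	 * Per the Intel SDM, VMXON requires that every bit set in
	 * IA32_VMX_CR0_FIXED0 also be set in CR0, and that every bit clear
	 * in IA32_VMX_CR0_FIXED1 also be clear in CR0 (vmx_is_cr4_valid()
	 * applies the same rule to CR4). The expression below is zero
	 * exactly when both conditions hold.
	 */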
	uintptr_t cr0 = get_cr0();
	return (0 == ((~cr0 & specs->cr0_fixed_0)|(cr0 & ~specs->cr0_fixed_1)));
}

/* -----------------------------------------------------------------------------
   vmx_is_cr4_valid()
	Is CR4 valid for executing VMXON on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr4_valid(vmx_specs_t *specs)
{
	uintptr_t cr4 = get_cr4();
	return (0 == ((~cr4 & specs->cr4_fixed_0)|(cr4 & ~specs->cr4_fixed_1)));
}

static void
vmx_init(void)
{
	uint64_t msr_image;

	if (!vmx_is_available())
		return;

	/*
	 * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL
	 * and turning VMXON on and locking the bit, so we do that now.
	 */
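	/*
	 * Note: if firmware has already set the lock bit with the VMXON
	 * enable bit clear, the MSR cannot be rewritten until the next
	 * reset, and vmxon_is_enabled() will report VMX as unavailable.
	 */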
	msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL);
	if (0 == ((msr_image & MSR_IA32_FEATCTL_LOCK)))
		wrmsr64(MSR_IA32_FEATURE_CONTROL,
			(msr_image |
			 MSR_IA32_FEATCTL_VMXON |
			 MSR_IA32_FEATCTL_LOCK));
}

/* -----------------------------------------------------------------------------
   vmx_get_specs()
	Obtain VMX facility specifications for this CPU and
	enter them into the vmx_specs_t structure. If VMX is not available or
	disabled on this CPU, set vmx_present to false and return leaving
	the remainder of the vmx_specs_t uninitialized.
   -------------------------------------------------------------------------- */
void
vmx_get_specs()
{
	vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;
	uint64_t msr_image;

	/* this is called once for every CPU, but the lock doesn't care :-) */
	simple_lock_init(&vmx_use_count_lock, 0);

	vmx_init();

	/*
	 * if we have read the data on boot, we won't read it
	 * again on wakeup, otherwise *bad* things will happen
	 */
	if (specs->initialized)
		return;
	else
		specs->initialized = TRUE;

	/* See if VMX is present, return if it is not */
	specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
	if (!specs->vmx_present)
		return;

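	/*
	 * Each VMX_VCR_* capability field is assumed to come with a
	 * <name>_BIT shift and a <name>_MASK constant (declared alongside
	 * vmx_specs_t); bitfield() extracts that field from an MSR image.
	 */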
#define bitfield(x,f)	((x >> f##_BIT) & f##_MASK)
	/* Obtain and decode VMX general capabilities */
	msr_image = rdmsr64(MSR_IA32_VMX_BASIC);
	specs->vmcs_id = (uint32_t)(msr_image & VMX_VCR_VMCS_REV_ID);
	specs->vmcs_mem_type = bitfield(msr_image, VMX_VCR_VMCS_MEM_TYPE) != 0;
	specs->vmcs_size = bitfield(msr_image, VMX_VCR_VMCS_SIZE);

	/* Obtain allowed settings for pin-based execution controls */
	msr_image = rdmsr64(MSR_IA32_VMXPINBASED_CTLS);
	specs->pin_exctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->pin_exctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain allowed settings for processor-based execution controls */
	msr_image = rdmsr64(MSR_IA32_PROCBASED_CTLS);
	specs->proc_exctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->proc_exctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain allowed settings for VM-exit controls */
	msr_image = rdmsr64(MSR_IA32_VMX_EXIT_CTLS);
	specs->exit_ctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->exit_ctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain allowed settings for VM-entry controls */
	msr_image = rdmsr64(MSR_IA32_VMX_ENTRY_CTLS);
	specs->enter_ctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->enter_ctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain and decode miscellaneous capabilities */
	msr_image = rdmsr64(MSR_IA32_VMX_MISC);
	specs->act_halt = bitfield(msr_image, VMX_VCR_ACT_HLT) != 0;
	specs->act_shutdown = bitfield(msr_image, VMX_VCR_ACT_SHUTDOWN) != 0;
	specs->act_SIPI = bitfield(msr_image, VMX_VCR_ACT_SIPI) != 0;
	specs->act_CSTATE = bitfield(msr_image, VMX_VCR_ACT_CSTATE) != 0;
	specs->cr3_targs = bitfield(msr_image, VMX_VCR_CR3_TARGS);
	specs->max_msrs = (uint32_t)(512 * (1 + bitfield(msr_image, VMX_VCR_MAX_MSRS)));
	specs->mseg_id = (uint32_t)bitfield(msr_image, VMX_VCR_MSEG_ID);

	/* Obtain VMX-fixed bits in CR0 */
	specs->cr0_fixed_0 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR0_FIXED0) & 0xFFFFFFFF;
	specs->cr0_fixed_1 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR0_FIXED1) & 0xFFFFFFFF;

	/* Obtain VMX-fixed bits in CR4 */
	specs->cr4_fixed_0 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR4_FIXED0) & 0xFFFFFFFF;
	specs->cr4_fixed_1 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR4_FIXED1) & 0xFFFFFFFF;
}

/* -----------------------------------------------------------------------------
   vmx_on()
	Enter VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_on(void *arg __unused)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	addr64_t vmxon_region_paddr;
	int result;

	vmx_init();

	assert(cpu->specs.vmx_present);

	if (NULL == cpu->vmxon_region)
		panic("vmx_on: VMXON region not allocated");
	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

	/*
	 * Enable VMX operation.
	 */
	set_cr4(get_cr4() | CR4_VMXE);

	assert(vmx_is_cr0_valid(&cpu->specs));
	assert(vmx_is_cr4_valid(&cpu->specs));

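	/*
	 * VMXON takes the 64-bit physical address of the VMXON region as
	 * its memory operand; __vmxon() is handed a pointer to that
	 * address here.
	 */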
	if ((result = __vmxon(&vmxon_region_paddr)) != VMX_SUCCEED) {
		panic("vmx_on: unexpected return %d from __vmxon()", result);
	}
}

/* -----------------------------------------------------------------------------
   vmx_off()
	Leave VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_off(void *arg __unused)
{
	int result;

	/* Tell the CPU to release the VMXON region */
	if ((result = __vmxoff()) != VMX_SUCCEED) {
		panic("vmx_off: unexpected return %d from __vmxoff()", result);
	}
}

/* -----------------------------------------------------------------------------
   vmx_allocate_vmxon_regions()
	Allocate, clear and init VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_allocate_vmxon_regions(void)
{
	unsigned int i;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		/* The size is defined to be always <= 4K, so we just allocate a page */
		cpu->vmxon_region = vmx_pcalloc();
		if (NULL == cpu->vmxon_region)
			panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
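		/*
		 * Per the Intel SDM, the first 32 bits of the VMXON region
		 * must hold the VMCS revision identifier reported by
		 * IA32_VMX_BASIC.
		 */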
		*(uint32_t*)(cpu->vmxon_region) = cpu->specs.vmcs_id;
	}
}

/* -----------------------------------------------------------------------------
   vmx_free_vmxon_regions()
	Free VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_free_vmxon_regions(void)
{
	unsigned int i;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		vmx_pfree(cpu->vmxon_region);
		cpu->vmxon_region = NULL;
	}
}

/* -----------------------------------------------------------------------------
   vmx_globally_available()
	Checks whether VT can be turned on for all CPUs.
   -------------------------------------------------------------------------- */
static boolean_t
vmx_globally_available(void)
{
	unsigned int i;

	boolean_t available = TRUE;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		if (!cpu->specs.vmx_present)
			available = FALSE;
	}
	VMX_KPRINTF("VMX available: %d\n", available);
	return available;
}

/* -----------------------------------------------------------------------------
   host_vmxon()
	Turn on VT operation on all CPUs.
   -------------------------------------------------------------------------- */
int
host_vmxon(boolean_t exclusive)
{
	int error;
	boolean_t do_it = FALSE; /* do the cpu sync outside of the area holding the lock */

	if (!vmx_globally_available())
		return VMX_UNSUPPORTED;

	simple_lock(&vmx_use_count_lock);

	if (vmx_exclusive) {
		error = VMX_INUSE;
	} else {
		vmx_use_count++;
		if (vmx_use_count == 1) /* was turned off before */
			do_it = TRUE;
		vmx_exclusive = exclusive;

		VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
		error = VMX_OK;
	}

	simple_unlock(&vmx_use_count_lock);

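	/*
	 * First user: allocate a VMXON region for every CPU, then enter
	 * VMX root operation everywhere via an MP rendezvous.  This is
	 * done outside the lock, as noted above.
	 */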
	if (do_it) {
		vmx_allocate_vmxon_regions();
		mp_rendezvous(NULL, vmx_on, NULL, NULL);
	}
	return error;
}

/* -----------------------------------------------------------------------------
   host_vmxoff()
	Turn off VT operation on all CPUs.
   -------------------------------------------------------------------------- */
void
host_vmxoff()
{
	boolean_t do_it = FALSE; /* do the cpu sync outside of the area holding the lock */

	simple_lock(&vmx_use_count_lock);

	if (vmx_use_count) {
		vmx_use_count--;
		vmx_exclusive = FALSE;
		if (!vmx_use_count)
			do_it = TRUE;
	}

	simple_unlock(&vmx_use_count_lock);

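	/*
	 * Last user: leave VMX root operation on every CPU via an MP
	 * rendezvous, then free the per-CPU VMXON regions.
	 */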
	if (do_it) {
		mp_rendezvous(NULL, vmx_off, NULL, NULL);
		vmx_free_vmxon_regions();
	}

	VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
}

/* -----------------------------------------------------------------------------
   vmx_suspend()
	Turn off VT operation on this CPU if it was on.
	Called when a CPU goes offline.
   -------------------------------------------------------------------------- */
void
vmx_suspend()
{
	VMX_KPRINTF("vmx_suspend\n");
	if (vmx_use_count)
		vmx_off(NULL);
}

/* -----------------------------------------------------------------------------
   vmx_resume()
	Restore the previous VT state. Called when CPU comes back online.
   -------------------------------------------------------------------------- */
void
vmx_resume()
{
	VMX_KPRINTF("vmx_resume\n");
	vmx_init();	/* init VMX on CPU #0 */
	if (vmx_use_count)
		vmx_on(NULL);
}