1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 #include <platforms.h>
32 #include <vm/vm_page.h>
33 #include <pexpert/pexpert.h>
34
35 #include <i386/cpuid.h>
36
37 static boolean_t cpuid_dbg
38 #if DEBUG
39 = TRUE;
40 #else
41 = FALSE;
42 #endif
43 #define DBG(x...) \
44 do { \
45 if (cpuid_dbg) \
46 kprintf(x); \
47	} while (0)
48
49 #define min(a,b) ((a) < (b) ? (a) : (b))
50 #define quad(hi,lo) (((uint64_t)(hi)) << 32 | (lo))
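/*
 * Illustrative note (not in the original source): quad() places its first
 * argument in the upper 32 bits, so quad(0x1, 0x80000000) evaluates to
 * 0x0000000180000000ULL.  cpuid_set_generic_info() below relies on this
 * ordering, keeping the ECX feature bits above the EDX bits.
 */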
51
52 /* Only for 32-bit values */
53 #define bit32(n)		(1U << (n))
54 #define bitmask32(h,l)		((bit32(h)|(bit32(h)-1)) & ~(bit32(l)-1))
55 #define bitfield32(x,h,l)	((((x) & bitmask32(h,l)) >> (l)))
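/*
 * Worked example (illustrative, not from the original file): with
 * eax = 0x000306A9 -- a plausible CPUID.1 signature -- the field macros
 * extract
 *
 *	bitfield32(0x000306A9,  3, 0) == 0x9	stepping
 *	bitfield32(0x000306A9,  7, 4) == 0xA	model
 *	bitfield32(0x000306A9, 11, 8) == 0x6	family
 */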
56
57 /*
58 * Leaf 2 cache descriptor encodings.
59 */
60 typedef enum {
61 _NULL_, /* NULL (empty) descriptor */
62 CACHE, /* Cache */
63 TLB, /* TLB */
64 STLB, /* Shared second-level unified TLB */
65 PREFETCH /* Prefetch size */
66 } cpuid_leaf2_desc_type_t;
67
68 typedef enum {
69 NA, /* Not Applicable */
70 FULLY, /* Fully-associative */
71 TRACE, /* Trace Cache (P4 only) */
72 INST, /* Instruction TLB */
73 DATA, /* Data TLB */
74 DATA0, /* Data TLB, 1st level */
75 DATA1, /* Data TLB, 2nd level */
76 L1, /* L1 (unified) cache */
77 L1_INST, /* L1 Instruction cache */
78 L1_DATA, /* L1 Data cache */
79 L2, /* L2 (unified) cache */
80 L3, /* L3 (unified) cache */
81 L2_2LINESECTOR, /* L2 (unified) cache with 2 lines per sector */
82 	L3_2LINESECTOR,	/* L3 (unified) cache with 2 lines per sector */
83 SMALL, /* Small page TLB */
84 LARGE, /* Large page TLB */
85 BOTH /* Small and Large page TLB */
86 } cpuid_leaf2_qualifier_t;
87
88 typedef struct cpuid_cache_descriptor {
89 uint8_t value; /* descriptor code */
90 uint8_t type; /* cpuid_leaf2_desc_type_t */
91 	uint8_t		level;		/* level of cache/TLB hierarchy */
92 uint8_t ways; /* wayness of cache */
93 uint16_t size; /* cachesize or TLB pagesize */
94 uint16_t entries; /* number of TLB entries or linesize */
95 } cpuid_cache_descriptor_t;
96
97 /*
98  * These multipliers are used to encode 1*K .. 64*M in a 16-bit size field
99 */
100 #define K (1)
101 #define M (1024)
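/* For example, 512*K encodes 512 KB and 12*M encodes 12 MB (12288 KB). */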
102
103 /*
104 * Intel cache descriptor table:
105 */
106 static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table[] = {
107 // -------------------------------------------------------
108 // value type level ways size entries
109 // -------------------------------------------------------
110 { 0x00, _NULL_, NA, NA, NA, NA },
111 { 0x01, TLB, INST, 4, SMALL, 32 },
112 { 0x02, TLB, INST, FULLY, LARGE, 2 },
113 { 0x03, TLB, DATA, 4, SMALL, 64 },
114 { 0x04, TLB, DATA, 4, LARGE, 8 },
115 { 0x05, TLB, DATA1, 4, LARGE, 32 },
116 { 0x06, CACHE, L1_INST, 4, 8*K, 32 },
117 { 0x08, CACHE, L1_INST, 4, 16*K, 32 },
118 { 0x09, CACHE, L1_INST, 4, 32*K, 64 },
119 { 0x0A, CACHE, L1_DATA, 2, 8*K, 32 },
120 { 0x0B, TLB, INST, 4, LARGE, 4 },
121 { 0x0C, CACHE, L1_DATA, 4, 16*K, 32 },
122 { 0x0D, CACHE, L1_DATA, 4, 16*K, 64 },
123 { 0x0E, CACHE, L1_DATA, 6, 24*K, 64 },
124 { 0x21, CACHE, L2, 8, 256*K, 64 },
125 { 0x22, CACHE, L3_2LINESECTOR, 4, 512*K, 64 },
126 { 0x23, CACHE, L3_2LINESECTOR, 8, 1*M, 64 },
127 { 0x25, CACHE, L3_2LINESECTOR, 8, 2*M, 64 },
128 { 0x29, CACHE, L3_2LINESECTOR, 8, 4*M, 64 },
129 { 0x2C, CACHE, L1_DATA, 8, 32*K, 64 },
130 { 0x30, CACHE, L1_INST, 8, 32*K, 64 },
131 { 0x40, CACHE, L2, NA, 0, NA },
132 { 0x41, CACHE, L2, 4, 128*K, 32 },
133 { 0x42, CACHE, L2, 4, 256*K, 32 },
134 { 0x43, CACHE, L2, 4, 512*K, 32 },
135 { 0x44, CACHE, L2, 4, 1*M, 32 },
136 { 0x45, CACHE, L2, 4, 2*M, 32 },
137 { 0x46, CACHE, L3, 4, 4*M, 64 },
138 { 0x47, CACHE, L3, 8, 8*M, 64 },
139 { 0x48, CACHE, L2, 12, 3*M, 64 },
140 { 0x49, CACHE, L2, 16, 4*M, 64 },
141 { 0x4A, CACHE, L3, 12, 6*M, 64 },
142 { 0x4B, CACHE, L3, 16, 8*M, 64 },
143 { 0x4C, CACHE, L3, 12, 12*M, 64 },
144 { 0x4D, CACHE, L3, 16, 16*M, 64 },
145 { 0x4E, CACHE, L2, 24, 6*M, 64 },
146 { 0x4F, TLB, INST, NA, SMALL, 32 },
147 { 0x50, TLB, INST, NA, BOTH, 64 },
148 { 0x51, TLB, INST, NA, BOTH, 128 },
149 { 0x52, TLB, INST, NA, BOTH, 256 },
150 { 0x55, TLB, INST, FULLY, BOTH, 7 },
151 { 0x56, TLB, DATA0, 4, LARGE, 16 },
152 { 0x57, TLB, DATA0, 4, SMALL, 16 },
153 { 0x59, TLB, DATA0, FULLY, SMALL, 16 },
154 { 0x5A, TLB, DATA0, 4, LARGE, 32 },
155 { 0x5B, TLB, DATA, NA, BOTH, 64 },
156 { 0x5C, TLB, DATA, NA, BOTH, 128 },
157 { 0x5D, TLB, DATA, NA, BOTH, 256 },
158 	{ 0x60,	CACHE,	L1,	8,	16*K,	64  },
159 { 0x61, CACHE, L1, 4, 8*K, 64 },
160 { 0x62, CACHE, L1, 4, 16*K, 64 },
161 { 0x63, CACHE, L1, 4, 32*K, 64 },
162 { 0x70, CACHE, TRACE, 8, 12*K, NA },
163 { 0x71, CACHE, TRACE, 8, 16*K, NA },
164 { 0x72, CACHE, TRACE, 8, 32*K, NA },
165 { 0x78, CACHE, L2, 4, 1*M, 64 },
166 { 0x79, CACHE, L2_2LINESECTOR, 8, 128*K, 64 },
167 { 0x7A, CACHE, L2_2LINESECTOR, 8, 256*K, 64 },
168 { 0x7B, CACHE, L2_2LINESECTOR, 8, 512*K, 64 },
169 { 0x7C, CACHE, L2_2LINESECTOR, 8, 1*M, 64 },
170 { 0x7D, CACHE, L2, 8, 2*M, 64 },
171 { 0x7F, CACHE, L2, 2, 512*K, 64 },
172 { 0x80, CACHE, L2, 8, 512*K, 64 },
173 { 0x82, CACHE, L2, 8, 256*K, 32 },
174 { 0x83, CACHE, L2, 8, 512*K, 32 },
175 { 0x84, CACHE, L2, 8, 1*M, 32 },
176 { 0x85, CACHE, L2, 8, 2*M, 32 },
177 { 0x86, CACHE, L2, 4, 512*K, 64 },
178 { 0x87, CACHE, L2, 8, 1*M, 64 },
179 { 0xB0, TLB, INST, 4, SMALL, 128 },
180 { 0xB1, TLB, INST, 4, LARGE, 8 },
181 { 0xB2, TLB, INST, 4, SMALL, 64 },
182 { 0xB3, TLB, DATA, 4, SMALL, 128 },
183 { 0xB4, TLB, DATA1, 4, SMALL, 256 },
184 { 0xBA, TLB, DATA1, 4, BOTH, 64 },
185 { 0xCA, STLB, DATA1, 4, BOTH, 512 },
186 { 0xD0, CACHE, L3, 4, 512*K, 64 },
187 { 0xD1, CACHE, L3, 4, 1*M, 64 },
188 { 0xD2, CACHE, L3, 4, 2*M, 64 },
189 { 0xD3, CACHE, L3, 4, 4*M, 64 },
190 { 0xD4, CACHE, L3, 4, 8*M, 64 },
191 { 0xD6, CACHE, L3, 8, 1*M, 64 },
192 { 0xD7, CACHE, L3, 8, 2*M, 64 },
193 { 0xD8, CACHE, L3, 8, 4*M, 64 },
194 { 0xD9, CACHE, L3, 8, 8*M, 64 },
195 { 0xDA, CACHE, L3, 8, 12*M, 64 },
196 { 0xDC, CACHE, L3, 12, 1536*K, 64 },
197 { 0xDD, CACHE, L3, 12, 3*M, 64 },
198 { 0xDE, CACHE, L3, 12, 6*M, 64 },
199 { 0xDF, CACHE, L3, 12, 12*M, 64 },
200 { 0xE0, CACHE, L3, 12, 18*M, 64 },
201 { 0xE2, CACHE, L3, 16, 2*M, 64 },
202 { 0xE3, CACHE, L3, 16, 4*M, 64 },
203 { 0xE4, CACHE, L3, 16, 8*M, 64 },
204 { 0xE5, CACHE, L3, 16, 16*M, 64 },
205 { 0xE6, CACHE, L3, 16, 24*M, 64 },
206 { 0xF0, PREFETCH, NA, NA, 64, NA },
207 { 0xF1, PREFETCH, NA, NA, 128, NA },
208 { 0xFF, CACHE, NA, NA, 0, NA }
209 };
210 #define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
211 sizeof(cpuid_cache_descriptor_t))
212
213 static inline cpuid_cache_descriptor_t *
214 cpuid_leaf2_find(uint8_t value)
215 {
216 unsigned int i;
217
218 for (i = 0; i < INTEL_LEAF2_DESC_NUM; i++)
219 if (intel_cpuid_leaf2_descriptor_table[i].value == value)
220 return &intel_cpuid_leaf2_descriptor_table[i];
221 return NULL;
222 }
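/*
 * Illustrative usage (a sketch, not part of the original code): a leaf 2
 * descriptor byte of 0x2C would be resolved against the table above as
 *
 *	cpuid_cache_descriptor_t *d = cpuid_leaf2_find(0x2C);
 *	// d->type == CACHE, d->level == L1_DATA, d->ways == 8,
 *	// d->size == 32*K, d->entries (line size) == 64
 *
 * while an unlisted descriptor byte yields NULL.
 */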
223
224 /*
225 * CPU identification routines.
226 */
227
228 static i386_cpu_info_t *cpuid_cpu_infop = NULL;
229 static i386_cpu_info_t cpuid_cpu_info;
230
231 #if defined(__x86_64__)
232 static void cpuid_fn(uint32_t selector, uint32_t *result)
233 {
234 do_cpuid(selector, result);
235 DBG("cpuid_fn(0x%08x) eax:0x%08x ebx:0x%08x ecx:0x%08x edx:0x%08x\n",
236 selector, result[0], result[1], result[2], result[3]);
237 }
238 #else
239 static void cpuid_fn(uint32_t selector, uint32_t *result)
240 {
241 if (get_is64bit()) {
242 asm("call _cpuid64"
243 : "=a" (result[0]),
244 "=b" (result[1]),
245 "=c" (result[2]),
246 "=d" (result[3])
247 : "a"(selector),
248 "b" (0),
249 "c" (0),
250 "d" (0));
251 } else {
252 do_cpuid(selector, result);
253 }
254 DBG("cpuid_fn(0x%08x) eax:0x%08x ebx:0x%08x ecx:0x%08x edx:0x%08x\n",
255 selector, result[0], result[1], result[2], result[3]);
256 }
257 #endif
258
259 static const char *cache_type_str[LCACHE_MAX] = {
260 "Lnone", "L1I", "L1D", "L2U", "L3U"
261 };
262
263 /* this function is Intel-specific */
264 static void
265 cpuid_set_cache_info( i386_cpu_info_t * info_p )
266 {
267 uint32_t cpuid_result[4];
268 uint32_t reg[4];
269 uint32_t index;
270 uint32_t linesizes[LCACHE_MAX];
271 unsigned int i;
272 unsigned int j;
273 boolean_t cpuid_deterministic_supported = FALSE;
274
275 DBG("cpuid_set_cache_info(%p)\n", info_p);
276
277 bzero( linesizes, sizeof(linesizes) );
278
279 /* Get processor cache descriptor info using leaf 2. We don't use
280 * this internally, but must publish it for KEXTs.
281 */
282 cpuid_fn(2, cpuid_result);
283 for (j = 0; j < 4; j++) {
284 if ((cpuid_result[j] >> 31) == 1) /* bit31 is validity */
285 continue;
286 ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j];
287 }
288 /* first byte gives number of cpuid calls to get all descriptors */
289 for (i = 1; i < info_p->cache_info[0]; i++) {
290 		if (i*16 >= sizeof(info_p->cache_info))
291 break;
292 cpuid_fn(2, cpuid_result);
293 for (j = 0; j < 4; j++) {
294 if ((cpuid_result[j] >> 31) == 1)
295 continue;
296 ((uint32_t *) info_p->cache_info)[4*i+j] =
297 cpuid_result[j];
298 }
299 }
300
301 /*
302 * Get cache info using leaf 4, the "deterministic cache parameters."
303 * Most processors Mac OS X supports implement this flavor of CPUID.
304 * Loop over each cache on the processor.
305 */
306 cpuid_fn(0, cpuid_result);
307 if (cpuid_result[eax] >= 4)
308 cpuid_deterministic_supported = TRUE;
309
310 for (index = 0; cpuid_deterministic_supported; index++) {
311 cache_type_t type = Lnone;
312 uint32_t cache_type;
313 uint32_t cache_level;
314 uint32_t cache_sharing;
315 uint32_t cache_linesize;
316 uint32_t cache_sets;
317 uint32_t cache_associativity;
318 uint32_t cache_size;
319 uint32_t cache_partitions;
320 uint32_t colors;
321
322 reg[eax] = 4; /* cpuid request 4 */
323 reg[ecx] = index; /* index starting at 0 */
324 cpuid(reg);
325 DBG("cpuid(4) index=%d eax=0x%x\n", index, reg[eax]);
326 cache_type = bitfield32(reg[eax], 4, 0);
327 if (cache_type == 0)
328 break; /* no more caches */
329 cache_level = bitfield32(reg[eax], 7, 5);
330 cache_sharing = bitfield32(reg[eax], 25, 14) + 1;
331 info_p->cpuid_cores_per_package
332 = bitfield32(reg[eax], 31, 26) + 1;
333 cache_linesize = bitfield32(reg[ebx], 11, 0) + 1;
334 cache_partitions = bitfield32(reg[ebx], 21, 12) + 1;
335 cache_associativity = bitfield32(reg[ebx], 31, 22) + 1;
336 cache_sets = bitfield32(reg[ecx], 31, 0) + 1;
337
338 /* Map type/levels returned by CPUID into cache_type_t */
339 switch (cache_level) {
340 case 1:
341 type = cache_type == 1 ? L1D :
342 cache_type == 2 ? L1I :
343 Lnone;
344 break;
345 case 2:
346 type = cache_type == 3 ? L2U :
347 Lnone;
348 break;
349 case 3:
350 type = cache_type == 3 ? L3U :
351 Lnone;
352 break;
353 default:
354 type = Lnone;
355 }
356
357 /* The total size of a cache is:
358 * ( linesize * sets * associativity * partitions )
359 */
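		/*
		 * Illustrative arithmetic (assumed typical values): a cache
		 * with 64-byte lines, 64 sets, 8 ways and 1 partition totals
		 * 64 * 64 * 8 * 1 = 32 KB.
		 */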
360 if (type != Lnone) {
361 cache_size = cache_linesize * cache_sets *
362 cache_associativity * cache_partitions;
363 info_p->cache_size[type] = cache_size;
364 info_p->cache_sharing[type] = cache_sharing;
365 info_p->cache_partitions[type] = cache_partitions;
366 linesizes[type] = cache_linesize;
367
368 DBG(" cache_size[%s] : %d\n",
369 cache_type_str[type], cache_size);
370 DBG(" cache_sharing[%s] : %d\n",
371 cache_type_str[type], cache_sharing);
372 DBG(" cache_partitions[%s]: %d\n",
373 cache_type_str[type], cache_partitions);
374
375 /*
376 * Overwrite associativity determined via
377 * CPUID.0x80000006 -- this leaf is more
378 * accurate
379 */
380 if (type == L2U)
381 info_p->cpuid_cache_L2_associativity = cache_associativity;
382
383 /* Compute the number of page colors for this cache,
384 * which is:
385 * ( linesize * sets ) / page_size
386 *
387 * To help visualize this, consider two views of a
388 * physical address. To the cache, it is composed
389 * of a line offset, a set selector, and a tag.
390 * To VM, it is composed of a page offset, a page
391 * color, and other bits in the pageframe number:
392 *
393 * +-----------------+---------+--------+
394 * cache: | tag | set | offset |
395 * +-----------------+---------+--------+
396 *
397 * +-----------------+-------+----------+
398 * VM: | don't care | color | pg offset|
399 * +-----------------+-------+----------+
400 *
401 * The color is those bits in (set+offset) not covered
402 * by the page offset.
403 */
404 colors = ( cache_linesize * cache_sets ) >> 12;
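			/*
			 * Illustrative (assumed values): a 256 KB, 8-way L2
			 * with 64-byte lines has 512 sets, giving
			 * (64 * 512) >> 12 = 8 page colors with 4 KB pages.
			 */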
405
406 if ( colors > vm_cache_geometry_colors )
407 vm_cache_geometry_colors = colors;
408 }
409 }
410 DBG(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
411
412 /*
413 	 * If deterministic cache parameters are not available, fall
414 	 * back to the legacy values from CPUID.0x80000006.
415 */
416 if (info_p->cpuid_cores_per_package == 0) {
417 info_p->cpuid_cores_per_package = 1;
418
419 		/* CPUID.0x80000006 reports the L2 size in KB (units of 1024 bytes) */
420 info_p->cache_size[L2U] = info_p->cpuid_cache_size * 1024;
421 info_p->cache_sharing[L2U] = 1;
422 info_p->cache_partitions[L2U] = 1;
423
424 linesizes[L2U] = info_p->cpuid_cache_linesize;
425
426 DBG(" cache_size[L2U] : %d\n",
427 info_p->cache_size[L2U]);
428 DBG(" cache_sharing[L2U] : 1\n");
429 DBG(" cache_partitions[L2U]: 1\n");
430 DBG(" linesizes[L2U] : %d\n",
431 info_p->cpuid_cache_linesize);
432 }
433
434 /*
435 * What linesize to publish? We use the L2 linesize if any,
436 * else the L1D.
437 */
438 if ( linesizes[L2U] )
439 info_p->cache_linesize = linesizes[L2U];
440 else if (linesizes[L1D])
441 info_p->cache_linesize = linesizes[L1D];
442 else panic("no linesize");
443 DBG(" cache_linesize : %d\n", info_p->cache_linesize);
444
445 /*
446 * Extract and publish TLB information from Leaf 2 descriptors.
447 */
448 DBG(" %ld leaf2 descriptors:\n", sizeof(info_p->cache_info));
449 for (i = 1; i < sizeof(info_p->cache_info); i++) {
450 cpuid_cache_descriptor_t *descp;
451 int id;
452 int level;
453 int page;
454
455 DBG(" 0x%02x", info_p->cache_info[i]);
456 descp = cpuid_leaf2_find(info_p->cache_info[i]);
457 if (descp == NULL)
458 continue;
459
460 switch (descp->type) {
461 case TLB:
462 page = (descp->size == SMALL) ? TLB_SMALL : TLB_LARGE;
463 /* determine I or D: */
464 switch (descp->level) {
465 case INST:
466 id = TLB_INST;
467 break;
468 case DATA:
469 case DATA0:
470 case DATA1:
471 id = TLB_DATA;
472 break;
473 default:
474 continue;
475 }
476 /* determine level: */
477 switch (descp->level) {
478 case DATA1:
479 level = 1;
480 break;
481 default:
482 level = 0;
483 }
484 info_p->cpuid_tlb[id][page][level] = descp->entries;
485 break;
486 case STLB:
487 info_p->cpuid_stlb = descp->entries;
488 }
489 }
490 DBG("\n");
491 }
492
493 static void
494 cpuid_set_generic_info(i386_cpu_info_t *info_p)
495 {
496 uint32_t reg[4];
497 char str[128], *p;
498
499 DBG("cpuid_set_generic_info(%p)\n", info_p);
500
501 /* do cpuid 0 to get vendor */
502 cpuid_fn(0, reg);
503 info_p->cpuid_max_basic = reg[eax];
504 bcopy((char *)&reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
505 bcopy((char *)&reg[ecx], &info_p->cpuid_vendor[8], 4);
506 bcopy((char *)&reg[edx], &info_p->cpuid_vendor[4], 4);
507 info_p->cpuid_vendor[12] = 0;
508
509 /* get extended cpuid results */
510 cpuid_fn(0x80000000, reg);
511 info_p->cpuid_max_ext = reg[eax];
512
513 /* check to see if we can get brand string */
514 if (info_p->cpuid_max_ext >= 0x80000004) {
515 /*
516 		 * The brand string is 48 bytes (max), guaranteed to
517 * be NUL terminated.
518 */
519 cpuid_fn(0x80000002, reg);
520 bcopy((char *)reg, &str[0], 16);
521 cpuid_fn(0x80000003, reg);
522 bcopy((char *)reg, &str[16], 16);
523 cpuid_fn(0x80000004, reg);
524 bcopy((char *)reg, &str[32], 16);
525 for (p = str; *p != '\0'; p++) {
526 if (*p != ' ') break;
527 }
528 strlcpy(info_p->cpuid_brand_string,
529 p, sizeof(info_p->cpuid_brand_string));
530
531 if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN,
532 min(sizeof(info_p->cpuid_brand_string),
533 strlen(CPUID_STRING_UNKNOWN) + 1))) {
534 /*
535 * This string means we have a firmware-programmable brand string,
536 * and the firmware couldn't figure out what sort of CPU we have.
537 */
538 info_p->cpuid_brand_string[0] = '\0';
539 }
540 }
541
542 /* Get cache and addressing info. */
543 if (info_p->cpuid_max_ext >= 0x80000006) {
544 uint32_t assoc;
545 cpuid_fn(0x80000006, reg);
546 info_p->cpuid_cache_linesize = bitfield32(reg[ecx], 7, 0);
547 assoc = bitfield32(reg[ecx],15,12);
548 /*
549 * L2 associativity is encoded, though in an insufficiently
550 * descriptive fashion, e.g. 24-way is mapped to 16-way.
551 * Represent a fully associative cache as 0xFFFF.
552 * Overwritten by associativity as determined via CPUID.4
553 * if available.
554 */
555 if (assoc == 6)
556 assoc = 8;
557 else if (assoc == 8)
558 assoc = 16;
559 else if (assoc == 0xF)
560 assoc = 0xFFFF;
561 info_p->cpuid_cache_L2_associativity = assoc;
562 info_p->cpuid_cache_size = bitfield32(reg[ecx],31,16);
563 cpuid_fn(0x80000008, reg);
564 info_p->cpuid_address_bits_physical =
565 bitfield32(reg[eax], 7, 0);
566 info_p->cpuid_address_bits_virtual =
567 bitfield32(reg[eax],15, 8);
568 }
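	/*
	 * Illustrative decode (assumed register values, not from the source):
	 * CPUID.0x80000006 ecx = 0x04008040 would yield linesize 64,
	 * associativity code 8 (reported here as 16-way) and an L2 size of
	 * 1024 KB; CPUID.0x80000008 on Core-era parts typically reports
	 * e.g. 36 physical and 48 virtual address bits.
	 */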
569
570 /*
571 	 * Get the processor signature and decode it, bracketing this
572 	 * with the approved procedure for reading the microcode
573 	 * version number, a.k.a. signature, a.k.a. BIOS ID.
574 */
575 wrmsr64(MSR_IA32_BIOS_SIGN_ID, 0);
576 cpuid_fn(1, reg);
577 info_p->cpuid_microcode_version =
578 (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
579 info_p->cpuid_signature = reg[eax];
580 info_p->cpuid_stepping = bitfield32(reg[eax], 3, 0);
581 info_p->cpuid_model = bitfield32(reg[eax], 7, 4);
582 info_p->cpuid_family = bitfield32(reg[eax], 11, 8);
583 info_p->cpuid_type = bitfield32(reg[eax], 13, 12);
584 info_p->cpuid_extmodel = bitfield32(reg[eax], 19, 16);
585 info_p->cpuid_extfamily = bitfield32(reg[eax], 27, 20);
586 info_p->cpuid_brand = bitfield32(reg[ebx], 7, 0);
587 info_p->cpuid_features = quad(reg[ecx], reg[edx]);
588
589 /* Get "processor flag"; necessary for microcode update matching */
590 info_p->cpuid_processor_flag = (rdmsr64(MSR_IA32_PLATFORM_ID)>> 50) & 3;
591
592 /* Fold extensions into family/model */
593 if (info_p->cpuid_family == 0x0f)
594 info_p->cpuid_family += info_p->cpuid_extfamily;
595 if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06)
596 info_p->cpuid_model += (info_p->cpuid_extmodel << 4);
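	/*
	 * Worked example (illustrative): signature 0x000306A9 decodes to
	 * family 6, model 0xA, extmodel 3, stepping 9; folding gives
	 * model 0xA + (3 << 4) = 0x3A, i.e. CPUID_MODEL_IVYBRIDGE.
	 */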
597
598 if (info_p->cpuid_features & CPUID_FEATURE_HTT)
599 info_p->cpuid_logical_per_package =
600 bitfield32(reg[ebx], 23, 16);
601 else
602 info_p->cpuid_logical_per_package = 1;
603
604 if (info_p->cpuid_max_ext >= 0x80000001) {
605 cpuid_fn(0x80000001, reg);
606 info_p->cpuid_extfeatures =
607 quad(reg[ecx], reg[edx]);
608 }
609
610 DBG(" max_basic : %d\n", info_p->cpuid_max_basic);
611 DBG(" max_ext : 0x%08x\n", info_p->cpuid_max_ext);
612 DBG(" vendor : %s\n", info_p->cpuid_vendor);
613 DBG(" brand_string : %s\n", info_p->cpuid_brand_string);
614 DBG(" signature : 0x%08x\n", info_p->cpuid_signature);
615 DBG(" stepping : %d\n", info_p->cpuid_stepping);
616 DBG(" model : %d\n", info_p->cpuid_model);
617 DBG(" family : %d\n", info_p->cpuid_family);
618 DBG(" type : %d\n", info_p->cpuid_type);
619 DBG(" extmodel : %d\n", info_p->cpuid_extmodel);
620 DBG(" extfamily : %d\n", info_p->cpuid_extfamily);
621 DBG(" brand : %d\n", info_p->cpuid_brand);
622 DBG(" features : 0x%016llx\n", info_p->cpuid_features);
623 DBG(" extfeatures : 0x%016llx\n", info_p->cpuid_extfeatures);
624 DBG(" logical_per_package : %d\n", info_p->cpuid_logical_per_package);
625 DBG(" microcode_version : 0x%08x\n", info_p->cpuid_microcode_version);
626
627 /* Fold in the Invariant TSC feature bit, if present */
628 if (info_p->cpuid_max_ext >= 0x80000007) {
629 cpuid_fn(0x80000007, reg);
630 info_p->cpuid_extfeatures |=
631 reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
632 DBG(" extfeatures : 0x%016llx\n",
633 info_p->cpuid_extfeatures);
634 }
635
636 if (info_p->cpuid_max_basic >= 0x5) {
637 cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf;
638
639 /*
640 * Extract the Monitor/Mwait Leaf info:
641 */
642 cpuid_fn(5, reg);
643 cmp->linesize_min = reg[eax];
644 cmp->linesize_max = reg[ebx];
645 cmp->extensions = reg[ecx];
646 cmp->sub_Cstates = reg[edx];
647 info_p->cpuid_mwait_leafp = cmp;
648
649 DBG(" Monitor/Mwait Leaf:\n");
650 DBG(" linesize_min : %d\n", cmp->linesize_min);
651 DBG(" linesize_max : %d\n", cmp->linesize_max);
652 DBG(" extensions : %d\n", cmp->extensions);
653 DBG(" sub_Cstates : 0x%08x\n", cmp->sub_Cstates);
654 }
655
656 if (info_p->cpuid_max_basic >= 0x6) {
657 cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf;
658
659 /*
660 * The thermal and Power Leaf:
661 */
662 cpuid_fn(6, reg);
663 ctp->sensor = bitfield32(reg[eax], 0, 0);
664 ctp->dynamic_acceleration = bitfield32(reg[eax], 1, 1);
665 ctp->invariant_APIC_timer = bitfield32(reg[eax], 2, 2);
666 ctp->core_power_limits = bitfield32(reg[eax], 3, 3);
667 ctp->fine_grain_clock_mod = bitfield32(reg[eax], 4, 4);
668 ctp->package_thermal_intr = bitfield32(reg[eax], 5, 5);
669 ctp->thresholds = bitfield32(reg[ebx], 3, 0);
670 ctp->ACNT_MCNT = bitfield32(reg[ecx], 0, 0);
671 ctp->hardware_feedback = bitfield32(reg[ecx], 1, 1);
672 ctp->energy_policy = bitfield32(reg[ecx], 2, 2);
673 info_p->cpuid_thermal_leafp = ctp;
674
675 DBG(" Thermal/Power Leaf:\n");
676 DBG(" sensor : %d\n", ctp->sensor);
677 DBG(" dynamic_acceleration : %d\n", ctp->dynamic_acceleration);
678 DBG(" invariant_APIC_timer : %d\n", ctp->invariant_APIC_timer);
679 DBG(" core_power_limits : %d\n", ctp->core_power_limits);
680 DBG(" fine_grain_clock_mod : %d\n", ctp->fine_grain_clock_mod);
681 DBG(" package_thermal_intr : %d\n", ctp->package_thermal_intr);
682 DBG(" thresholds : %d\n", ctp->thresholds);
683 DBG(" ACNT_MCNT : %d\n", ctp->ACNT_MCNT);
684 DBG(" hardware_feedback : %d\n", ctp->hardware_feedback);
685 DBG(" energy_policy : %d\n", ctp->energy_policy);
686 }
687
688 if (info_p->cpuid_max_basic >= 0xa) {
689 cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf;
690
691 /*
692 * Architectural Performance Monitoring Leaf:
693 */
694 cpuid_fn(0xa, reg);
695 capp->version = bitfield32(reg[eax], 7, 0);
696 capp->number = bitfield32(reg[eax], 15, 8);
697 capp->width = bitfield32(reg[eax], 23, 16);
698 capp->events_number = bitfield32(reg[eax], 31, 24);
699 capp->events = reg[ebx];
700 capp->fixed_number = bitfield32(reg[edx], 4, 0);
701 capp->fixed_width = bitfield32(reg[edx], 12, 5);
702 info_p->cpuid_arch_perf_leafp = capp;
703
704 DBG(" Architectural Performance Monitoring Leaf:\n");
705 DBG(" version : %d\n", capp->version);
706 DBG(" number : %d\n", capp->number);
707 DBG(" width : %d\n", capp->width);
708 DBG(" events_number : %d\n", capp->events_number);
709 DBG(" events : %d\n", capp->events);
710 DBG(" fixed_number : %d\n", capp->fixed_number);
711 DBG(" fixed_width : %d\n", capp->fixed_width);
712 }
713
714 if (info_p->cpuid_max_basic >= 0xd) {
715 cpuid_xsave_leaf_t *xsp = &info_p->cpuid_xsave_leaf;
716 /*
717 * XSAVE Features:
718 */
719 cpuid_fn(0xd, info_p->cpuid_xsave_leaf.extended_state);
720 info_p->cpuid_xsave_leafp = xsp;
721
722 DBG(" XSAVE Leaf:\n");
723 DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
724 DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
725 DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
726 DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
727 }
728
729 if (info_p->cpuid_model == CPUID_MODEL_IVYBRIDGE) {
730 /*
731 		 * Leaf 7 structured extended feature flags:
732 */
733 cpuid_fn(0x7, reg);
734 info_p->cpuid_leaf7_features = reg[ebx];
735
736 DBG(" Feature Leaf7:\n");
737 DBG(" EBX : 0x%x\n", reg[ebx]);
738 }
739
740 return;
741 }
742
743 static uint32_t
744 cpuid_set_cpufamily(i386_cpu_info_t *info_p)
745 {
746 uint32_t cpufamily = CPUFAMILY_UNKNOWN;
747
748 switch (info_p->cpuid_family) {
749 case 6:
750 switch (info_p->cpuid_model) {
751 #if CONFIG_YONAH
752 case 14:
753 cpufamily = CPUFAMILY_INTEL_YONAH;
754 break;
755 #endif
756 case 15:
757 cpufamily = CPUFAMILY_INTEL_MEROM;
758 break;
759 case 23:
760 cpufamily = CPUFAMILY_INTEL_PENRYN;
761 break;
762 case CPUID_MODEL_NEHALEM:
763 case CPUID_MODEL_FIELDS:
764 case CPUID_MODEL_DALES:
765 case CPUID_MODEL_NEHALEM_EX:
766 cpufamily = CPUFAMILY_INTEL_NEHALEM;
767 break;
768 case CPUID_MODEL_DALES_32NM:
769 case CPUID_MODEL_WESTMERE:
770 case CPUID_MODEL_WESTMERE_EX:
771 cpufamily = CPUFAMILY_INTEL_WESTMERE;
772 break;
773 case CPUID_MODEL_SANDYBRIDGE:
774 case CPUID_MODEL_JAKETOWN:
775 cpufamily = CPUFAMILY_INTEL_SANDYBRIDGE;
776 break;
777 case CPUID_MODEL_IVYBRIDGE:
778 cpufamily = CPUFAMILY_INTEL_IVYBRIDGE;
779 break;
780 }
781 break;
782 }
783
784 info_p->cpuid_cpufamily = cpufamily;
785 DBG("cpuid_set_cpufamily(%p) returning 0x%x\n", info_p, cpufamily);
786 return cpufamily;
787 }
788 /*
789 * Must be invoked either when executing single threaded, or with
790 * independent synchronization.
791 */
792 void
793 cpuid_set_info(void)
794 {
795 i386_cpu_info_t *info_p = &cpuid_cpu_info;
796
797 PE_parse_boot_argn("-cpuid", &cpuid_dbg, sizeof(cpuid_dbg));
798
799 bzero((void *)info_p, sizeof(cpuid_cpu_info));
800
801 cpuid_set_generic_info(info_p);
802
803 /* verify we are running on a supported CPU */
804 if ((strncmp(CPUID_VID_INTEL, info_p->cpuid_vendor,
805 		    min(strlen(CPUID_VID_INTEL) + 1,
806 sizeof(info_p->cpuid_vendor)))) ||
807 (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN))
808 panic("Unsupported CPU");
809
810 info_p->cpuid_cpu_type = CPU_TYPE_X86;
811 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
812 /* Must be invoked after set_generic_info */
813 cpuid_set_cache_info(&cpuid_cpu_info);
814
815 /*
816 * Find the number of enabled cores and threads
817 * (which determines whether SMT/Hyperthreading is active).
818 */
819 switch (info_p->cpuid_cpufamily) {
820 case CPUFAMILY_INTEL_WESTMERE: {
821 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
822 info_p->core_count = bitfield32((uint32_t)msr, 19, 16);
823 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
824 break;
825 }
826 case CPUFAMILY_INTEL_IVYBRIDGE:
827 case CPUFAMILY_INTEL_SANDYBRIDGE:
828 case CPUFAMILY_INTEL_NEHALEM: {
829 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
830 info_p->core_count = bitfield32((uint32_t)msr, 31, 16);
831 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
832 break;
833 }
834 }
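	/*
	 * Illustrative (assumed MSR value): on a 4-core/8-thread Nehalem
	 * part, MSR_CORE_THREAD_COUNT might read 0x00040008, giving
	 * core_count 4 (bits 31:16) and thread_count 8 (bits 15:0).
	 */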
835 if (info_p->core_count == 0) {
836 info_p->core_count = info_p->cpuid_cores_per_package;
837 info_p->thread_count = info_p->cpuid_logical_per_package;
838 }
839 DBG("cpuid_set_info():\n");
840 DBG(" core_count : %d\n", info_p->core_count);
841 DBG(" thread_count : %d\n", info_p->thread_count);
842
843 cpuid_cpu_info.cpuid_model_string = ""; /* deprecated */
844 }
845
846 static struct table {
847 uint64_t mask;
848 const char *name;
849 } feature_map[] = {
850 {CPUID_FEATURE_FPU, "FPU"},
851 {CPUID_FEATURE_VME, "VME"},
852 {CPUID_FEATURE_DE, "DE"},
853 {CPUID_FEATURE_PSE, "PSE"},
854 {CPUID_FEATURE_TSC, "TSC"},
855 {CPUID_FEATURE_MSR, "MSR"},
856 {CPUID_FEATURE_PAE, "PAE"},
857 {CPUID_FEATURE_MCE, "MCE"},
858 {CPUID_FEATURE_CX8, "CX8"},
859 {CPUID_FEATURE_APIC, "APIC"},
860 {CPUID_FEATURE_SEP, "SEP"},
861 {CPUID_FEATURE_MTRR, "MTRR"},
862 {CPUID_FEATURE_PGE, "PGE"},
863 {CPUID_FEATURE_MCA, "MCA"},
864 {CPUID_FEATURE_CMOV, "CMOV"},
865 {CPUID_FEATURE_PAT, "PAT"},
866 {CPUID_FEATURE_PSE36, "PSE36"},
867 {CPUID_FEATURE_PSN, "PSN"},
868 {CPUID_FEATURE_CLFSH, "CLFSH"},
869 {CPUID_FEATURE_DS, "DS"},
870 {CPUID_FEATURE_ACPI, "ACPI"},
871 {CPUID_FEATURE_MMX, "MMX"},
872 {CPUID_FEATURE_FXSR, "FXSR"},
873 {CPUID_FEATURE_SSE, "SSE"},
874 {CPUID_FEATURE_SSE2, "SSE2"},
875 {CPUID_FEATURE_SS, "SS"},
876 {CPUID_FEATURE_HTT, "HTT"},
877 {CPUID_FEATURE_TM, "TM"},
878 {CPUID_FEATURE_PBE, "PBE"},
879 {CPUID_FEATURE_SSE3, "SSE3"},
880 {CPUID_FEATURE_PCLMULQDQ, "PCLMULQDQ"},
881 {CPUID_FEATURE_DTES64, "DTES64"},
882 {CPUID_FEATURE_MONITOR, "MON"},
883 {CPUID_FEATURE_DSCPL, "DSCPL"},
884 {CPUID_FEATURE_VMX, "VMX"},
885 {CPUID_FEATURE_SMX, "SMX"},
886 {CPUID_FEATURE_EST, "EST"},
887 {CPUID_FEATURE_TM2, "TM2"},
888 {CPUID_FEATURE_SSSE3, "SSSE3"},
889 {CPUID_FEATURE_CID, "CID"},
890 {CPUID_FEATURE_CX16, "CX16"},
891 {CPUID_FEATURE_xTPR, "TPR"},
892 {CPUID_FEATURE_PDCM, "PDCM"},
893 {CPUID_FEATURE_SSE4_1, "SSE4.1"},
894 {CPUID_FEATURE_SSE4_2, "SSE4.2"},
895 {CPUID_FEATURE_xAPIC, "xAPIC"},
896 {CPUID_FEATURE_MOVBE, "MOVBE"},
897 {CPUID_FEATURE_POPCNT, "POPCNT"},
898 {CPUID_FEATURE_AES, "AES"},
899 {CPUID_FEATURE_VMM, "VMM"},
900 {CPUID_FEATURE_PCID, "PCID"},
901 {CPUID_FEATURE_XSAVE, "XSAVE"},
902 {CPUID_FEATURE_OSXSAVE, "OSXSAVE"},
903 {CPUID_FEATURE_SEGLIM64, "SEGLIM64"},
904 {CPUID_FEATURE_TSCTMR, "TSCTMR"},
905 {CPUID_FEATURE_AVX1_0, "AVX1.0"},
906 {CPUID_FEATURE_RDRAND, "RDRAND"},
907 {CPUID_FEATURE_F16C, "F16C"},
908 {0, 0}
909 },
910 extfeature_map[] = {
911 {CPUID_EXTFEATURE_SYSCALL, "SYSCALL"},
912 {CPUID_EXTFEATURE_XD, "XD"},
913 {CPUID_EXTFEATURE_1GBPAGE, "1GBPAGE"},
914 {CPUID_EXTFEATURE_EM64T, "EM64T"},
915 {CPUID_EXTFEATURE_LAHF, "LAHF"},
916 {CPUID_EXTFEATURE_RDTSCP, "RDTSCP"},
917 {CPUID_EXTFEATURE_TSCI, "TSCI"},
918 {0, 0}
919
920 },
921 leaf7_feature_map[] = {
922 {CPUID_LEAF7_FEATURE_RDWRFSGS, "RDWRFSGS"},
923 {CPUID_LEAF7_FEATURE_SMEP, "SMEP"},
924 {CPUID_LEAF7_FEATURE_ENFSTRG, "ENFSTRG"},
925 {0, 0}
926 };
927
928 static char *
929 cpuid_get_names(struct table *map, uint64_t bits, char *buf, unsigned buf_len)
930 {
931 size_t len = 0;
932 char *p = buf;
933 int i;
934
935 for (i = 0; map[i].mask != 0; i++) {
936 if ((bits & map[i].mask) == 0)
937 continue;
938 if (len && ((size_t) (p - buf) < (buf_len - 1)))
939 *p++ = ' ';
940 len = min(strlen(map[i].name), (size_t)((buf_len-1)-(p-buf)));
941 if (len == 0)
942 break;
943 bcopy(map[i].name, p, len);
944 p += len;
945 }
946 *p = '\0';
947 return buf;
948 }
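/*
 * Illustrative usage (a sketch; cpuid_feature_display() below does the
 * equivalent for the kernel log):
 *
 *	char buf[256];
 *	kprintf("features: %s\n",
 *	        cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf)));
 */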
949
950 i386_cpu_info_t *
951 cpuid_info(void)
952 {
953 	/* Set up the cpuid_info structure lazily */
954 if (cpuid_cpu_infop == NULL) {
955 cpuid_set_info();
956 cpuid_cpu_infop = &cpuid_cpu_info;
957 }
958 return cpuid_cpu_infop;
959 }
960
961 char *
962 cpuid_get_feature_names(uint64_t features, char *buf, unsigned buf_len)
963 {
964 return cpuid_get_names(feature_map, features, buf, buf_len);
965 }
966
967 char *
968 cpuid_get_extfeature_names(uint64_t extfeatures, char *buf, unsigned buf_len)
969 {
970 return cpuid_get_names(extfeature_map, extfeatures, buf, buf_len);
971 }
972
973 char *
974 cpuid_get_leaf7_feature_names(uint64_t features, char *buf, unsigned buf_len)
975 {
976 return cpuid_get_names(leaf7_feature_map, features, buf, buf_len);
977 }
978
979 void
980 cpuid_feature_display(
981 const char *header)
982 {
983 char buf[256];
984
985 kprintf("%s: %s", header,
986 cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf)));
987 if (cpuid_leaf7_features())
988 kprintf(" %s", cpuid_get_leaf7_feature_names(
989 cpuid_leaf7_features(), buf, sizeof(buf)));
990 kprintf("\n");
991 if (cpuid_features() & CPUID_FEATURE_HTT) {
992 #define s_if_plural(n) ((n > 1) ? "s" : "")
993 kprintf(" HTT: %d core%s per package;"
994 " %d logical cpu%s per package\n",
995 cpuid_cpu_info.cpuid_cores_per_package,
996 s_if_plural(cpuid_cpu_info.cpuid_cores_per_package),
997 cpuid_cpu_info.cpuid_logical_per_package,
998 s_if_plural(cpuid_cpu_info.cpuid_logical_per_package));
999 }
1000 }
1001
1002 void
1003 cpuid_extfeature_display(
1004 const char *header)
1005 {
1006 char buf[256];
1007
1008 kprintf("%s: %s\n", header,
1009 cpuid_get_extfeature_names(cpuid_extfeatures(),
1010 buf, sizeof(buf)));
1011 }
1012
1013 void
1014 cpuid_cpu_display(
1015 const char *header)
1016 {
1017 if (cpuid_cpu_info.cpuid_brand_string[0] != '\0') {
1018 kprintf("%s: %s\n", header, cpuid_cpu_info.cpuid_brand_string);
1019 }
1020 }
1021
1022 unsigned int
1023 cpuid_family(void)
1024 {
1025 return cpuid_info()->cpuid_family;
1026 }
1027
1028 uint32_t
1029 cpuid_cpufamily(void)
1030 {
1031 return cpuid_info()->cpuid_cpufamily;
1032 }
1033
1034 cpu_type_t
1035 cpuid_cputype(void)
1036 {
1037 return cpuid_info()->cpuid_cpu_type;
1038 }
1039
1040 cpu_subtype_t
1041 cpuid_cpusubtype(void)
1042 {
1043 return cpuid_info()->cpuid_cpu_subtype;
1044 }
1045
1046 uint64_t
1047 cpuid_features(void)
1048 {
1049 static int checked = 0;
1050 char fpu_arg[20] = { 0 };
1051
1052 (void) cpuid_info();
1053 if (!checked) {
1054 /* check for boot-time fpu limitations */
1055 if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof (fpu_arg))) {
1056 printf("limiting fpu features to: %s\n", fpu_arg);
1057 if (!strncmp("387", fpu_arg, sizeof("387")) || !strncmp("mmx", fpu_arg, sizeof("mmx"))) {
1058 printf("no sse or sse2\n");
1059 cpuid_cpu_info.cpuid_features &= ~(CPUID_FEATURE_SSE | CPUID_FEATURE_SSE2 | CPUID_FEATURE_FXSR);
1060 } else if (!strncmp("sse", fpu_arg, sizeof("sse"))) {
1061 printf("no sse2\n");
1062 cpuid_cpu_info.cpuid_features &= ~(CPUID_FEATURE_SSE2);
1063 }
1064 }
1065 checked = 1;
1066 }
1067 return cpuid_cpu_info.cpuid_features;
1068 }
1069
1070 uint64_t
1071 cpuid_extfeatures(void)
1072 {
1073 return cpuid_info()->cpuid_extfeatures;
1074 }
1075
1076 uint64_t
1077 cpuid_leaf7_features(void)
1078 {
1079 return cpuid_info()->cpuid_leaf7_features;
1080 }
1081
1082 static i386_vmm_info_t *_cpuid_vmm_infop = NULL;
1083 static i386_vmm_info_t _cpuid_vmm_info;
1084
1085 static void
1086 cpuid_init_vmm_info(i386_vmm_info_t *info_p)
1087 {
1088 uint32_t reg[4];
1089 uint32_t max_vmm_leaf;
1090
1091 bzero(info_p, sizeof(*info_p));
1092
1093 if (!cpuid_vmm_present())
1094 return;
1095
1096 DBG("cpuid_init_vmm_info(%p)\n", info_p);
1097
1098 /* do cpuid 0x40000000 to get VMM vendor */
1099 cpuid_fn(0x40000000, reg);
1100 max_vmm_leaf = reg[eax];
1101 bcopy((char *)&reg[ebx], &info_p->cpuid_vmm_vendor[0], 4);
1102 bcopy((char *)&reg[ecx], &info_p->cpuid_vmm_vendor[4], 4);
1103 bcopy((char *)&reg[edx], &info_p->cpuid_vmm_vendor[8], 4);
1104 info_p->cpuid_vmm_vendor[12] = '\0';
1105
1106 if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_VMWARE)) {
1107 /* VMware identification string: kb.vmware.com/kb/1009458 */
1108 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_VMWARE;
1109 } else {
1110 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_UNKNOWN;
1111 }
1112
1113 /* VMM generic leaves: https://lkml.org/lkml/2008/10/1/246 */
1114 if (max_vmm_leaf >= 0x40000010) {
1115 cpuid_fn(0x40000010, reg);
1116
1117 info_p->cpuid_vmm_tsc_frequency = reg[eax];
1118 info_p->cpuid_vmm_bus_frequency = reg[ebx];
1119 }
1120
1121 DBG(" vmm_vendor : %s\n", info_p->cpuid_vmm_vendor);
1122 DBG(" vmm_family : %u\n", info_p->cpuid_vmm_family);
1123 DBG(" vmm_bus_frequency : %u\n", info_p->cpuid_vmm_bus_frequency);
1124 DBG(" vmm_tsc_frequency : %u\n", info_p->cpuid_vmm_tsc_frequency);
1125 }
1126
1127 boolean_t
1128 cpuid_vmm_present(void)
1129 {
1130 return (cpuid_features() & CPUID_FEATURE_VMM) ? TRUE : FALSE;
1131 }
1132
1133 i386_vmm_info_t *
1134 cpuid_vmm_info(void)
1135 {
1136 if (_cpuid_vmm_infop == NULL) {
1137 cpuid_init_vmm_info(&_cpuid_vmm_info);
1138 _cpuid_vmm_infop = &_cpuid_vmm_info;
1139 }
1140 return _cpuid_vmm_infop;
1141 }
1142
1143 uint32_t
1144 cpuid_vmm_family(void)
1145 {
1146 return cpuid_vmm_info()->cpuid_vmm_family;
1147 }