1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 #include <vm/vm_page.h>
32 #include <pexpert/pexpert.h>
33
34 #include <i386/cpuid.h>
35
36 static boolean_t cpuid_dbg
37 #if DEBUG
38 = TRUE;
39 #else
40 = FALSE;
41 #endif
42 #define DBG(x...) \
43 do { \
44 if (cpuid_dbg) \
45 kprintf(x); \
46 } while (0)
47
48 #define min(a,b) ((a) < (b) ? (a) : (b))
49 #define quad(hi,lo) (((uint64_t)(hi)) << 32 | (lo))
50
51 /* Only for 32bit values */
52 #define bit32(n) (1U << (n))
53 #define bitmask32(h,l) ((bit32(h)|(bit32(h)-1)) & ~(bit32(l)-1))
54 #define bitfield32(x,h,l) ((((x) & bitmask32(h,l)) >> (l)))
55
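/*
 * Example: bitfield32(0x12345678, 15, 8) masks bits 15..8 and shifts them
 * down, yielding 0x56; the CPUID register fields (family, model, stepping,
 * ...) are extracted this way below. Similarly, quad(reg[ecx], reg[edx])
 * packs ECX into the high 32 bits and EDX into the low 32 bits of a
 * 64-bit feature word.
 */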
56 /*
57 * Leaf 2 cache descriptor encodings.
58 */
59 typedef enum {
60 _NULL_, /* NULL (empty) descriptor */
61 CACHE, /* Cache */
62 TLB, /* TLB */
63 STLB, /* Shared second-level unified TLB */
64 PREFETCH /* Prefetch size */
65 } cpuid_leaf2_desc_type_t;
66
67 typedef enum {
68 NA, /* Not Applicable */
69 FULLY, /* Fully-associative */
70 TRACE, /* Trace Cache (P4 only) */
71 INST, /* Instruction TLB */
72 DATA, /* Data TLB */
73 DATA0, /* Data TLB, 1st level */
74 DATA1, /* Data TLB, 2nd level */
75 L1, /* L1 (unified) cache */
76 L1_INST, /* L1 Instruction cache */
77 L1_DATA, /* L1 Data cache */
78 L2, /* L2 (unified) cache */
79 L3, /* L3 (unified) cache */
80 L2_2LINESECTOR, /* L2 (unified) cache with 2 lines per sector */
81 L3_2LINESECTOR, /* L3 (unified) cache with 2 lines per sector */
82 SMALL, /* Small page TLB */
83 LARGE, /* Large page TLB */
84 BOTH /* Small and Large page TLB */
85 } cpuid_leaf2_qualifier_t;
86
87 typedef struct cpuid_cache_descriptor {
88 uint8_t value; /* descriptor code */
89 uint8_t type; /* cpuid_leaf2_desc_type_t */
90 uint8_t level; /* level of cache/TLB hierarchy */
91 uint8_t ways; /* wayness of cache */
92 uint16_t size; /* cachesize or TLB pagesize */
93 uint16_t entries; /* number of TLB entries or linesize */
94 } cpuid_cache_descriptor_t;
95
96 /*
97 * These multipliers are used to encode 1*K .. 64*M in a 16-bit size field
98 */
99 #define K (1)
100 #define M (1024)
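/*
 * Cache sizes in the descriptor table below are therefore expressed in KB
 * units: e.g. 24*K encodes a 24KB cache and 12*M encodes 12MB (12288 KB).
 */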
101
102 /*
103 * Intel cache descriptor table:
104 */
105 static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table[] = {
106 // -------------------------------------------------------
107 // value type level ways size entries
108 // -------------------------------------------------------
109 { 0x00, _NULL_, NA, NA, NA, NA },
110 { 0x01, TLB, INST, 4, SMALL, 32 },
111 { 0x02, TLB, INST, FULLY, LARGE, 2 },
112 { 0x03, TLB, DATA, 4, SMALL, 64 },
113 { 0x04, TLB, DATA, 4, LARGE, 8 },
114 { 0x05, TLB, DATA1, 4, LARGE, 32 },
115 { 0x06, CACHE, L1_INST, 4, 8*K, 32 },
116 { 0x08, CACHE, L1_INST, 4, 16*K, 32 },
117 { 0x09, CACHE, L1_INST, 4, 32*K, 64 },
118 { 0x0A, CACHE, L1_DATA, 2, 8*K, 32 },
119 { 0x0B, TLB, INST, 4, LARGE, 4 },
120 { 0x0C, CACHE, L1_DATA, 4, 16*K, 32 },
121 { 0x0D, CACHE, L1_DATA, 4, 16*K, 64 },
122 { 0x0E, CACHE, L1_DATA, 6, 24*K, 64 },
123 { 0x21, CACHE, L2, 8, 256*K, 64 },
124 { 0x22, CACHE, L3_2LINESECTOR, 4, 512*K, 64 },
125 { 0x23, CACHE, L3_2LINESECTOR, 8, 1*M, 64 },
126 { 0x25, CACHE, L3_2LINESECTOR, 8, 2*M, 64 },
127 { 0x29, CACHE, L3_2LINESECTOR, 8, 4*M, 64 },
128 { 0x2C, CACHE, L1_DATA, 8, 32*K, 64 },
129 { 0x30, CACHE, L1_INST, 8, 32*K, 64 },
130 { 0x40, CACHE, L2, NA, 0, NA },
131 { 0x41, CACHE, L2, 4, 128*K, 32 },
132 { 0x42, CACHE, L2, 4, 256*K, 32 },
133 { 0x43, CACHE, L2, 4, 512*K, 32 },
134 { 0x44, CACHE, L2, 4, 1*M, 32 },
135 { 0x45, CACHE, L2, 4, 2*M, 32 },
136 { 0x46, CACHE, L3, 4, 4*M, 64 },
137 { 0x47, CACHE, L3, 8, 8*M, 64 },
138 { 0x48, CACHE, L2, 12, 3*M, 64 },
139 { 0x49, CACHE, L2, 16, 4*M, 64 },
140 { 0x4A, CACHE, L3, 12, 6*M, 64 },
141 { 0x4B, CACHE, L3, 16, 8*M, 64 },
142 { 0x4C, CACHE, L3, 12, 12*M, 64 },
143 { 0x4D, CACHE, L3, 16, 16*M, 64 },
144 { 0x4E, CACHE, L2, 24, 6*M, 64 },
145 { 0x4F, TLB, INST, NA, SMALL, 32 },
146 { 0x50, TLB, INST, NA, BOTH, 64 },
147 { 0x51, TLB, INST, NA, BOTH, 128 },
148 { 0x52, TLB, INST, NA, BOTH, 256 },
149 { 0x55, TLB, INST, FULLY, BOTH, 7 },
150 { 0x56, TLB, DATA0, 4, LARGE, 16 },
151 { 0x57, TLB, DATA0, 4, SMALL, 16 },
152 { 0x59, TLB, DATA0, FULLY, SMALL, 16 },
153 { 0x5A, TLB, DATA0, 4, LARGE, 32 },
154 { 0x5B, TLB, DATA, NA, BOTH, 64 },
155 { 0x5C, TLB, DATA, NA, BOTH, 128 },
156 { 0x5D, TLB, DATA, NA, BOTH, 256 },
157 { 0x60, CACHE, L1, 8, 16*K, 64 },
158 { 0x61, CACHE, L1, 4, 8*K, 64 },
159 { 0x62, CACHE, L1, 4, 16*K, 64 },
160 { 0x63, CACHE, L1, 4, 32*K, 64 },
161 { 0x70, CACHE, TRACE, 8, 12*K, NA },
162 { 0x71, CACHE, TRACE, 8, 16*K, NA },
163 { 0x72, CACHE, TRACE, 8, 32*K, NA },
164 { 0x76, TLB, INST, NA, BOTH, 8 },
165 { 0x78, CACHE, L2, 4, 1*M, 64 },
166 { 0x79, CACHE, L2_2LINESECTOR, 8, 128*K, 64 },
167 { 0x7A, CACHE, L2_2LINESECTOR, 8, 256*K, 64 },
168 { 0x7B, CACHE, L2_2LINESECTOR, 8, 512*K, 64 },
169 { 0x7C, CACHE, L2_2LINESECTOR, 8, 1*M, 64 },
170 { 0x7D, CACHE, L2, 8, 2*M, 64 },
171 { 0x7F, CACHE, L2, 2, 512*K, 64 },
172 { 0x80, CACHE, L2, 8, 512*K, 64 },
173 { 0x82, CACHE, L2, 8, 256*K, 32 },
174 { 0x83, CACHE, L2, 8, 512*K, 32 },
175 { 0x84, CACHE, L2, 8, 1*M, 32 },
176 { 0x85, CACHE, L2, 8, 2*M, 32 },
177 { 0x86, CACHE, L2, 4, 512*K, 64 },
178 { 0x87, CACHE, L2, 8, 1*M, 64 },
179 { 0xB0, TLB, INST, 4, SMALL, 128 },
180 { 0xB1, TLB, INST, 4, LARGE, 8 },
181 { 0xB2, TLB, INST, 4, SMALL, 64 },
182 { 0xB3, TLB, DATA, 4, SMALL, 128 },
183 { 0xB4, TLB, DATA1, 4, SMALL, 256 },
184 { 0xB5, TLB, DATA1, 8, SMALL, 64 },
185 { 0xB6, TLB, DATA1, 8, SMALL, 128 },
186 { 0xBA, TLB, DATA1, 4, BOTH, 64 },
187 { 0xC1, STLB, DATA1, 8, SMALL, 1024 },
188 { 0xCA, STLB, DATA1, 4, SMALL, 512 },
189 { 0xD0, CACHE, L3, 4, 512*K, 64 },
190 { 0xD1, CACHE, L3, 4, 1*M, 64 },
191 { 0xD2, CACHE, L3, 4, 2*M, 64 },
192 { 0xD3, CACHE, L3, 4, 4*M, 64 },
193 { 0xD4, CACHE, L3, 4, 8*M, 64 },
194 { 0xD6, CACHE, L3, 8, 1*M, 64 },
195 { 0xD7, CACHE, L3, 8, 2*M, 64 },
196 { 0xD8, CACHE, L3, 8, 4*M, 64 },
197 { 0xD9, CACHE, L3, 8, 8*M, 64 },
198 { 0xDA, CACHE, L3, 8, 12*M, 64 },
199 { 0xDC, CACHE, L3, 12, 1536*K, 64 },
200 { 0xDD, CACHE, L3, 12, 3*M, 64 },
201 { 0xDE, CACHE, L3, 12, 6*M, 64 },
202 { 0xDF, CACHE, L3, 12, 12*M, 64 },
203 { 0xE0, CACHE, L3, 12, 18*M, 64 },
204 { 0xE2, CACHE, L3, 16, 2*M, 64 },
205 { 0xE3, CACHE, L3, 16, 4*M, 64 },
206 { 0xE4, CACHE, L3, 16, 8*M, 64 },
207 { 0xE5, CACHE, L3, 16, 16*M, 64 },
208 { 0xE6, CACHE, L3, 16, 24*M, 64 },
209 { 0xF0, PREFETCH, NA, NA, 64, NA },
210 { 0xF1, PREFETCH, NA, NA, 128, NA },
211 { 0xFF, CACHE, NA, NA, 0, NA }
212 };
213 #define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
214 sizeof(cpuid_cache_descriptor_t))
215
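/*
 * Example lookup: descriptor byte 0x2C maps to the table entry
 * { CACHE, L1_DATA, 8, 32*K, 64 }, i.e. a 32KB 8-way L1 data cache
 * with 64-byte lines.
 */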
216 static inline cpuid_cache_descriptor_t *
217 cpuid_leaf2_find(uint8_t value)
218 {
219 unsigned int i;
220
221 for (i = 0; i < INTEL_LEAF2_DESC_NUM; i++)
222 if (intel_cpuid_leaf2_descriptor_table[i].value == value)
223 return &intel_cpuid_leaf2_descriptor_table[i];
224 return NULL;
225 }
226
227 /*
228 * CPU identification routines.
229 */
230
231 static i386_cpu_info_t cpuid_cpu_info;
232 static i386_cpu_info_t *cpuid_cpu_infop = NULL;
233
234 static void cpuid_fn(uint32_t selector, uint32_t *result)
235 {
236 do_cpuid(selector, result);
237 DBG("cpuid_fn(0x%08x) eax:0x%08x ebx:0x%08x ecx:0x%08x edx:0x%08x\n",
238 selector, result[0], result[1], result[2], result[3]);
239 }
240
241 static const char *cache_type_str[LCACHE_MAX] = {
242 "Lnone", "L1I", "L1D", "L2U", "L3U"
243 };
244
245 /* this function is Intel-specific */
246 static void
247 cpuid_set_cache_info( i386_cpu_info_t * info_p )
248 {
249 uint32_t cpuid_result[4];
250 uint32_t reg[4];
251 uint32_t index;
252 uint32_t linesizes[LCACHE_MAX];
253 unsigned int i;
254 unsigned int j;
255 boolean_t cpuid_deterministic_supported = FALSE;
256
257 DBG("cpuid_set_cache_info(%p)\n", info_p);
258
259 bzero( linesizes, sizeof(linesizes) );
260
261 /* Get processor cache descriptor info using leaf 2. We don't use
262 * this internally, but must publish it for KEXTs.
263 */
264 cpuid_fn(2, cpuid_result);
265 for (j = 0; j < 4; j++) {
266 if ((cpuid_result[j] >> 31) == 1) /* bit 31 set: register has no valid descriptors */
267 continue;
268 ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j];
269 }
270 /* first byte gives number of cpuid calls to get all descriptors */
271 for (i = 1; i < info_p->cache_info[0]; i++) {
272 if ((i+1)*16 > sizeof(info_p->cache_info))
273 break;
274 cpuid_fn(2, cpuid_result);
275 for (j = 0; j < 4; j++) {
276 if ((cpuid_result[j] >> 31) == 1)
277 continue;
278 ((uint32_t *) info_p->cache_info)[4*i+j] =
279 cpuid_result[j];
280 }
281 }
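/*
 * For example, an EAX value of 0x76036301 from leaf 2 means the low byte
 * (0x01) is the call count and the remaining bytes 0x63, 0x03 and 0x76
 * are descriptors to be looked up in the table above.
 */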
282
283 /*
284 * Get cache info using leaf 4, the "deterministic cache parameters."
285 * Most processors Mac OS X supports implement this flavor of CPUID.
286 * Loop over each cache on the processor.
287 */
288 cpuid_fn(0, cpuid_result);
289 if (cpuid_result[eax] >= 4)
290 cpuid_deterministic_supported = TRUE;
291
292 for (index = 0; cpuid_deterministic_supported; index++) {
293 cache_type_t type = Lnone;
294 uint32_t cache_type;
295 uint32_t cache_level;
296 uint32_t cache_sharing;
297 uint32_t cache_linesize;
298 uint32_t cache_sets;
299 uint32_t cache_associativity;
300 uint32_t cache_size;
301 uint32_t cache_partitions;
302 uint32_t colors;
303
304 reg[eax] = 4; /* cpuid request 4 */
305 reg[ecx] = index; /* index starting at 0 */
306 cpuid(reg);
307 DBG("cpuid(4) index=%d eax=0x%x\n", index, reg[eax]);
308 cache_type = bitfield32(reg[eax], 4, 0);
309 if (cache_type == 0)
310 break; /* no more caches */
311 cache_level = bitfield32(reg[eax], 7, 5);
312 cache_sharing = bitfield32(reg[eax], 25, 14) + 1;
313 info_p->cpuid_cores_per_package
314 = bitfield32(reg[eax], 31, 26) + 1;
315 cache_linesize = bitfield32(reg[ebx], 11, 0) + 1;
316 cache_partitions = bitfield32(reg[ebx], 21, 12) + 1;
317 cache_associativity = bitfield32(reg[ebx], 31, 22) + 1;
318 cache_sets = bitfield32(reg[ecx], 31, 0) + 1;
319
320 /* Map type/levels returned by CPUID into cache_type_t */
321 switch (cache_level) {
322 case 1:
323 type = cache_type == 1 ? L1D :
324 cache_type == 2 ? L1I :
325 Lnone;
326 break;
327 case 2:
328 type = cache_type == 3 ? L2U :
329 Lnone;
330 break;
331 case 3:
332 type = cache_type == 3 ? L3U :
333 Lnone;
334 break;
335 default:
336 type = Lnone;
337 }
338
339 /* The total size of a cache is:
340 * ( linesize * sets * associativity * partitions )
341 */
342 if (type != Lnone) {
343 cache_size = cache_linesize * cache_sets *
344 cache_associativity * cache_partitions;
345 info_p->cache_size[type] = cache_size;
346 info_p->cache_sharing[type] = cache_sharing;
347 info_p->cache_partitions[type] = cache_partitions;
348 linesizes[type] = cache_linesize;
349
350 DBG(" cache_size[%s] : %d\n",
351 cache_type_str[type], cache_size);
352 DBG(" cache_sharing[%s] : %d\n",
353 cache_type_str[type], cache_sharing);
354 DBG(" cache_partitions[%s]: %d\n",
355 cache_type_str[type], cache_partitions);
356
357 /*
358 * Overwrite associativity determined via
359 * CPUID.0x80000006 -- this leaf is more
360 * accurate
361 */
362 if (type == L2U)
363 info_p->cpuid_cache_L2_associativity = cache_associativity;
364
365 /* Compute the number of page colors for this cache,
366 * which is:
367 * ( linesize * sets ) / page_size
368 *
369 * To help visualize this, consider two views of a
370 * physical address. To the cache, it is composed
371 * of a line offset, a set selector, and a tag.
372 * To VM, it is composed of a page offset, a page
373 * color, and other bits in the pageframe number:
374 *
375 * +-----------------+---------+--------+
376 * cache: | tag | set | offset |
377 * +-----------------+---------+--------+
378 *
379 * +-----------------+-------+----------+
380 * VM: | don't care | color | pg offset|
381 * +-----------------+-------+----------+
382 *
383 * The color is those bits in (set+offset) not covered
384 * by the page offset.
385 */
386 colors = ( cache_linesize * cache_sets ) >> 12;
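/*
 * For example, 64-byte lines and 8192 sets give 512KB per way;
 * divided by the 4KB page size (>> 12) that is 128 page colors.
 */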
387
388 if ( colors > vm_cache_geometry_colors )
389 vm_cache_geometry_colors = colors;
390 }
391 }
392 DBG(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
393
394 /*
395 * If deterministic cache parameters are not available, fall back
396 * to the L2 values reported by CPUID leaf 0x80000006.
397 */
398 if (info_p->cpuid_cores_per_package == 0) {
399 info_p->cpuid_cores_per_package = 1;
400
401 /* CPUID leaf 0x80000006 reports the L2 size in 1024-byte (KB) units */
402 info_p->cache_size[L2U] = info_p->cpuid_cache_size * 1024;
403 info_p->cache_sharing[L2U] = 1;
404 info_p->cache_partitions[L2U] = 1;
405
406 linesizes[L2U] = info_p->cpuid_cache_linesize;
407
408 DBG(" cache_size[L2U] : %d\n",
409 info_p->cache_size[L2U]);
410 DBG(" cache_sharing[L2U] : 1\n");
411 DBG(" cache_partitions[L2U]: 1\n");
412 DBG(" linesizes[L2U] : %d\n",
413 info_p->cpuid_cache_linesize);
414 }
415
416 /*
417 * What linesize to publish? We use the L2 linesize if any,
418 * else the L1D.
419 */
420 if ( linesizes[L2U] )
421 info_p->cache_linesize = linesizes[L2U];
422 else if (linesizes[L1D])
423 info_p->cache_linesize = linesizes[L1D];
424 else panic("no linesize");
425 DBG(" cache_linesize : %d\n", info_p->cache_linesize);
426
427 /*
428 * Extract and publish TLB information from Leaf 2 descriptors.
429 */
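/*
 * For example, descriptor 0x03 (data TLB, 4-way, small pages, 64 entries)
 * ends up as cpuid_tlb[TLB_DATA][TLB_SMALL][0] = 64.
 */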
430 DBG(" %ld leaf2 descriptors:\n", sizeof(info_p->cache_info));
431 for (i = 1; i < sizeof(info_p->cache_info); i++) {
432 cpuid_cache_descriptor_t *descp;
433 int id;
434 int level;
435 int page;
436
437 DBG(" 0x%02x", info_p->cache_info[i]);
438 descp = cpuid_leaf2_find(info_p->cache_info[i]);
439 if (descp == NULL)
440 continue;
441
442 switch (descp->type) {
443 case TLB:
444 page = (descp->size == SMALL) ? TLB_SMALL : TLB_LARGE;
445 /* determine I or D: */
446 switch (descp->level) {
447 case INST:
448 id = TLB_INST;
449 break;
450 case DATA:
451 case DATA0:
452 case DATA1:
453 id = TLB_DATA;
454 break;
455 default:
456 continue;
457 }
458 /* determine level: */
459 switch (descp->level) {
460 case DATA1:
461 level = 1;
462 break;
463 default:
464 level = 0;
465 }
466 info_p->cpuid_tlb[id][page][level] = descp->entries;
467 break;
468 case STLB:
469 info_p->cpuid_stlb = descp->entries;
470 }
471 }
472 DBG("\n");
473 }
474
475 static void
476 cpuid_set_generic_info(i386_cpu_info_t *info_p)
477 {
478 uint32_t reg[4];
479 char str[128], *p;
480
481 DBG("cpuid_set_generic_info(%p)\n", info_p);
482
483 /* do cpuid 0 to get vendor */
484 cpuid_fn(0, reg);
485 info_p->cpuid_max_basic = reg[eax];
486 bcopy((char *)&reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
487 bcopy((char *)&reg[ecx], &info_p->cpuid_vendor[8], 4);
488 bcopy((char *)&reg[edx], &info_p->cpuid_vendor[4], 4);
489 info_p->cpuid_vendor[12] = 0;
490
491 /* get extended cpuid results */
492 cpuid_fn(0x80000000, reg);
493 info_p->cpuid_max_ext = reg[eax];
494
495 /* check to see if we can get brand string */
496 if (info_p->cpuid_max_ext >= 0x80000004) {
497 /*
498 * The brand string is 48 bytes (max), guaranteed to
499 * be NUL terminated.
500 */
501 cpuid_fn(0x80000002, reg);
502 bcopy((char *)reg, &str[0], 16);
503 cpuid_fn(0x80000003, reg);
504 bcopy((char *)reg, &str[16], 16);
505 cpuid_fn(0x80000004, reg);
506 bcopy((char *)reg, &str[32], 16);
507 for (p = str; *p != '\0'; p++) {
508 if (*p != ' ') break;
509 }
510 strlcpy(info_p->cpuid_brand_string,
511 p, sizeof(info_p->cpuid_brand_string));
512
513 if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN,
514 min(sizeof(info_p->cpuid_brand_string),
515 strlen(CPUID_STRING_UNKNOWN) + 1))) {
516 /*
517 * This string means we have a firmware-programmable brand string,
518 * and the firmware couldn't figure out what sort of CPU we have.
519 */
520 info_p->cpuid_brand_string[0] = '\0';
521 }
522 }
523
524 /* Get cache and addressing info. */
525 if (info_p->cpuid_max_ext >= 0x80000006) {
526 uint32_t assoc;
527 cpuid_fn(0x80000006, reg);
528 info_p->cpuid_cache_linesize = bitfield32(reg[ecx], 7, 0);
529 assoc = bitfield32(reg[ecx],15,12);
530 /*
531 * L2 associativity is encoded, though in an insufficiently
532 * descriptive fashion, e.g. 24-way is mapped to 16-way.
533 * Represent a fully associative cache as 0xFFFF.
534 * Overwritten by associativity as determined via CPUID.4
535 * if available.
536 */
537 if (assoc == 6)
538 assoc = 8;
539 else if (assoc == 8)
540 assoc = 16;
541 else if (assoc == 0xF)
542 assoc = 0xFFFF;
543 info_p->cpuid_cache_L2_associativity = assoc;
544 info_p->cpuid_cache_size = bitfield32(reg[ecx],31,16);
545 cpuid_fn(0x80000008, reg);
546 info_p->cpuid_address_bits_physical =
547 bitfield32(reg[eax], 7, 0);
548 info_p->cpuid_address_bits_virtual =
549 bitfield32(reg[eax],15, 8);
550 }
551
552 /*
553 * Get the processor signature and decode it. Bracket this with the
554 * approved procedure for reading the microcode version number (a.k.a.
555 * signature, a.k.a. BIOS ID): write 0 to the MSR, execute CPUID(1), then read the MSR.
556 */
557 wrmsr64(MSR_IA32_BIOS_SIGN_ID, 0);
558 cpuid_fn(1, reg);
559 info_p->cpuid_microcode_version =
560 (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
561 info_p->cpuid_signature = reg[eax];
562 info_p->cpuid_stepping = bitfield32(reg[eax], 3, 0);
563 info_p->cpuid_model = bitfield32(reg[eax], 7, 4);
564 info_p->cpuid_family = bitfield32(reg[eax], 11, 8);
565 info_p->cpuid_type = bitfield32(reg[eax], 13, 12);
566 info_p->cpuid_extmodel = bitfield32(reg[eax], 19, 16);
567 info_p->cpuid_extfamily = bitfield32(reg[eax], 27, 20);
568 info_p->cpuid_brand = bitfield32(reg[ebx], 7, 0);
569 info_p->cpuid_features = quad(reg[ecx], reg[edx]);
570
571 /* Get "processor flag"; necessary for microcode update matching */
572 info_p->cpuid_processor_flag = (rdmsr64(MSR_IA32_PLATFORM_ID) >> 50) & 0x7;
573
574 /* Fold extensions into family/model */
575 if (info_p->cpuid_family == 0x0f)
576 info_p->cpuid_family += info_p->cpuid_extfamily;
577 if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06)
578 info_p->cpuid_model += (info_p->cpuid_extmodel << 4);
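/*
 * For example, signature 0x000306A9 decodes to family 6, model 0xA,
 * extmodel 3; folding yields model 0x3A (CPUID_MODEL_IVYBRIDGE).
 */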
579
580 if (info_p->cpuid_features & CPUID_FEATURE_HTT)
581 info_p->cpuid_logical_per_package =
582 bitfield32(reg[ebx], 23, 16);
583 else
584 info_p->cpuid_logical_per_package = 1;
585
586 if (info_p->cpuid_max_ext >= 0x80000001) {
587 cpuid_fn(0x80000001, reg);
588 info_p->cpuid_extfeatures =
589 quad(reg[ecx], reg[edx]);
590 }
591
592 DBG(" max_basic : %d\n", info_p->cpuid_max_basic);
593 DBG(" max_ext : 0x%08x\n", info_p->cpuid_max_ext);
594 DBG(" vendor : %s\n", info_p->cpuid_vendor);
595 DBG(" brand_string : %s\n", info_p->cpuid_brand_string);
596 DBG(" signature : 0x%08x\n", info_p->cpuid_signature);
597 DBG(" stepping : %d\n", info_p->cpuid_stepping);
598 DBG(" model : %d\n", info_p->cpuid_model);
599 DBG(" family : %d\n", info_p->cpuid_family);
600 DBG(" type : %d\n", info_p->cpuid_type);
601 DBG(" extmodel : %d\n", info_p->cpuid_extmodel);
602 DBG(" extfamily : %d\n", info_p->cpuid_extfamily);
603 DBG(" brand : %d\n", info_p->cpuid_brand);
604 DBG(" features : 0x%016llx\n", info_p->cpuid_features);
605 DBG(" extfeatures : 0x%016llx\n", info_p->cpuid_extfeatures);
606 DBG(" logical_per_package : %d\n", info_p->cpuid_logical_per_package);
607 DBG(" microcode_version : 0x%08x\n", info_p->cpuid_microcode_version);
608
609 /* Fold in the Invariant TSC feature bit, if present */
610 if (info_p->cpuid_max_ext >= 0x80000007) {
611 cpuid_fn(0x80000007, reg);
612 info_p->cpuid_extfeatures |=
613 reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
614 DBG(" extfeatures : 0x%016llx\n",
615 info_p->cpuid_extfeatures);
616 }
617
618 if (info_p->cpuid_max_basic >= 0x5) {
619 cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf;
620
621 /*
622 * Extract the Monitor/Mwait Leaf info:
623 */
624 cpuid_fn(5, reg);
625 cmp->linesize_min = reg[eax];
626 cmp->linesize_max = reg[ebx];
627 cmp->extensions = reg[ecx];
628 cmp->sub_Cstates = reg[edx];
629 info_p->cpuid_mwait_leafp = cmp;
630
631 DBG(" Monitor/Mwait Leaf:\n");
632 DBG(" linesize_min : %d\n", cmp->linesize_min);
633 DBG(" linesize_max : %d\n", cmp->linesize_max);
634 DBG(" extensions : %d\n", cmp->extensions);
635 DBG(" sub_Cstates : 0x%08x\n", cmp->sub_Cstates);
636 }
637
638 if (info_p->cpuid_max_basic >= 0x6) {
639 cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf;
640
641 /*
642 * The thermal and Power Leaf:
643 */
644 cpuid_fn(6, reg);
645 ctp->sensor = bitfield32(reg[eax], 0, 0);
646 ctp->dynamic_acceleration = bitfield32(reg[eax], 1, 1);
647 ctp->invariant_APIC_timer = bitfield32(reg[eax], 2, 2);
648 ctp->core_power_limits = bitfield32(reg[eax], 4, 4);
649 ctp->fine_grain_clock_mod = bitfield32(reg[eax], 5, 5);
650 ctp->package_thermal_intr = bitfield32(reg[eax], 6, 6);
651 ctp->thresholds = bitfield32(reg[ebx], 3, 0);
652 ctp->ACNT_MCNT = bitfield32(reg[ecx], 0, 0);
653 ctp->hardware_feedback = bitfield32(reg[ecx], 1, 1);
654 ctp->energy_policy = bitfield32(reg[ecx], 3, 3);
655 info_p->cpuid_thermal_leafp = ctp;
656
657 DBG(" Thermal/Power Leaf:\n");
658 DBG(" sensor : %d\n", ctp->sensor);
659 DBG(" dynamic_acceleration : %d\n", ctp->dynamic_acceleration);
660 DBG(" invariant_APIC_timer : %d\n", ctp->invariant_APIC_timer);
661 DBG(" core_power_limits : %d\n", ctp->core_power_limits);
662 DBG(" fine_grain_clock_mod : %d\n", ctp->fine_grain_clock_mod);
663 DBG(" package_thermal_intr : %d\n", ctp->package_thermal_intr);
664 DBG(" thresholds : %d\n", ctp->thresholds);
665 DBG(" ACNT_MCNT : %d\n", ctp->ACNT_MCNT);
666 DBG(" ACNT2 : %d\n", ctp->hardware_feedback);
667 DBG(" energy_policy : %d\n", ctp->energy_policy);
668 }
669
670 if (info_p->cpuid_max_basic >= 0xa) {
671 cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf;
672
673 /*
674 * Architectural Performance Monitoring Leaf:
675 */
676 cpuid_fn(0xa, reg);
677 capp->version = bitfield32(reg[eax], 7, 0);
678 capp->number = bitfield32(reg[eax], 15, 8);
679 capp->width = bitfield32(reg[eax], 23, 16);
680 capp->events_number = bitfield32(reg[eax], 31, 24);
681 capp->events = reg[ebx];
682 capp->fixed_number = bitfield32(reg[edx], 4, 0);
683 capp->fixed_width = bitfield32(reg[edx], 12, 5);
684 info_p->cpuid_arch_perf_leafp = capp;
685
686 DBG(" Architectural Performance Monitoring Leaf:\n");
687 DBG(" version : %d\n", capp->version);
688 DBG(" number : %d\n", capp->number);
689 DBG(" width : %d\n", capp->width);
690 DBG(" events_number : %d\n", capp->events_number);
691 DBG(" events : %d\n", capp->events);
692 DBG(" fixed_number : %d\n", capp->fixed_number);
693 DBG(" fixed_width : %d\n", capp->fixed_width);
694 }
695
696 if (info_p->cpuid_max_basic >= 0xd) {
697 cpuid_xsave_leaf_t *xsp = &info_p->cpuid_xsave_leaf;
698 /*
699 * XSAVE Features:
700 */
701 cpuid_fn(0xd, info_p->cpuid_xsave_leaf.extended_state);
702 info_p->cpuid_xsave_leafp = xsp;
703
704 DBG(" XSAVE Leaf:\n");
705 DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
706 DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
707 DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
708 DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
709 }
710
711 if (info_p->cpuid_model >= CPUID_MODEL_IVYBRIDGE) {
712 /*
713 * Leaf7 Features:
714 */
715 cpuid_fn(0x7, reg);
716 info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]);
717
718 DBG(" Feature Leaf7:\n");
719 DBG(" EBX : 0x%x\n", reg[ebx]);
720 DBG(" ECX : 0x%x\n", reg[ecx]);
721 }
722
723 return;
724 }
725
726 static uint32_t
727 cpuid_set_cpufamily(i386_cpu_info_t *info_p)
728 {
729 uint32_t cpufamily = CPUFAMILY_UNKNOWN;
730
731 switch (info_p->cpuid_family) {
732 case 6:
733 switch (info_p->cpuid_model) {
734 case 15:
735 cpufamily = CPUFAMILY_INTEL_MEROM;
736 break;
737 case 23:
738 cpufamily = CPUFAMILY_INTEL_PENRYN;
739 break;
740 case CPUID_MODEL_NEHALEM:
741 case CPUID_MODEL_FIELDS:
742 case CPUID_MODEL_DALES:
743 case CPUID_MODEL_NEHALEM_EX:
744 cpufamily = CPUFAMILY_INTEL_NEHALEM;
745 break;
746 case CPUID_MODEL_DALES_32NM:
747 case CPUID_MODEL_WESTMERE:
748 case CPUID_MODEL_WESTMERE_EX:
749 cpufamily = CPUFAMILY_INTEL_WESTMERE;
750 break;
751 case CPUID_MODEL_SANDYBRIDGE:
752 case CPUID_MODEL_JAKETOWN:
753 cpufamily = CPUFAMILY_INTEL_SANDYBRIDGE;
754 break;
755 case CPUID_MODEL_IVYBRIDGE:
756 case CPUID_MODEL_IVYBRIDGE_EP:
757 cpufamily = CPUFAMILY_INTEL_IVYBRIDGE;
758 break;
759 case CPUID_MODEL_HASWELL:
760 case CPUID_MODEL_HASWELL_EP:
761 case CPUID_MODEL_HASWELL_ULT:
762 case CPUID_MODEL_CRYSTALWELL:
763 cpufamily = CPUFAMILY_INTEL_HASWELL;
764 break;
765 case CPUID_MODEL_BROADWELL:
766 case CPUID_MODEL_BRYSTALWELL:
767 cpufamily = CPUFAMILY_INTEL_BROADWELL;
768 break;
769 }
770 break;
771 }
772
773 info_p->cpuid_cpufamily = cpufamily;
774 DBG("cpuid_set_cpufamily(%p) returning 0x%x\n", info_p, cpufamily);
775 return cpufamily;
776 }
777 /*
778 * Must be invoked either when executing single threaded, or with
779 * independent synchronization.
780 */
781 void
782 cpuid_set_info(void)
783 {
784 i386_cpu_info_t *info_p = &cpuid_cpu_info;
785 boolean_t enable_x86_64h = TRUE;
786
787 cpuid_set_generic_info(info_p);
788
789 /* verify we are running on a supported CPU */
790 if ((strncmp(CPUID_VID_INTEL, info_p->cpuid_vendor,
791 min(strlen(CPUID_STRING_UNKNOWN) + 1,
792 sizeof(info_p->cpuid_vendor)))) ||
793 (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN))
794 panic("Unsupported CPU");
795
796 info_p->cpuid_cpu_type = CPU_TYPE_X86;
797
798 if (!PE_parse_boot_argn("-enable_x86_64h", &enable_x86_64h, sizeof(enable_x86_64h))) {
799 boolean_t disable_x86_64h = FALSE;
800
801 if (PE_parse_boot_argn("-disable_x86_64h", &disable_x86_64h, sizeof(disable_x86_64h))) {
802 enable_x86_64h = FALSE;
803 }
804 }
805
806 if (enable_x86_64h &&
807 ((info_p->cpuid_features & CPUID_X86_64_H_FEATURE_SUBSET) == CPUID_X86_64_H_FEATURE_SUBSET) &&
808 ((info_p->cpuid_extfeatures & CPUID_X86_64_H_EXTFEATURE_SUBSET) == CPUID_X86_64_H_EXTFEATURE_SUBSET) &&
809 ((info_p->cpuid_leaf7_features & CPUID_X86_64_H_LEAF7_FEATURE_SUBSET) == CPUID_X86_64_H_LEAF7_FEATURE_SUBSET)) {
810 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_64_H;
811 } else {
812 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
813 }
814
815 /* Must be invoked after set_generic_info */
816 cpuid_set_cache_info(info_p);
817
818 /*
819 * Find the number of enabled cores and threads
820 * (which determines whether SMT/Hyperthreading is active).
821 */
822 switch (info_p->cpuid_cpufamily) {
823 case CPUFAMILY_INTEL_MEROM:
824 case CPUFAMILY_INTEL_PENRYN:
825 info_p->core_count = info_p->cpuid_cores_per_package;
826 info_p->thread_count = info_p->cpuid_logical_per_package;
827 break;
828 case CPUFAMILY_INTEL_WESTMERE: {
829 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
830 info_p->core_count = bitfield32((uint32_t)msr, 19, 16);
831 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
832 break;
833 }
834 default: {
835 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
836 info_p->core_count = bitfield32((uint32_t)msr, 31, 16);
837 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
838 break;
839 }
840 }
841 if (info_p->core_count == 0) {
842 info_p->core_count = info_p->cpuid_cores_per_package;
843 info_p->thread_count = info_p->cpuid_logical_per_package;
844 }
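/*
 * For example, a quad-core part with Hyper-Threading enabled reports
 * core_count 4 and thread_count 8; SMT is active whenever
 * thread_count > core_count.
 */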
845 DBG("cpuid_set_info():\n");
846 DBG(" core_count : %d\n", info_p->core_count);
847 DBG(" thread_count : %d\n", info_p->thread_count);
848 DBG(" cpu_type: 0x%08x\n", info_p->cpuid_cpu_type);
849 DBG(" cpu_subtype: 0x%08x\n", info_p->cpuid_cpu_subtype);
850
851 info_p->cpuid_model_string = ""; /* deprecated */
852 }
853
854 static struct table {
855 uint64_t mask;
856 const char *name;
857 } feature_map[] = {
858 {CPUID_FEATURE_FPU, "FPU"},
859 {CPUID_FEATURE_VME, "VME"},
860 {CPUID_FEATURE_DE, "DE"},
861 {CPUID_FEATURE_PSE, "PSE"},
862 {CPUID_FEATURE_TSC, "TSC"},
863 {CPUID_FEATURE_MSR, "MSR"},
864 {CPUID_FEATURE_PAE, "PAE"},
865 {CPUID_FEATURE_MCE, "MCE"},
866 {CPUID_FEATURE_CX8, "CX8"},
867 {CPUID_FEATURE_APIC, "APIC"},
868 {CPUID_FEATURE_SEP, "SEP"},
869 {CPUID_FEATURE_MTRR, "MTRR"},
870 {CPUID_FEATURE_PGE, "PGE"},
871 {CPUID_FEATURE_MCA, "MCA"},
872 {CPUID_FEATURE_CMOV, "CMOV"},
873 {CPUID_FEATURE_PAT, "PAT"},
874 {CPUID_FEATURE_PSE36, "PSE36"},
875 {CPUID_FEATURE_PSN, "PSN"},
876 {CPUID_FEATURE_CLFSH, "CLFSH"},
877 {CPUID_FEATURE_DS, "DS"},
878 {CPUID_FEATURE_ACPI, "ACPI"},
879 {CPUID_FEATURE_MMX, "MMX"},
880 {CPUID_FEATURE_FXSR, "FXSR"},
881 {CPUID_FEATURE_SSE, "SSE"},
882 {CPUID_FEATURE_SSE2, "SSE2"},
883 {CPUID_FEATURE_SS, "SS"},
884 {CPUID_FEATURE_HTT, "HTT"},
885 {CPUID_FEATURE_TM, "TM"},
886 {CPUID_FEATURE_PBE, "PBE"},
887 {CPUID_FEATURE_SSE3, "SSE3"},
888 {CPUID_FEATURE_PCLMULQDQ, "PCLMULQDQ"},
889 {CPUID_FEATURE_DTES64, "DTES64"},
890 {CPUID_FEATURE_MONITOR, "MON"},
891 {CPUID_FEATURE_DSCPL, "DSCPL"},
892 {CPUID_FEATURE_VMX, "VMX"},
893 {CPUID_FEATURE_SMX, "SMX"},
894 {CPUID_FEATURE_EST, "EST"},
895 {CPUID_FEATURE_TM2, "TM2"},
896 {CPUID_FEATURE_SSSE3, "SSSE3"},
897 {CPUID_FEATURE_CID, "CID"},
898 {CPUID_FEATURE_FMA, "FMA"},
899 {CPUID_FEATURE_CX16, "CX16"},
900 {CPUID_FEATURE_xTPR, "TPR"},
901 {CPUID_FEATURE_PDCM, "PDCM"},
902 {CPUID_FEATURE_SSE4_1, "SSE4.1"},
903 {CPUID_FEATURE_SSE4_2, "SSE4.2"},
904 {CPUID_FEATURE_x2APIC, "x2APIC"},
905 {CPUID_FEATURE_MOVBE, "MOVBE"},
906 {CPUID_FEATURE_POPCNT, "POPCNT"},
907 {CPUID_FEATURE_AES, "AES"},
908 {CPUID_FEATURE_VMM, "VMM"},
909 {CPUID_FEATURE_PCID, "PCID"},
910 {CPUID_FEATURE_XSAVE, "XSAVE"},
911 {CPUID_FEATURE_OSXSAVE, "OSXSAVE"},
912 {CPUID_FEATURE_SEGLIM64, "SEGLIM64"},
913 {CPUID_FEATURE_TSCTMR, "TSCTMR"},
914 {CPUID_FEATURE_AVX1_0, "AVX1.0"},
915 {CPUID_FEATURE_RDRAND, "RDRAND"},
916 {CPUID_FEATURE_F16C, "F16C"},
917 {0, 0}
918 },
919 extfeature_map[] = {
920 {CPUID_EXTFEATURE_SYSCALL, "SYSCALL"},
921 {CPUID_EXTFEATURE_XD, "XD"},
922 {CPUID_EXTFEATURE_1GBPAGE, "1GBPAGE"},
923 {CPUID_EXTFEATURE_EM64T, "EM64T"},
924 {CPUID_EXTFEATURE_LAHF, "LAHF"},
925 {CPUID_EXTFEATURE_LZCNT, "LZCNT"},
926 {CPUID_EXTFEATURE_PREFETCHW, "PREFETCHW"},
927 {CPUID_EXTFEATURE_RDTSCP, "RDTSCP"},
928 {CPUID_EXTFEATURE_TSCI, "TSCI"},
929 {0, 0}
930
931 },
932 leaf7_feature_map[] = {
933 {CPUID_LEAF7_FEATURE_SMEP, "SMEP"},
934 {CPUID_LEAF7_FEATURE_ERMS, "ERMS"},
935 {CPUID_LEAF7_FEATURE_RDWRFSGS, "RDWRFSGS"},
936 {CPUID_LEAF7_FEATURE_TSCOFF, "TSC_THREAD_OFFSET"},
937 {CPUID_LEAF7_FEATURE_BMI1, "BMI1"},
938 {CPUID_LEAF7_FEATURE_HLE, "HLE"},
939 {CPUID_LEAF7_FEATURE_AVX2, "AVX2"},
940 {CPUID_LEAF7_FEATURE_BMI2, "BMI2"},
941 {CPUID_LEAF7_FEATURE_INVPCID, "INVPCID"},
942 {CPUID_LEAF7_FEATURE_RTM, "RTM"},
943 {CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"},
944 {CPUID_LEAF7_FEATURE_ADX, "ADX"},
945 {CPUID_LEAF7_FEATURE_SMAP, "SMAP"},
946 {0, 0}
947 };
948
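/*
 * Build a space-separated name string for the bits set in a feature word,
 * e.g. FPU|SSE|SSE2 yields "FPU SSE SSE2", truncating names if the
 * caller's buffer is too small.
 */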
949 static char *
950 cpuid_get_names(struct table *map, uint64_t bits, char *buf, unsigned buf_len)
951 {
952 size_t len = 0;
953 char *p = buf;
954 int i;
955
956 for (i = 0; map[i].mask != 0; i++) {
957 if ((bits & map[i].mask) == 0)
958 continue;
959 if (len && ((size_t) (p - buf) < (buf_len - 1)))
960 *p++ = ' ';
961 len = min(strlen(map[i].name), (size_t)((buf_len-1)-(p-buf)));
962 if (len == 0)
963 break;
964 bcopy(map[i].name, p, len);
965 p += len;
966 }
967 *p = '\0';
968 return buf;
969 }
970
971 i386_cpu_info_t *
972 cpuid_info(void)
973 {
974 /* Set up the cpuid_info structure lazily */
975 if (cpuid_cpu_infop == NULL) {
976 PE_parse_boot_argn("-cpuid", &cpuid_dbg, sizeof(cpuid_dbg));
977 cpuid_set_info();
978 cpuid_cpu_infop = &cpuid_cpu_info;
979 }
980 return cpuid_cpu_infop;
981 }
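/*
 * A minimal usage sketch (hypothetical caller): kernel code obtains the
 * lazily-initialized record via cpuid_info() and reads its fields, e.g.
 *
 *	i386_cpu_info_t *ip = cpuid_info();
 *	kprintf("family %u model %u\n", ip->cpuid_family, ip->cpuid_model);
 */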
982
983 char *
984 cpuid_get_feature_names(uint64_t features, char *buf, unsigned buf_len)
985 {
986 return cpuid_get_names(feature_map, features, buf, buf_len);
987 }
988
989 char *
990 cpuid_get_extfeature_names(uint64_t extfeatures, char *buf, unsigned buf_len)
991 {
992 return cpuid_get_names(extfeature_map, extfeatures, buf, buf_len);
993 }
994
995 char *
996 cpuid_get_leaf7_feature_names(uint64_t features, char *buf, unsigned buf_len)
997 {
998 return cpuid_get_names(leaf7_feature_map, features, buf, buf_len);
999 }
1000
1001 void
1002 cpuid_feature_display(
1003 const char *header)
1004 {
1005 char buf[256];
1006
1007 kprintf("%s: %s", header,
1008 cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf)));
1009 if (cpuid_leaf7_features())
1010 kprintf(" %s", cpuid_get_leaf7_feature_names(
1011 cpuid_leaf7_features(), buf, sizeof(buf)));
1012 kprintf("\n");
1013 if (cpuid_features() & CPUID_FEATURE_HTT) {
1014 #define s_if_plural(n) (((n) > 1) ? "s" : "")
1015 kprintf(" HTT: %d core%s per package;"
1016 " %d logical cpu%s per package\n",
1017 cpuid_cpu_infop->cpuid_cores_per_package,
1018 s_if_plural(cpuid_cpu_infop->cpuid_cores_per_package),
1019 cpuid_cpu_infop->cpuid_logical_per_package,
1020 s_if_plural(cpuid_cpu_infop->cpuid_logical_per_package));
1021 }
1022 }
1023
1024 void
1025 cpuid_extfeature_display(
1026 const char *header)
1027 {
1028 char buf[256];
1029
1030 kprintf("%s: %s\n", header,
1031 cpuid_get_extfeature_names(cpuid_extfeatures(),
1032 buf, sizeof(buf)));
1033 }
1034
1035 void
1036 cpuid_cpu_display(
1037 const char *header)
1038 {
1039 if (cpuid_cpu_infop->cpuid_brand_string[0] != '\0') {
1040 kprintf("%s: %s\n", header, cpuid_cpu_infop->cpuid_brand_string);
1041 }
1042 }
1043
1044 unsigned int
1045 cpuid_family(void)
1046 {
1047 return cpuid_info()->cpuid_family;
1048 }
1049
1050 uint32_t
1051 cpuid_cpufamily(void)
1052 {
1053 return cpuid_info()->cpuid_cpufamily;
1054 }
1055
1056 cpu_type_t
1057 cpuid_cputype(void)
1058 {
1059 return cpuid_info()->cpuid_cpu_type;
1060 }
1061
1062 cpu_subtype_t
1063 cpuid_cpusubtype(void)
1064 {
1065 return cpuid_info()->cpuid_cpu_subtype;
1066 }
1067
1068 uint64_t
1069 cpuid_features(void)
1070 {
1071 static int checked = 0;
1072 char fpu_arg[20] = { 0 };
1073
1074 (void) cpuid_info();
1075 if (!checked) {
1076 /* check for boot-time fpu limitations */
1077 if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof (fpu_arg))) {
1078 printf("limiting fpu features to: %s\n", fpu_arg);
1079 if (!strncmp("387", fpu_arg, sizeof("387")) || !strncmp("mmx", fpu_arg, sizeof("mmx"))) {
1080 printf("no sse or sse2\n");
1081 cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE | CPUID_FEATURE_SSE2 | CPUID_FEATURE_FXSR);
1082 } else if (!strncmp("sse", fpu_arg, sizeof("sse"))) {
1083 printf("no sse2\n");
1084 cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE2);
1085 }
1086 }
1087 checked = 1;
1088 }
1089 return cpuid_cpu_infop->cpuid_features;
1090 }
1091
1092 uint64_t
1093 cpuid_extfeatures(void)
1094 {
1095 return cpuid_info()->cpuid_extfeatures;
1096 }
1097
1098 uint64_t
1099 cpuid_leaf7_features(void)
1100 {
1101 return cpuid_info()->cpuid_leaf7_features;
1102 }
1103
1104 static i386_vmm_info_t *_cpuid_vmm_infop = NULL;
1105 static i386_vmm_info_t _cpuid_vmm_info;
1106
1107 static void
1108 cpuid_init_vmm_info(i386_vmm_info_t *info_p)
1109 {
1110 uint32_t reg[4];
1111 uint32_t max_vmm_leaf;
1112
1113 bzero(info_p, sizeof(*info_p));
1114
1115 if (!cpuid_vmm_present())
1116 return;
1117
1118 DBG("cpuid_init_vmm_info(%p)\n", info_p);
1119
1120 /* do cpuid 0x40000000 to get VMM vendor */
1121 cpuid_fn(0x40000000, reg);
1122 max_vmm_leaf = reg[eax];
1123 bcopy((char *)&reg[ebx], &info_p->cpuid_vmm_vendor[0], 4);
1124 bcopy((char *)&reg[ecx], &info_p->cpuid_vmm_vendor[4], 4);
1125 bcopy((char *)&reg[edx], &info_p->cpuid_vmm_vendor[8], 4);
1126 info_p->cpuid_vmm_vendor[12] = '\0';
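/* e.g. VMware hypervisors return the 12-byte signature "VMwareVMware" in EBX:ECX:EDX */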
1127
1128 if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_VMWARE)) {
1129 /* VMware identification string: kb.vmware.com/kb/1009458 */
1130 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_VMWARE;
1131 } else if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_PARALLELS)) {
1132 /* Parallels identification string */
1133 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_PARALLELS;
1134 } else {
1135 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_UNKNOWN;
1136 }
1137
1138 /* VMM generic leaves: https://lkml.org/lkml/2008/10/1/246 */
1139 if (max_vmm_leaf >= 0x40000010) {
1140 cpuid_fn(0x40000010, reg);
1141
1142 info_p->cpuid_vmm_tsc_frequency = reg[eax];
1143 info_p->cpuid_vmm_bus_frequency = reg[ebx];
1144 }
1145
1146 DBG(" vmm_vendor : %s\n", info_p->cpuid_vmm_vendor);
1147 DBG(" vmm_family : %u\n", info_p->cpuid_vmm_family);
1148 DBG(" vmm_bus_frequency : %u\n", info_p->cpuid_vmm_bus_frequency);
1149 DBG(" vmm_tsc_frequency : %u\n", info_p->cpuid_vmm_tsc_frequency);
1150 }
1151
1152 boolean_t
1153 cpuid_vmm_present(void)
1154 {
1155 return (cpuid_features() & CPUID_FEATURE_VMM) ? TRUE : FALSE;
1156 }
1157
1158 i386_vmm_info_t *
1159 cpuid_vmm_info(void)
1160 {
1161 if (_cpuid_vmm_infop == NULL) {
1162 cpuid_init_vmm_info(&_cpuid_vmm_info);
1163 _cpuid_vmm_infop = &_cpuid_vmm_info;
1164 }
1165 return _cpuid_vmm_infop;
1166 }
1167
1168 uint32_t
1169 cpuid_vmm_family(void)
1170 {
1171 return cpuid_vmm_info()->cpuid_vmm_family;
1172 }
1173