osfmk/i386/cpuid.c (from xnu-4570.1.46)
1/*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31#include <vm/vm_page.h>
32#include <pexpert/pexpert.h>
33
34#include <i386/cpuid.h>
35
36static boolean_t cpuid_dbg
37#if DEBUG
38 = TRUE;
39#else
40 = FALSE;
41#endif
42#define DBG(x...) \
43 do { \
44 if (cpuid_dbg) \
45 kprintf(x); \
46 } while (0) \
47
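/*
 * DBG() output goes to kprintf() when cpuid_dbg is set: TRUE by default
 * on DEBUG kernels, or enabled via the "-cpuid" boot-arg (see cpuid_info()).
 */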
48#define min(a,b) ((a) < (b) ? (a) : (b))
49#define quad(hi,lo) (((uint64_t)(hi)) << 32 | (lo))
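/* e.g. quad(0xA, 0xB) yields the 64-bit value 0x0000000A0000000B */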
50
51/* Only for 32-bit values */
52#define bit32(n) (1U << (n))
53#define bitmask32(h,l) ((bit32(h)|(bit32(h)-1)) & ~(bit32(l)-1))
54#define bitfield32(x,h,l) ((((x) & bitmask32(h,l)) >> (l)))
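/* e.g. bitfield32(0x12345678, 15, 8) == 0x56, the value of bits 15..8 of x */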
55
56/*
57 * Leaf 2 cache descriptor encodings.
58 */
59typedef enum {
60 _NULL_, /* NULL (empty) descriptor */
61 CACHE, /* Cache */
62 TLB, /* TLB */
63 STLB, /* Shared second-level unified TLB */
64 PREFETCH /* Prefetch size */
65} cpuid_leaf2_desc_type_t;
66
67typedef enum {
68 NA, /* Not Applicable */
69 FULLY, /* Fully-associative */
70 TRACE, /* Trace Cache (P4 only) */
71 INST, /* Instruction TLB */
72 DATA, /* Data TLB */
73 DATA0, /* Data TLB, 1st level */
74 DATA1, /* Data TLB, 2nd level */
75 L1, /* L1 (unified) cache */
76 L1_INST, /* L1 Instruction cache */
77 L1_DATA, /* L1 Data cache */
78 L2, /* L2 (unified) cache */
79 L3, /* L3 (unified) cache */
80 L2_2LINESECTOR, /* L2 (unified) cache with 2 lines per sector */
81 L3_2LINESECTOR, /* L3(unified) cache with 2 lines per sector */
82 SMALL, /* Small page TLB */
83 LARGE, /* Large page TLB */
84 BOTH /* Small and Large page TLB */
85} cpuid_leaf2_qualifier_t;
86
87typedef struct cpuid_cache_descriptor {
88 uint8_t value; /* descriptor code */
89 uint8_t type; /* cpuid_leaf2_desc_type_t */
 90 uint8_t level; /* level of cache/TLB hierarchy */
91 uint8_t ways; /* wayness of cache */
92 uint16_t size; /* cachesize or TLB pagesize */
93 uint16_t entries; /* number of TLB entries or linesize */
94} cpuid_cache_descriptor_t;
95
96/*
 97 * These multipliers are used to encode 1*K .. 64*M in a 16-bit size field
98 */
99#define K (1)
100#define M (1024)
101
102/*
103 * Intel cache descriptor table:
104 */
105static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table[] = {
106// -------------------------------------------------------
107// value type level ways size entries
108// -------------------------------------------------------
109 { 0x00, _NULL_, NA, NA, NA, NA },
110 { 0x01, TLB, INST, 4, SMALL, 32 },
111 { 0x02, TLB, INST, FULLY, LARGE, 2 },
112 { 0x03, TLB, DATA, 4, SMALL, 64 },
113 { 0x04, TLB, DATA, 4, LARGE, 8 },
114 { 0x05, TLB, DATA1, 4, LARGE, 32 },
115 { 0x06, CACHE, L1_INST, 4, 8*K, 32 },
116 { 0x08, CACHE, L1_INST, 4, 16*K, 32 },
117 { 0x09, CACHE, L1_INST, 4, 32*K, 64 },
118 { 0x0A, CACHE, L1_DATA, 2, 8*K, 32 },
119 { 0x0B, TLB, INST, 4, LARGE, 4 },
120 { 0x0C, CACHE, L1_DATA, 4, 16*K, 32 },
121 { 0x0D, CACHE, L1_DATA, 4, 16*K, 64 },
122 { 0x0E, CACHE, L1_DATA, 6, 24*K, 64 },
123 { 0x21, CACHE, L2, 8, 256*K, 64 },
124 { 0x22, CACHE, L3_2LINESECTOR, 4, 512*K, 64 },
125 { 0x23, CACHE, L3_2LINESECTOR, 8, 1*M, 64 },
126 { 0x25, CACHE, L3_2LINESECTOR, 8, 2*M, 64 },
127 { 0x29, CACHE, L3_2LINESECTOR, 8, 4*M, 64 },
128 { 0x2C, CACHE, L1_DATA, 8, 32*K, 64 },
129 { 0x30, CACHE, L1_INST, 8, 32*K, 64 },
130 { 0x40, CACHE, L2, NA, 0, NA },
131 { 0x41, CACHE, L2, 4, 128*K, 32 },
132 { 0x42, CACHE, L2, 4, 256*K, 32 },
133 { 0x43, CACHE, L2, 4, 512*K, 32 },
134 { 0x44, CACHE, L2, 4, 1*M, 32 },
135 { 0x45, CACHE, L2, 4, 2*M, 32 },
136 { 0x46, CACHE, L3, 4, 4*M, 64 },
137 { 0x47, CACHE, L3, 8, 8*M, 64 },
138 { 0x48, CACHE, L2, 12, 3*M, 64 },
139 { 0x49, CACHE, L2, 16, 4*M, 64 },
140 { 0x4A, CACHE, L3, 12, 6*M, 64 },
141 { 0x4B, CACHE, L3, 16, 8*M, 64 },
142 { 0x4C, CACHE, L3, 12, 12*M, 64 },
143 { 0x4D, CACHE, L3, 16, 16*M, 64 },
144 { 0x4E, CACHE, L2, 24, 6*M, 64 },
145 { 0x4F, TLB, INST, NA, SMALL, 32 },
146 { 0x50, TLB, INST, NA, BOTH, 64 },
147 { 0x51, TLB, INST, NA, BOTH, 128 },
148 { 0x52, TLB, INST, NA, BOTH, 256 },
149 { 0x55, TLB, INST, FULLY, BOTH, 7 },
150 { 0x56, TLB, DATA0, 4, LARGE, 16 },
151 { 0x57, TLB, DATA0, 4, SMALL, 16 },
152 { 0x59, TLB, DATA0, FULLY, SMALL, 16 },
153 { 0x5A, TLB, DATA0, 4, LARGE, 32 },
154 { 0x5B, TLB, DATA, NA, BOTH, 64 },
155 { 0x5C, TLB, DATA, NA, BOTH, 128 },
156 { 0x5D, TLB, DATA, NA, BOTH, 256 },
 157 { 0x60, CACHE, L1, 8, 16*K, 64 },
158 { 0x61, CACHE, L1, 4, 8*K, 64 },
159 { 0x62, CACHE, L1, 4, 16*K, 64 },
160 { 0x63, CACHE, L1, 4, 32*K, 64 },
161 { 0x70, CACHE, TRACE, 8, 12*K, NA },
162 { 0x71, CACHE, TRACE, 8, 16*K, NA },
163 { 0x72, CACHE, TRACE, 8, 32*K, NA },
164 { 0x76, TLB, INST, NA, BOTH, 8 },
165 { 0x78, CACHE, L2, 4, 1*M, 64 },
166 { 0x79, CACHE, L2_2LINESECTOR, 8, 128*K, 64 },
167 { 0x7A, CACHE, L2_2LINESECTOR, 8, 256*K, 64 },
168 { 0x7B, CACHE, L2_2LINESECTOR, 8, 512*K, 64 },
169 { 0x7C, CACHE, L2_2LINESECTOR, 8, 1*M, 64 },
170 { 0x7D, CACHE, L2, 8, 2*M, 64 },
171 { 0x7F, CACHE, L2, 2, 512*K, 64 },
172 { 0x80, CACHE, L2, 8, 512*K, 64 },
173 { 0x82, CACHE, L2, 8, 256*K, 32 },
174 { 0x83, CACHE, L2, 8, 512*K, 32 },
175 { 0x84, CACHE, L2, 8, 1*M, 32 },
176 { 0x85, CACHE, L2, 8, 2*M, 32 },
177 { 0x86, CACHE, L2, 4, 512*K, 64 },
178 { 0x87, CACHE, L2, 8, 1*M, 64 },
179 { 0xB0, TLB, INST, 4, SMALL, 128 },
180 { 0xB1, TLB, INST, 4, LARGE, 8 },
181 { 0xB2, TLB, INST, 4, SMALL, 64 },
182 { 0xB3, TLB, DATA, 4, SMALL, 128 },
183 { 0xB4, TLB, DATA1, 4, SMALL, 256 },
184 { 0xB5, TLB, DATA1, 8, SMALL, 64 },
185 { 0xB6, TLB, DATA1, 8, SMALL, 128 },
186 { 0xBA, TLB, DATA1, 4, BOTH, 64 },
187 { 0xC1, STLB, DATA1, 8, SMALL, 1024},
188 { 0xCA, STLB, DATA1, 4, SMALL, 512 },
189 { 0xD0, CACHE, L3, 4, 512*K, 64 },
190 { 0xD1, CACHE, L3, 4, 1*M, 64 },
191 { 0xD2, CACHE, L3, 4, 2*M, 64 },
192 { 0xD3, CACHE, L3, 4, 4*M, 64 },
193 { 0xD4, CACHE, L3, 4, 8*M, 64 },
194 { 0xD6, CACHE, L3, 8, 1*M, 64 },
195 { 0xD7, CACHE, L3, 8, 2*M, 64 },
196 { 0xD8, CACHE, L3, 8, 4*M, 64 },
197 { 0xD9, CACHE, L3, 8, 8*M, 64 },
198 { 0xDA, CACHE, L3, 8, 12*M, 64 },
199 { 0xDC, CACHE, L3, 12, 1536*K, 64 },
200 { 0xDD, CACHE, L3, 12, 3*M, 64 },
201 { 0xDE, CACHE, L3, 12, 6*M, 64 },
202 { 0xDF, CACHE, L3, 12, 12*M, 64 },
203 { 0xE0, CACHE, L3, 12, 18*M, 64 },
204 { 0xE2, CACHE, L3, 16, 2*M, 64 },
205 { 0xE3, CACHE, L3, 16, 4*M, 64 },
206 { 0xE4, CACHE, L3, 16, 8*M, 64 },
207 { 0xE5, CACHE, L3, 16, 16*M, 64 },
208 { 0xE6, CACHE, L3, 16, 24*M, 64 },
209 { 0xF0, PREFETCH, NA, NA, 64, NA },
210 { 0xF1, PREFETCH, NA, NA, 128, NA },
211 { 0xFF, CACHE, NA, NA, 0, NA }
212};
213#define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
214 sizeof(cpuid_cache_descriptor_t))
215
216static inline cpuid_cache_descriptor_t *
217cpuid_leaf2_find(uint8_t value)
218{
219 unsigned int i;
220
221 for (i = 0; i < INTEL_LEAF2_DESC_NUM; i++)
222 if (intel_cpuid_leaf2_descriptor_table[i].value == value)
223 return &intel_cpuid_leaf2_descriptor_table[i];
224 return NULL;
225}
226
227/*
228 * CPU identification routines.
229 */
230
231static i386_cpu_info_t cpuid_cpu_info;
232static i386_cpu_info_t *cpuid_cpu_infop = NULL;
233
234static void cpuid_fn(uint32_t selector, uint32_t *result)
235{
236 do_cpuid(selector, result);
237 DBG("cpuid_fn(0x%08x) eax:0x%08x ebx:0x%08x ecx:0x%08x edx:0x%08x\n",
238 selector, result[0], result[1], result[2], result[3]);
239}
240
241static const char *cache_type_str[LCACHE_MAX] = {
242 "Lnone", "L1I", "L1D", "L2U", "L3U"
243};
244
245/* this function is Intel-specific */
246static void
247cpuid_set_cache_info( i386_cpu_info_t * info_p )
248{
249 uint32_t cpuid_result[4];
250 uint32_t reg[4];
251 uint32_t index;
252 uint32_t linesizes[LCACHE_MAX];
253 unsigned int i;
254 unsigned int j;
255 boolean_t cpuid_deterministic_supported = FALSE;
256
257 DBG("cpuid_set_cache_info(%p)\n", info_p);
258
259 bzero( linesizes, sizeof(linesizes) );
260
261 /* Get processor cache descriptor info using leaf 2. We don't use
262 * this internally, but must publish it for KEXTs.
263 */
264 cpuid_fn(2, cpuid_result);
265 for (j = 0; j < 4; j++) {
266 if ((cpuid_result[j] >> 31) == 1) /* bit31 is validity */
267 continue;
268 ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j];
269 }
270 /* first byte gives number of cpuid calls to get all descriptors */
271 for (i = 1; i < info_p->cache_info[0]; i++) {
 272 if ((i+1)*16 > sizeof(info_p->cache_info))
273 break;
274 cpuid_fn(2, cpuid_result);
275 for (j = 0; j < 4; j++) {
276 if ((cpuid_result[j] >> 31) == 1)
277 continue;
278 ((uint32_t *) info_p->cache_info)[4*i+j] =
279 cpuid_result[j];
280 }
281 }
282
283 /*
284 * Get cache info using leaf 4, the "deterministic cache parameters."
285 * Most processors Mac OS X supports implement this flavor of CPUID.
286 * Loop over each cache on the processor.
287 */
288 cpuid_fn(0, cpuid_result);
289 if (cpuid_result[eax] >= 4)
290 cpuid_deterministic_supported = TRUE;
291
292 for (index = 0; cpuid_deterministic_supported; index++) {
293 cache_type_t type = Lnone;
294 uint32_t cache_type;
295 uint32_t cache_level;
296 uint32_t cache_sharing;
297 uint32_t cache_linesize;
298 uint32_t cache_sets;
299 uint32_t cache_associativity;
300 uint32_t cache_size;
301 uint32_t cache_partitions;
302 uint32_t colors;
303
304 reg[eax] = 4; /* cpuid request 4 */
305 reg[ecx] = index; /* index starting at 0 */
306 cpuid(reg);
307 DBG("cpuid(4) index=%d eax=0x%x\n", index, reg[eax]);
308 cache_type = bitfield32(reg[eax], 4, 0);
309 if (cache_type == 0)
310 break; /* no more caches */
311 cache_level = bitfield32(reg[eax], 7, 5);
312 cache_sharing = bitfield32(reg[eax], 25, 14) + 1;
313 info_p->cpuid_cores_per_package
314 = bitfield32(reg[eax], 31, 26) + 1;
315 cache_linesize = bitfield32(reg[ebx], 11, 0) + 1;
316 cache_partitions = bitfield32(reg[ebx], 21, 12) + 1;
317 cache_associativity = bitfield32(reg[ebx], 31, 22) + 1;
318 cache_sets = bitfield32(reg[ecx], 31, 0) + 1;
319
320 /* Map type/levels returned by CPUID into cache_type_t */
321 switch (cache_level) {
322 case 1:
323 type = cache_type == 1 ? L1D :
324 cache_type == 2 ? L1I :
325 Lnone;
326 break;
327 case 2:
328 type = cache_type == 3 ? L2U :
329 Lnone;
330 break;
331 case 3:
332 type = cache_type == 3 ? L3U :
333 Lnone;
334 break;
335 default:
336 type = Lnone;
337 }
338
339 /* The total size of a cache is:
340 * ( linesize * sets * associativity * partitions )
341 */
342 if (type != Lnone) {
343 cache_size = cache_linesize * cache_sets *
344 cache_associativity * cache_partitions;
345 info_p->cache_size[type] = cache_size;
346 info_p->cache_sharing[type] = cache_sharing;
347 info_p->cache_partitions[type] = cache_partitions;
348 linesizes[type] = cache_linesize;
349
350 DBG(" cache_size[%s] : %d\n",
351 cache_type_str[type], cache_size);
352 DBG(" cache_sharing[%s] : %d\n",
353 cache_type_str[type], cache_sharing);
354 DBG(" cache_partitions[%s]: %d\n",
355 cache_type_str[type], cache_partitions);
356
357 /*
358 * Overwrite associativity determined via
359 * CPUID.0x80000006 -- this leaf is more
360 * accurate
361 */
362 if (type == L2U)
363 info_p->cpuid_cache_L2_associativity = cache_associativity;
 364 /*
 365 * Adjust #sets to account for the N CBos (LLC slices):
 366 * addresses are hashed across the CBos, so each slice holds 1/Nth of the sets.
 367 */
368 if (type == L3U && info_p->core_count)
369 cache_sets = cache_sets / info_p->core_count;
370
371 /* Compute the number of page colors for this cache,
372 * which is:
373 * ( linesize * sets ) / page_size
374 *
375 * To help visualize this, consider two views of a
376 * physical address. To the cache, it is composed
377 * of a line offset, a set selector, and a tag.
378 * To VM, it is composed of a page offset, a page
379 * color, and other bits in the pageframe number:
380 *
381 * +-----------------+---------+--------+
382 * cache: | tag | set | offset |
383 * +-----------------+---------+--------+
384 *
385 * +-----------------+-------+----------+
386 * VM: | don't care | color | pg offset|
387 * +-----------------+-------+----------+
388 *
389 * The color is those bits in (set+offset) not covered
390 * by the page offset.
391 */
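 /* e.g. 64-byte lines and 8192 sets: (64 * 8192) >> 12 = 128 colors */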
392 colors = ( cache_linesize * cache_sets ) >> 12;
393
394 if ( colors > vm_cache_geometry_colors )
395 vm_cache_geometry_colors = colors;
396 }
397 }
398 DBG(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
399
400 /*
401 * If deterministic cache parameters are not available, use
402 * something else
403 */
404 if (info_p->cpuid_cores_per_package == 0) {
405 info_p->cpuid_cores_per_package = 1;
406
 407 /* cpuid reports the cache size in 1024-byte (KB) units */
408 info_p->cache_size[L2U] = info_p->cpuid_cache_size * 1024;
409 info_p->cache_sharing[L2U] = 1;
410 info_p->cache_partitions[L2U] = 1;
411
412 linesizes[L2U] = info_p->cpuid_cache_linesize;
413
414 DBG(" cache_size[L2U] : %d\n",
415 info_p->cache_size[L2U]);
416 DBG(" cache_sharing[L2U] : 1\n");
417 DBG(" cache_partitions[L2U]: 1\n");
418 DBG(" linesizes[L2U] : %d\n",
419 info_p->cpuid_cache_linesize);
420 }
421
422 /*
423 * What linesize to publish? We use the L2 linesize if any,
424 * else the L1D.
425 */
426 if ( linesizes[L2U] )
427 info_p->cache_linesize = linesizes[L2U];
428 else if (linesizes[L1D])
429 info_p->cache_linesize = linesizes[L1D];
430 else panic("no linesize");
431 DBG(" cache_linesize : %d\n", info_p->cache_linesize);
432
433 /*
434 * Extract and publish TLB information from Leaf 2 descriptors.
435 */
436 DBG(" %ld leaf2 descriptors:\n", sizeof(info_p->cache_info));
437 for (i = 1; i < sizeof(info_p->cache_info); i++) {
438 cpuid_cache_descriptor_t *descp;
439 int id;
440 int level;
441 int page;
442
443 DBG(" 0x%02x", info_p->cache_info[i]);
444 descp = cpuid_leaf2_find(info_p->cache_info[i]);
445 if (descp == NULL)
446 continue;
447
448 switch (descp->type) {
449 case TLB:
450 page = (descp->size == SMALL) ? TLB_SMALL : TLB_LARGE;
451 /* determine I or D: */
452 switch (descp->level) {
453 case INST:
454 id = TLB_INST;
455 break;
456 case DATA:
457 case DATA0:
458 case DATA1:
459 id = TLB_DATA;
460 break;
461 default:
462 continue;
463 }
464 /* determine level: */
465 switch (descp->level) {
466 case DATA1:
467 level = 1;
468 break;
469 default:
470 level = 0;
471 }
472 info_p->cpuid_tlb[id][page][level] = descp->entries;
473 break;
474 case STLB:
475 info_p->cpuid_stlb = descp->entries;
476 }
477 }
478 DBG("\n");
479}
480
481static void
482cpuid_set_generic_info(i386_cpu_info_t *info_p)
483{
484 uint32_t reg[4];
485 char str[128], *p;
486
487 DBG("cpuid_set_generic_info(%p)\n", info_p);
488
489 /* do cpuid 0 to get vendor */
490 cpuid_fn(0, reg);
491 info_p->cpuid_max_basic = reg[eax];
492 bcopy((char *)&reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
493 bcopy((char *)&reg[ecx], &info_p->cpuid_vendor[8], 4);
494 bcopy((char *)&reg[edx], &info_p->cpuid_vendor[4], 4);
495 info_p->cpuid_vendor[12] = 0;
496
497 /* get extended cpuid results */
498 cpuid_fn(0x80000000, reg);
499 info_p->cpuid_max_ext = reg[eax];
500
501 /* check to see if we can get brand string */
502 if (info_p->cpuid_max_ext >= 0x80000004) {
503 /*
 504 * The brand string is 48 bytes (max), guaranteed to
 505 * be NUL terminated.
506 */
507 cpuid_fn(0x80000002, reg);
508 bcopy((char *)reg, &str[0], 16);
509 cpuid_fn(0x80000003, reg);
510 bcopy((char *)reg, &str[16], 16);
511 cpuid_fn(0x80000004, reg);
512 bcopy((char *)reg, &str[32], 16);
513 for (p = str; *p != '\0'; p++) {
514 if (*p != ' ') break;
515 }
516 strlcpy(info_p->cpuid_brand_string,
517 p, sizeof(info_p->cpuid_brand_string));
518
519 if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN,
520 min(sizeof(info_p->cpuid_brand_string),
521 strlen(CPUID_STRING_UNKNOWN) + 1))) {
522 /*
523 * This string means we have a firmware-programmable brand string,
524 * and the firmware couldn't figure out what sort of CPU we have.
525 */
526 info_p->cpuid_brand_string[0] = '\0';
527 }
528 }
529
530 /* Get cache and addressing info. */
531 if (info_p->cpuid_max_ext >= 0x80000006) {
532 uint32_t assoc;
533 cpuid_fn(0x80000006, reg);
534 info_p->cpuid_cache_linesize = bitfield32(reg[ecx], 7, 0);
535 assoc = bitfield32(reg[ecx],15,12);
536 /*
537 * L2 associativity is encoded, though in an insufficiently
538 * descriptive fashion, e.g. 24-way is mapped to 16-way.
539 * Represent a fully associative cache as 0xFFFF.
540 * Overwritten by associativity as determined via CPUID.4
541 * if available.
542 */
543 if (assoc == 6)
544 assoc = 8;
545 else if (assoc == 8)
546 assoc = 16;
547 else if (assoc == 0xF)
548 assoc = 0xFFFF;
549 info_p->cpuid_cache_L2_associativity = assoc;
550 info_p->cpuid_cache_size = bitfield32(reg[ecx],31,16);
551 cpuid_fn(0x80000008, reg);
552 info_p->cpuid_address_bits_physical =
553 bitfield32(reg[eax], 7, 0);
554 info_p->cpuid_address_bits_virtual =
555 bitfield32(reg[eax],15, 8);
556 }
557
 558 /*
 559 * Get the processor signature and decode it,
 560 * bracketing this with the approved procedure for reading
 561 * the microcode version number, a.k.a. signature, a.k.a. BIOS ID.
 562 */
563 wrmsr64(MSR_IA32_BIOS_SIGN_ID, 0);
564 cpuid_fn(1, reg);
565 info_p->cpuid_microcode_version =
566 (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
567 info_p->cpuid_signature = reg[eax];
568 info_p->cpuid_stepping = bitfield32(reg[eax], 3, 0);
569 info_p->cpuid_model = bitfield32(reg[eax], 7, 4);
570 info_p->cpuid_family = bitfield32(reg[eax], 11, 8);
571 info_p->cpuid_type = bitfield32(reg[eax], 13, 12);
572 info_p->cpuid_extmodel = bitfield32(reg[eax], 19, 16);
573 info_p->cpuid_extfamily = bitfield32(reg[eax], 27, 20);
574 info_p->cpuid_brand = bitfield32(reg[ebx], 7, 0);
575 info_p->cpuid_features = quad(reg[ecx], reg[edx]);
576
577 /* Get "processor flag"; necessary for microcode update matching */
578 info_p->cpuid_processor_flag = (rdmsr64(MSR_IA32_PLATFORM_ID)>> 50) & 0x7;
579
580 /* Fold extensions into family/model */
581 if (info_p->cpuid_family == 0x0f)
582 info_p->cpuid_family += info_p->cpuid_extfamily;
583 if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06)
584 info_p->cpuid_model += (info_p->cpuid_extmodel << 4);
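 /* e.g. family 0x6, extmodel 0x3, model 0xA gives model 0x3A (CPUID_MODEL_IVYBRIDGE) */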
585
586 if (info_p->cpuid_features & CPUID_FEATURE_HTT)
587 info_p->cpuid_logical_per_package =
588 bitfield32(reg[ebx], 23, 16);
589 else
590 info_p->cpuid_logical_per_package = 1;
591
592 if (info_p->cpuid_max_ext >= 0x80000001) {
593 cpuid_fn(0x80000001, reg);
594 info_p->cpuid_extfeatures =
595 quad(reg[ecx], reg[edx]);
596 }
597
598 DBG(" max_basic : %d\n", info_p->cpuid_max_basic);
599 DBG(" max_ext : 0x%08x\n", info_p->cpuid_max_ext);
600 DBG(" vendor : %s\n", info_p->cpuid_vendor);
601 DBG(" brand_string : %s\n", info_p->cpuid_brand_string);
602 DBG(" signature : 0x%08x\n", info_p->cpuid_signature);
603 DBG(" stepping : %d\n", info_p->cpuid_stepping);
604 DBG(" model : %d\n", info_p->cpuid_model);
605 DBG(" family : %d\n", info_p->cpuid_family);
606 DBG(" type : %d\n", info_p->cpuid_type);
607 DBG(" extmodel : %d\n", info_p->cpuid_extmodel);
608 DBG(" extfamily : %d\n", info_p->cpuid_extfamily);
609 DBG(" brand : %d\n", info_p->cpuid_brand);
610 DBG(" features : 0x%016llx\n", info_p->cpuid_features);
611 DBG(" extfeatures : 0x%016llx\n", info_p->cpuid_extfeatures);
612 DBG(" logical_per_package : %d\n", info_p->cpuid_logical_per_package);
613 DBG(" microcode_version : 0x%08x\n", info_p->cpuid_microcode_version);
614
615 /* Fold in the Invariant TSC feature bit, if present */
616 if (info_p->cpuid_max_ext >= 0x80000007) {
617 cpuid_fn(0x80000007, reg);
618 info_p->cpuid_extfeatures |=
619 reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
620 DBG(" extfeatures : 0x%016llx\n",
621 info_p->cpuid_extfeatures);
622 }
623
624 if (info_p->cpuid_max_basic >= 0x5) {
625 cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf;
626
627 /*
628 * Extract the Monitor/Mwait Leaf info:
629 */
630 cpuid_fn(5, reg);
631 cmp->linesize_min = reg[eax];
632 cmp->linesize_max = reg[ebx];
633 cmp->extensions = reg[ecx];
634 cmp->sub_Cstates = reg[edx];
635 info_p->cpuid_mwait_leafp = cmp;
636
637 DBG(" Monitor/Mwait Leaf:\n");
638 DBG(" linesize_min : %d\n", cmp->linesize_min);
639 DBG(" linesize_max : %d\n", cmp->linesize_max);
640 DBG(" extensions : %d\n", cmp->extensions);
641 DBG(" sub_Cstates : 0x%08x\n", cmp->sub_Cstates);
642 }
643
644 if (info_p->cpuid_max_basic >= 0x6) {
645 cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf;
646
647 /*
648 * The thermal and Power Leaf:
649 */
650 cpuid_fn(6, reg);
651 ctp->sensor = bitfield32(reg[eax], 0, 0);
652 ctp->dynamic_acceleration = bitfield32(reg[eax], 1, 1);
653 ctp->invariant_APIC_timer = bitfield32(reg[eax], 2, 2);
654 ctp->core_power_limits = bitfield32(reg[eax], 4, 4);
655 ctp->fine_grain_clock_mod = bitfield32(reg[eax], 5, 5);
656 ctp->package_thermal_intr = bitfield32(reg[eax], 6, 6);
657 ctp->thresholds = bitfield32(reg[ebx], 3, 0);
658 ctp->ACNT_MCNT = bitfield32(reg[ecx], 0, 0);
659 ctp->hardware_feedback = bitfield32(reg[ecx], 1, 1);
660 ctp->energy_policy = bitfield32(reg[ecx], 3, 3);
661 info_p->cpuid_thermal_leafp = ctp;
662
663 DBG(" Thermal/Power Leaf:\n");
664 DBG(" sensor : %d\n", ctp->sensor);
665 DBG(" dynamic_acceleration : %d\n", ctp->dynamic_acceleration);
666 DBG(" invariant_APIC_timer : %d\n", ctp->invariant_APIC_timer);
667 DBG(" core_power_limits : %d\n", ctp->core_power_limits);
668 DBG(" fine_grain_clock_mod : %d\n", ctp->fine_grain_clock_mod);
669 DBG(" package_thermal_intr : %d\n", ctp->package_thermal_intr);
670 DBG(" thresholds : %d\n", ctp->thresholds);
671 DBG(" ACNT_MCNT : %d\n", ctp->ACNT_MCNT);
672 DBG(" ACNT2 : %d\n", ctp->hardware_feedback);
673 DBG(" energy_policy : %d\n", ctp->energy_policy);
674 }
675
676 if (info_p->cpuid_max_basic >= 0xa) {
677 cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf;
678
679 /*
680 * Architectural Performance Monitoring Leaf:
681 */
682 cpuid_fn(0xa, reg);
683 capp->version = bitfield32(reg[eax], 7, 0);
684 capp->number = bitfield32(reg[eax], 15, 8);
685 capp->width = bitfield32(reg[eax], 23, 16);
686 capp->events_number = bitfield32(reg[eax], 31, 24);
687 capp->events = reg[ebx];
688 capp->fixed_number = bitfield32(reg[edx], 4, 0);
689 capp->fixed_width = bitfield32(reg[edx], 12, 5);
690 info_p->cpuid_arch_perf_leafp = capp;
691
692 DBG(" Architectural Performance Monitoring Leaf:\n");
693 DBG(" version : %d\n", capp->version);
694 DBG(" number : %d\n", capp->number);
695 DBG(" width : %d\n", capp->width);
696 DBG(" events_number : %d\n", capp->events_number);
697 DBG(" events : %d\n", capp->events);
698 DBG(" fixed_number : %d\n", capp->fixed_number);
699 DBG(" fixed_width : %d\n", capp->fixed_width);
700 }
701
702 if (info_p->cpuid_max_basic >= 0xd) {
703 cpuid_xsave_leaf_t *xsp;
704 /*
705 * XSAVE Features:
706 */
707 xsp = &info_p->cpuid_xsave_leaf[0];
708 info_p->cpuid_xsave_leafp = xsp;
709 xsp->extended_state[eax] = 0xd;
710 xsp->extended_state[ecx] = 0;
711 cpuid(xsp->extended_state);
712 DBG(" XSAVE Main leaf:\n");
713 DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
714 DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
715 DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
716 DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
717
718 xsp = &info_p->cpuid_xsave_leaf[1];
719 xsp->extended_state[eax] = 0xd;
720 xsp->extended_state[ecx] = 1;
721 cpuid(xsp->extended_state);
722 DBG(" XSAVE Sub-leaf1:\n");
723 DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
724 DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
725 DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
726 DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
727
728 }
729
730 if (info_p->cpuid_model >= CPUID_MODEL_IVYBRIDGE) {
731 /*
732 * Leaf7 Features:
733 */
734 cpuid_fn(0x7, reg);
735 info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]);
736
737 DBG(" Feature Leaf7:\n");
738 DBG(" EBX : 0x%x\n", reg[ebx]);
739 DBG(" ECX : 0x%x\n", reg[ecx]);
740 }
741
742 if (info_p->cpuid_max_basic >= 0x15) {
743 /*
 744 * TSC/CCC frequency leaf:
745 */
746 cpuid_fn(0x15, reg);
747 info_p->cpuid_tsc_leaf.denominator = reg[eax];
748 info_p->cpuid_tsc_leaf.numerator = reg[ebx];
749
750 DBG(" TSC/CCC Information Leaf:\n");
751 DBG(" numerator : 0x%x\n", reg[ebx]);
752 DBG(" denominator : 0x%x\n", reg[eax]);
753 }
754
755 return;
756}
757
758static uint32_t
759cpuid_set_cpufamily(i386_cpu_info_t *info_p)
760{
761 uint32_t cpufamily = CPUFAMILY_UNKNOWN;
762
763 switch (info_p->cpuid_family) {
764 case 6:
765 switch (info_p->cpuid_model) {
 766 case 23: /* CPUID_MODEL_PENRYN */
767 cpufamily = CPUFAMILY_INTEL_PENRYN;
768 break;
769 case CPUID_MODEL_NEHALEM:
770 case CPUID_MODEL_FIELDS:
771 case CPUID_MODEL_DALES:
772 case CPUID_MODEL_NEHALEM_EX:
773 cpufamily = CPUFAMILY_INTEL_NEHALEM;
774 break;
775 case CPUID_MODEL_DALES_32NM:
776 case CPUID_MODEL_WESTMERE:
777 case CPUID_MODEL_WESTMERE_EX:
778 cpufamily = CPUFAMILY_INTEL_WESTMERE;
779 break;
780 case CPUID_MODEL_SANDYBRIDGE:
781 case CPUID_MODEL_JAKETOWN:
782 cpufamily = CPUFAMILY_INTEL_SANDYBRIDGE;
783 break;
784 case CPUID_MODEL_IVYBRIDGE:
785 case CPUID_MODEL_IVYBRIDGE_EP:
786 cpufamily = CPUFAMILY_INTEL_IVYBRIDGE;
787 break;
788 case CPUID_MODEL_HASWELL:
789 case CPUID_MODEL_HASWELL_EP:
790 case CPUID_MODEL_HASWELL_ULT:
791 case CPUID_MODEL_CRYSTALWELL:
792 cpufamily = CPUFAMILY_INTEL_HASWELL;
793 break;
794 case CPUID_MODEL_BROADWELL:
795 case CPUID_MODEL_BRYSTALWELL:
796 cpufamily = CPUFAMILY_INTEL_BROADWELL;
797 break;
798 case CPUID_MODEL_SKYLAKE:
799 case CPUID_MODEL_SKYLAKE_DT:
800#if !defined(RC_HIDE_XNU_J137)
801 case CPUID_MODEL_SKYLAKE_W:
802#endif
803 cpufamily = CPUFAMILY_INTEL_SKYLAKE;
804 break;
805 case CPUID_MODEL_KABYLAKE:
806 case CPUID_MODEL_KABYLAKE_DT:
807 cpufamily = CPUFAMILY_INTEL_KABYLAKE;
808 break;
809 }
810 break;
811 }
812
813 info_p->cpuid_cpufamily = cpufamily;
814 DBG("cpuid_set_cpufamily(%p) returning 0x%x\n", info_p, cpufamily);
815 return cpufamily;
816}
817/*
818 * Must be invoked either when executing single threaded, or with
819 * independent synchronization.
820 */
821void
822cpuid_set_info(void)
823{
824 i386_cpu_info_t *info_p = &cpuid_cpu_info;
825 boolean_t enable_x86_64h = TRUE;
826
827 cpuid_set_generic_info(info_p);
828
829 /* verify we are running on a supported CPU */
830 if ((strncmp(CPUID_VID_INTEL, info_p->cpuid_vendor,
831 min(strlen(CPUID_STRING_UNKNOWN) + 1,
832 sizeof(info_p->cpuid_vendor)))) ||
833 (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN))
834 panic("Unsupported CPU");
835
836 info_p->cpuid_cpu_type = CPU_TYPE_X86;
837
838 if (!PE_parse_boot_argn("-enable_x86_64h", &enable_x86_64h, sizeof(enable_x86_64h))) {
839 boolean_t disable_x86_64h = FALSE;
840
841 if (PE_parse_boot_argn("-disable_x86_64h", &disable_x86_64h, sizeof(disable_x86_64h))) {
842 enable_x86_64h = FALSE;
843 }
844 }
845
846 if (enable_x86_64h &&
847 ((info_p->cpuid_features & CPUID_X86_64_H_FEATURE_SUBSET) == CPUID_X86_64_H_FEATURE_SUBSET) &&
848 ((info_p->cpuid_extfeatures & CPUID_X86_64_H_EXTFEATURE_SUBSET) == CPUID_X86_64_H_EXTFEATURE_SUBSET) &&
849 ((info_p->cpuid_leaf7_features & CPUID_X86_64_H_LEAF7_FEATURE_SUBSET) == CPUID_X86_64_H_LEAF7_FEATURE_SUBSET)) {
850 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_64_H;
851 } else {
852 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
853 }
854 /* cpuid_set_cache_info must be invoked after set_generic_info */
855
856 if (info_p->cpuid_cpufamily == CPUFAMILY_INTEL_PENRYN)
857 cpuid_set_cache_info(info_p);
858
859 /*
860 * Find the number of enabled cores and threads
861 * (which determines whether SMT/Hyperthreading is active).
862 */
863 switch (info_p->cpuid_cpufamily) {
864 case CPUFAMILY_INTEL_PENRYN:
865 info_p->core_count = info_p->cpuid_cores_per_package;
866 info_p->thread_count = info_p->cpuid_logical_per_package;
867 break;
868 case CPUFAMILY_INTEL_WESTMERE: {
869 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
870 info_p->core_count = bitfield32((uint32_t)msr, 19, 16);
871 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
872 break;
873 }
874 default: {
875 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
876 if (msr == 0)
877 /* Provide a non-zero default for some VMMs */
 878 msr = (1 << 16) + 1; /* 1 core, 1 thread */
879 info_p->core_count = bitfield32((uint32_t)msr, 31, 16);
880 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
881 break;
882 }
883 }
884 if (info_p->core_count == 0) {
885 info_p->core_count = info_p->cpuid_cores_per_package;
886 info_p->thread_count = info_p->cpuid_logical_per_package;
887 }
888
889 if (info_p->cpuid_cpufamily != CPUFAMILY_INTEL_PENRYN)
890 cpuid_set_cache_info(info_p);
891
892 DBG("cpuid_set_info():\n");
893 DBG(" core_count : %d\n", info_p->core_count);
894 DBG(" thread_count : %d\n", info_p->thread_count);
895 DBG(" cpu_type: 0x%08x\n", info_p->cpuid_cpu_type);
896 DBG(" cpu_subtype: 0x%08x\n", info_p->cpuid_cpu_subtype);
897
898 info_p->cpuid_model_string = ""; /* deprecated */
899}
900
901static struct table {
902 uint64_t mask;
903 const char *name;
904} feature_map[] = {
905 {CPUID_FEATURE_FPU, "FPU"},
906 {CPUID_FEATURE_VME, "VME"},
907 {CPUID_FEATURE_DE, "DE"},
908 {CPUID_FEATURE_PSE, "PSE"},
909 {CPUID_FEATURE_TSC, "TSC"},
910 {CPUID_FEATURE_MSR, "MSR"},
911 {CPUID_FEATURE_PAE, "PAE"},
912 {CPUID_FEATURE_MCE, "MCE"},
913 {CPUID_FEATURE_CX8, "CX8"},
914 {CPUID_FEATURE_APIC, "APIC"},
915 {CPUID_FEATURE_SEP, "SEP"},
916 {CPUID_FEATURE_MTRR, "MTRR"},
917 {CPUID_FEATURE_PGE, "PGE"},
918 {CPUID_FEATURE_MCA, "MCA"},
919 {CPUID_FEATURE_CMOV, "CMOV"},
920 {CPUID_FEATURE_PAT, "PAT"},
921 {CPUID_FEATURE_PSE36, "PSE36"},
922 {CPUID_FEATURE_PSN, "PSN"},
923 {CPUID_FEATURE_CLFSH, "CLFSH"},
924 {CPUID_FEATURE_DS, "DS"},
925 {CPUID_FEATURE_ACPI, "ACPI"},
926 {CPUID_FEATURE_MMX, "MMX"},
927 {CPUID_FEATURE_FXSR, "FXSR"},
928 {CPUID_FEATURE_SSE, "SSE"},
929 {CPUID_FEATURE_SSE2, "SSE2"},
930 {CPUID_FEATURE_SS, "SS"},
931 {CPUID_FEATURE_HTT, "HTT"},
932 {CPUID_FEATURE_TM, "TM"},
933 {CPUID_FEATURE_PBE, "PBE"},
934 {CPUID_FEATURE_SSE3, "SSE3"},
935 {CPUID_FEATURE_PCLMULQDQ, "PCLMULQDQ"},
936 {CPUID_FEATURE_DTES64, "DTES64"},
937 {CPUID_FEATURE_MONITOR, "MON"},
938 {CPUID_FEATURE_DSCPL, "DSCPL"},
939 {CPUID_FEATURE_VMX, "VMX"},
940 {CPUID_FEATURE_SMX, "SMX"},
941 {CPUID_FEATURE_EST, "EST"},
942 {CPUID_FEATURE_TM2, "TM2"},
943 {CPUID_FEATURE_SSSE3, "SSSE3"},
944 {CPUID_FEATURE_CID, "CID"},
945 {CPUID_FEATURE_FMA, "FMA"},
946 {CPUID_FEATURE_CX16, "CX16"},
947 {CPUID_FEATURE_xTPR, "TPR"},
948 {CPUID_FEATURE_PDCM, "PDCM"},
949 {CPUID_FEATURE_SSE4_1, "SSE4.1"},
950 {CPUID_FEATURE_SSE4_2, "SSE4.2"},
951 {CPUID_FEATURE_x2APIC, "x2APIC"},
952 {CPUID_FEATURE_MOVBE, "MOVBE"},
953 {CPUID_FEATURE_POPCNT, "POPCNT"},
954 {CPUID_FEATURE_AES, "AES"},
955 {CPUID_FEATURE_VMM, "VMM"},
956 {CPUID_FEATURE_PCID, "PCID"},
957 {CPUID_FEATURE_XSAVE, "XSAVE"},
958 {CPUID_FEATURE_OSXSAVE, "OSXSAVE"},
959 {CPUID_FEATURE_SEGLIM64, "SEGLIM64"},
960 {CPUID_FEATURE_TSCTMR, "TSCTMR"},
961 {CPUID_FEATURE_AVX1_0, "AVX1.0"},
962 {CPUID_FEATURE_RDRAND, "RDRAND"},
963 {CPUID_FEATURE_F16C, "F16C"},
964 {0, 0}
965},
966extfeature_map[] = {
967 {CPUID_EXTFEATURE_SYSCALL, "SYSCALL"},
968 {CPUID_EXTFEATURE_XD, "XD"},
969 {CPUID_EXTFEATURE_1GBPAGE, "1GBPAGE"},
970 {CPUID_EXTFEATURE_EM64T, "EM64T"},
971 {CPUID_EXTFEATURE_LAHF, "LAHF"},
972 {CPUID_EXTFEATURE_LZCNT, "LZCNT"},
973 {CPUID_EXTFEATURE_PREFETCHW, "PREFETCHW"},
974 {CPUID_EXTFEATURE_RDTSCP, "RDTSCP"},
975 {CPUID_EXTFEATURE_TSCI, "TSCI"},
976 {0, 0}
977
978},
979leaf7_feature_map[] = {
980 {CPUID_LEAF7_FEATURE_SMEP, "SMEP"},
981 {CPUID_LEAF7_FEATURE_ERMS, "ERMS"},
982 {CPUID_LEAF7_FEATURE_RDWRFSGS, "RDWRFSGS"},
983 {CPUID_LEAF7_FEATURE_TSCOFF, "TSC_THREAD_OFFSET"},
984 {CPUID_LEAF7_FEATURE_BMI1, "BMI1"},
985 {CPUID_LEAF7_FEATURE_HLE, "HLE"},
986 {CPUID_LEAF7_FEATURE_AVX2, "AVX2"},
987 {CPUID_LEAF7_FEATURE_BMI2, "BMI2"},
988 {CPUID_LEAF7_FEATURE_INVPCID, "INVPCID"},
989 {CPUID_LEAF7_FEATURE_RTM, "RTM"},
990 {CPUID_LEAF7_FEATURE_SMAP, "SMAP"},
991 {CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"},
992 {CPUID_LEAF7_FEATURE_ADX, "ADX"},
993 {CPUID_LEAF7_FEATURE_IPT, "IPT"},
994#if !defined(RC_HIDE_XNU_J137)
995 {CPUID_LEAF7_FEATURE_AVX512F, "AVX512F"},
996 {CPUID_LEAF7_FEATURE_AVX512CD, "AVX512CD"},
997 {CPUID_LEAF7_FEATURE_AVX512DQ, "AVX512DQ"},
998 {CPUID_LEAF7_FEATURE_AVX512BW, "AVX512BW"},
999 {CPUID_LEAF7_FEATURE_AVX512VL, "AVX512VL"},
1000 {CPUID_LEAF7_FEATURE_AVX512IFMA, "AVX512IFMA"},
1001 {CPUID_LEAF7_FEATURE_AVX512VBMI, "AVX512VBMI"},
1002#endif /* not RC_HIDE_XNU_J137 */
1003 {CPUID_LEAF7_FEATURE_SGX, "SGX"},
1004 {CPUID_LEAF7_FEATURE_PQM, "PQM"},
1005 {CPUID_LEAF7_FEATURE_FPU_CSDS, "FPU_CSDS"},
1006 {CPUID_LEAF7_FEATURE_MPX, "MPX"},
1007 {CPUID_LEAF7_FEATURE_PQE, "PQE"},
1008 {CPUID_LEAF7_FEATURE_CLFSOPT, "CLFSOPT"},
1009 {CPUID_LEAF7_FEATURE_SHA, "SHA"},
1010 {0, 0}
1011};
1012
1013static char *
1014cpuid_get_names(struct table *map, uint64_t bits, char *buf, unsigned buf_len)
1015{
1016 size_t len = 0;
1017 char *p = buf;
1018 int i;
1019
1020 for (i = 0; map[i].mask != 0; i++) {
1021 if ((bits & map[i].mask) == 0)
1022 continue;
1023 if (len && ((size_t) (p - buf) < (buf_len - 1)))
1024 *p++ = ' ';
1025 len = min(strlen(map[i].name), (size_t)((buf_len-1)-(p-buf)));
1026 if (len == 0)
1027 break;
1028 bcopy(map[i].name, p, len);
1029 p += len;
1030 }
1031 *p = '\0';
1032 return buf;
1033}
1034
1035i386_cpu_info_t *
1036cpuid_info(void)
1037{
 1038 /* Set up the cpuid_info structure lazily */
1039 if (cpuid_cpu_infop == NULL) {
1040 PE_parse_boot_argn("-cpuid", &cpuid_dbg, sizeof(cpuid_dbg));
1041 cpuid_set_info();
1042 cpuid_cpu_infop = &cpuid_cpu_info;
1043 }
1044 return cpuid_cpu_infop;
1045}
1046
1047char *
1048cpuid_get_feature_names(uint64_t features, char *buf, unsigned buf_len)
1049{
1050 return cpuid_get_names(feature_map, features, buf, buf_len);
1051}
1052
1053char *
1054cpuid_get_extfeature_names(uint64_t extfeatures, char *buf, unsigned buf_len)
1055{
1056 return cpuid_get_names(extfeature_map, extfeatures, buf, buf_len);
1057}
1058
1059char *
1060cpuid_get_leaf7_feature_names(uint64_t features, char *buf, unsigned buf_len)
1061{
1062 return cpuid_get_names(leaf7_feature_map, features, buf, buf_len);
1063}
1064
1065void
1066cpuid_feature_display(
1067 const char *header)
1068{
1069 char buf[320];
1070
1071 kprintf("%s: %s", header,
1072 cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf)));
1073 if (cpuid_leaf7_features())
1074 kprintf(" %s", cpuid_get_leaf7_feature_names(
1075 cpuid_leaf7_features(), buf, sizeof(buf)));
1076 kprintf("\n");
1077 if (cpuid_features() & CPUID_FEATURE_HTT) {
1078#define s_if_plural(n) ((n > 1) ? "s" : "")
1079 kprintf(" HTT: %d core%s per package;"
1080 " %d logical cpu%s per package\n",
1081 cpuid_cpu_infop->cpuid_cores_per_package,
1082 s_if_plural(cpuid_cpu_infop->cpuid_cores_per_package),
1083 cpuid_cpu_infop->cpuid_logical_per_package,
1084 s_if_plural(cpuid_cpu_infop->cpuid_logical_per_package));
1085 }
1086}
1087
1088void
1089cpuid_extfeature_display(
1090 const char *header)
1091{
1092 char buf[256];
1093
1094 kprintf("%s: %s\n", header,
1095 cpuid_get_extfeature_names(cpuid_extfeatures(),
1096 buf, sizeof(buf)));
1097}
1098
1099void
1100cpuid_cpu_display(
1101 const char *header)
1102{
1103 if (cpuid_cpu_infop->cpuid_brand_string[0] != '\0') {
1104 kprintf("%s: %s\n", header, cpuid_cpu_infop->cpuid_brand_string);
1105 }
1106}
1107
1108unsigned int
1109cpuid_family(void)
1110{
1111 return cpuid_info()->cpuid_family;
1112}
1113
1114uint32_t
1115cpuid_cpufamily(void)
1116{
1117 return cpuid_info()->cpuid_cpufamily;
1118}
1119
1120cpu_type_t
1121cpuid_cputype(void)
1122{
1123 return cpuid_info()->cpuid_cpu_type;
1124}
1125
1126cpu_subtype_t
1127cpuid_cpusubtype(void)
1128{
1129 return cpuid_info()->cpuid_cpu_subtype;
1130}
1131
1132uint64_t
1133cpuid_features(void)
1134{
1135 static int checked = 0;
1136 char fpu_arg[20] = { 0 };
1137
1138 (void) cpuid_info();
1139 if (!checked) {
1140 /* check for boot-time fpu limitations */
1141 if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof (fpu_arg))) {
1142 printf("limiting fpu features to: %s\n", fpu_arg);
1143 if (!strncmp("387", fpu_arg, sizeof("387")) || !strncmp("mmx", fpu_arg, sizeof("mmx"))) {
1144 printf("no sse or sse2\n");
1145 cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE | CPUID_FEATURE_SSE2 | CPUID_FEATURE_FXSR);
1146 } else if (!strncmp("sse", fpu_arg, sizeof("sse"))) {
1147 printf("no sse2\n");
1148 cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE2);
1149 }
1150 }
1151 checked = 1;
1152 }
1153 return cpuid_cpu_infop->cpuid_features;
1154}
1155
1156uint64_t
1157cpuid_extfeatures(void)
1158{
1159 return cpuid_info()->cpuid_extfeatures;
1160}
1161
1162uint64_t
1163cpuid_leaf7_features(void)
1164{
1165 return cpuid_info()->cpuid_leaf7_features;
1166}
1167
1168static i386_vmm_info_t *_cpuid_vmm_infop = NULL;
1169static i386_vmm_info_t _cpuid_vmm_info;
1170
1171static void
1172cpuid_init_vmm_info(i386_vmm_info_t *info_p)
1173{
1174 uint32_t reg[4];
1175 uint32_t max_vmm_leaf;
1176
1177 bzero(info_p, sizeof(*info_p));
1178
1179 if (!cpuid_vmm_present())
1180 return;
1181
1182 DBG("cpuid_init_vmm_info(%p)\n", info_p);
1183
1184 /* do cpuid 0x40000000 to get VMM vendor */
1185 cpuid_fn(0x40000000, reg);
1186 max_vmm_leaf = reg[eax];
1187 bcopy((char *)&reg[ebx], &info_p->cpuid_vmm_vendor[0], 4);
1188 bcopy((char *)&reg[ecx], &info_p->cpuid_vmm_vendor[4], 4);
1189 bcopy((char *)&reg[edx], &info_p->cpuid_vmm_vendor[8], 4);
1190 info_p->cpuid_vmm_vendor[12] = '\0';
1191
1192 if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_VMWARE)) {
1193 /* VMware identification string: kb.vmware.com/kb/1009458 */
1194 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_VMWARE;
1195 } else if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_PARALLELS)) {
1196 /* Parallels identification string */
1197 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_PARALLELS;
1198 } else {
1199 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_UNKNOWN;
1200 }
1201
1202 /* VMM generic leaves: https://lkml.org/lkml/2008/10/1/246 */
1203 if (max_vmm_leaf >= 0x40000010) {
1204 cpuid_fn(0x40000010, reg);
1205
1206 info_p->cpuid_vmm_tsc_frequency = reg[eax];
1207 info_p->cpuid_vmm_bus_frequency = reg[ebx];
1208 }
1209
1210 DBG(" vmm_vendor : %s\n", info_p->cpuid_vmm_vendor);
1211 DBG(" vmm_family : %u\n", info_p->cpuid_vmm_family);
1212 DBG(" vmm_bus_frequency : %u\n", info_p->cpuid_vmm_bus_frequency);
1213 DBG(" vmm_tsc_frequency : %u\n", info_p->cpuid_vmm_tsc_frequency);
1214}
1215
1216boolean_t
1217cpuid_vmm_present(void)
1218{
1219 return (cpuid_features() & CPUID_FEATURE_VMM) ? TRUE : FALSE;
1220}
1221
1222i386_vmm_info_t *
1223cpuid_vmm_info(void)
1224{
1225 if (_cpuid_vmm_infop == NULL) {
1226 cpuid_init_vmm_info(&_cpuid_vmm_info);
1227 _cpuid_vmm_infop = &_cpuid_vmm_info;
1228 }
1229 return _cpuid_vmm_infop;
1230}
1231
1232uint32_t
1233cpuid_vmm_family(void)
1234{
1235 return cpuid_vmm_info()->cpuid_vmm_family;
1236}
1237