1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 #include <vm/vm_page.h>
32 #include <pexpert/pexpert.h>
33
34 #include <i386/cpu_threads.h>
35 #include <i386/cpuid.h>
36
37 int force_tecs_at_idle;
38 int tecs_mode_supported;
39
40 static boolean_t cpuid_dbg
41 #if DEBUG
42 = TRUE;
43 #else
44 = FALSE;
45 #endif
46 #define DBG(x...) \
47 do { \
48 if (cpuid_dbg) \
49 kprintf(x); \
50 	} while (0)
51
52 #define min(a, b) ((a) < (b) ? (a) : (b))
53 #define quad(hi, lo) (((uint64_t)(hi)) << 32 | (lo))
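/*
 * For example, quad(reg[ecx], reg[edx]) is used below to pack the two
 * 32-bit feature words returned by CPUID leaf 1 into the single 64-bit
 * cpuid_features value (ecx bits in the high half, edx in the low half).
 */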
54
55 /*
56 * Leaf 2 cache descriptor encodings.
57 */
58 typedef enum {
59 _NULL_, /* NULL (empty) descriptor */
60 CACHE, /* Cache */
61 TLB, /* TLB */
62 STLB, /* Shared second-level unified TLB */
63 PREFETCH /* Prefetch size */
64 } cpuid_leaf2_desc_type_t;
65
66 typedef enum {
67 NA, /* Not Applicable */
68 FULLY, /* Fully-associative */
69 TRACE, /* Trace Cache (P4 only) */
70 INST, /* Instruction TLB */
71 DATA, /* Data TLB */
72 DATA0, /* Data TLB, 1st level */
73 DATA1, /* Data TLB, 2nd level */
74 L1, /* L1 (unified) cache */
75 L1_INST, /* L1 Instruction cache */
76 L1_DATA, /* L1 Data cache */
77 L2, /* L2 (unified) cache */
78 L3, /* L3 (unified) cache */
79 L2_2LINESECTOR, /* L2 (unified) cache with 2 lines per sector */
80 	L3_2LINESECTOR, /* L3 (unified) cache with 2 lines per sector */
81 SMALL, /* Small page TLB */
82 LARGE, /* Large page TLB */
83 BOTH /* Small and Large page TLB */
84 } cpuid_leaf2_qualifier_t;
85
86 typedef struct cpuid_cache_descriptor {
87 uint8_t value; /* descriptor code */
88 uint8_t type; /* cpuid_leaf2_desc_type_t */
89 	uint8_t value; /* descriptor code */ uint8_t level; /* level of cache/TLB hierarchy */
90 uint8_t ways; /* wayness of cache */
91 uint16_t size; /* cachesize or TLB pagesize */
92 uint16_t entries; /* number of TLB entries or linesize */
93 } cpuid_cache_descriptor_t;
94
95 /*
96 * These multipliers are used to encode 1*K .. 64*M in a 16-bit size field
97 */
98 #define K (1)
99 #define M (1024)
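/*
 * For example, the descriptor table below stores a 24 KB cache as
 * 24 * K == 24 and a 12 MB cache as 12 * M == 12288, both of which fit
 * in the 16-bit size field of cpuid_cache_descriptor_t.
 */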
100
101 /*
102 * Intel cache descriptor table:
103 */
104 static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table[] = {
105 // -------------------------------------------------------
106 // value type level ways size entries
107 // -------------------------------------------------------
108 { 0x00, _NULL_, NA, NA, NA, NA },
109 { 0x01, TLB, INST, 4, SMALL, 32 },
110 { 0x02, TLB, INST, FULLY, LARGE, 2 },
111 { 0x03, TLB, DATA, 4, SMALL, 64 },
112 { 0x04, TLB, DATA, 4, LARGE, 8 },
113 { 0x05, TLB, DATA1, 4, LARGE, 32 },
114 { 0x06, CACHE, L1_INST, 4, 8 * K, 32 },
115 { 0x08, CACHE, L1_INST, 4, 16 * K, 32 },
116 { 0x09, CACHE, L1_INST, 4, 32 * K, 64 },
117 { 0x0A, CACHE, L1_DATA, 2, 8 * K, 32 },
118 { 0x0B, TLB, INST, 4, LARGE, 4 },
119 { 0x0C, CACHE, L1_DATA, 4, 16 * K, 32 },
120 { 0x0D, CACHE, L1_DATA, 4, 16 * K, 64 },
121 { 0x0E, CACHE, L1_DATA, 6, 24 * K, 64 },
122 { 0x21, CACHE, L2, 8, 256 * K, 64 },
123 { 0x22, CACHE, L3_2LINESECTOR, 4, 512 * K, 64 },
124 { 0x23, CACHE, L3_2LINESECTOR, 8, 1 * M, 64 },
125 { 0x25, CACHE, L3_2LINESECTOR, 8, 2 * M, 64 },
126 { 0x29, CACHE, L3_2LINESECTOR, 8, 4 * M, 64 },
127 { 0x2C, CACHE, L1_DATA, 8, 32 * K, 64 },
128 { 0x30, CACHE, L1_INST, 8, 32 * K, 64 },
129 { 0x40, CACHE, L2, NA, 0, NA },
130 { 0x41, CACHE, L2, 4, 128 * K, 32 },
131 { 0x42, CACHE, L2, 4, 256 * K, 32 },
132 { 0x43, CACHE, L2, 4, 512 * K, 32 },
133 { 0x44, CACHE, L2, 4, 1 * M, 32 },
134 { 0x45, CACHE, L2, 4, 2 * M, 32 },
135 { 0x46, CACHE, L3, 4, 4 * M, 64 },
136 { 0x47, CACHE, L3, 8, 8 * M, 64 },
137 { 0x48, CACHE, L2, 12, 3 * M, 64 },
138 { 0x49, CACHE, L2, 16, 4 * M, 64 },
139 { 0x4A, CACHE, L3, 12, 6 * M, 64 },
140 { 0x4B, CACHE, L3, 16, 8 * M, 64 },
141 { 0x4C, CACHE, L3, 12, 12 * M, 64 },
142 { 0x4D, CACHE, L3, 16, 16 * M, 64 },
143 { 0x4E, CACHE, L2, 24, 6 * M, 64 },
144 { 0x4F, TLB, INST, NA, SMALL, 32 },
145 { 0x50, TLB, INST, NA, BOTH, 64 },
146 { 0x51, TLB, INST, NA, BOTH, 128 },
147 { 0x52, TLB, INST, NA, BOTH, 256 },
148 { 0x55, TLB, INST, FULLY, BOTH, 7 },
149 { 0x56, TLB, DATA0, 4, LARGE, 16 },
150 { 0x57, TLB, DATA0, 4, SMALL, 16 },
151 { 0x59, TLB, DATA0, FULLY, SMALL, 16 },
152 { 0x5A, TLB, DATA0, 4, LARGE, 32 },
153 { 0x5B, TLB, DATA, NA, BOTH, 64 },
154 { 0x5C, TLB, DATA, NA, BOTH, 128 },
155 { 0x5D, TLB, DATA, NA, BOTH, 256 },
156 	{ 0x60, CACHE, L1, 8, 16 * K, 64 },
157 { 0x61, CACHE, L1, 4, 8 * K, 64 },
158 { 0x62, CACHE, L1, 4, 16 * K, 64 },
159 { 0x63, CACHE, L1, 4, 32 * K, 64 },
160 { 0x70, CACHE, TRACE, 8, 12 * K, NA },
161 { 0x71, CACHE, TRACE, 8, 16 * K, NA },
162 { 0x72, CACHE, TRACE, 8, 32 * K, NA },
163 { 0x76, TLB, INST, NA, BOTH, 8 },
164 { 0x78, CACHE, L2, 4, 1 * M, 64 },
165 { 0x79, CACHE, L2_2LINESECTOR, 8, 128 * K, 64 },
166 { 0x7A, CACHE, L2_2LINESECTOR, 8, 256 * K, 64 },
167 { 0x7B, CACHE, L2_2LINESECTOR, 8, 512 * K, 64 },
168 { 0x7C, CACHE, L2_2LINESECTOR, 8, 1 * M, 64 },
169 { 0x7D, CACHE, L2, 8, 2 * M, 64 },
170 { 0x7F, CACHE, L2, 2, 512 * K, 64 },
171 { 0x80, CACHE, L2, 8, 512 * K, 64 },
172 { 0x82, CACHE, L2, 8, 256 * K, 32 },
173 { 0x83, CACHE, L2, 8, 512 * K, 32 },
174 { 0x84, CACHE, L2, 8, 1 * M, 32 },
175 { 0x85, CACHE, L2, 8, 2 * M, 32 },
176 { 0x86, CACHE, L2, 4, 512 * K, 64 },
177 { 0x87, CACHE, L2, 8, 1 * M, 64 },
178 { 0xB0, TLB, INST, 4, SMALL, 128 },
179 { 0xB1, TLB, INST, 4, LARGE, 8 },
180 { 0xB2, TLB, INST, 4, SMALL, 64 },
181 { 0xB3, TLB, DATA, 4, SMALL, 128 },
182 { 0xB4, TLB, DATA1, 4, SMALL, 256 },
183 { 0xB5, TLB, DATA1, 8, SMALL, 64 },
184 { 0xB6, TLB, DATA1, 8, SMALL, 128 },
185 { 0xBA, TLB, DATA1, 4, BOTH, 64 },
186 { 0xC1, STLB, DATA1, 8, SMALL, 1024},
187 { 0xCA, STLB, DATA1, 4, SMALL, 512 },
188 { 0xD0, CACHE, L3, 4, 512 * K, 64 },
189 { 0xD1, CACHE, L3, 4, 1 * M, 64 },
190 { 0xD2, CACHE, L3, 4, 2 * M, 64 },
191 { 0xD3, CACHE, L3, 4, 4 * M, 64 },
192 { 0xD4, CACHE, L3, 4, 8 * M, 64 },
193 { 0xD6, CACHE, L3, 8, 1 * M, 64 },
194 { 0xD7, CACHE, L3, 8, 2 * M, 64 },
195 { 0xD8, CACHE, L3, 8, 4 * M, 64 },
196 { 0xD9, CACHE, L3, 8, 8 * M, 64 },
197 { 0xDA, CACHE, L3, 8, 12 * M, 64 },
198 { 0xDC, CACHE, L3, 12, 1536 * K, 64 },
199 { 0xDD, CACHE, L3, 12, 3 * M, 64 },
200 { 0xDE, CACHE, L3, 12, 6 * M, 64 },
201 { 0xDF, CACHE, L3, 12, 12 * M, 64 },
202 { 0xE0, CACHE, L3, 12, 18 * M, 64 },
203 { 0xE2, CACHE, L3, 16, 2 * M, 64 },
204 { 0xE3, CACHE, L3, 16, 4 * M, 64 },
205 { 0xE4, CACHE, L3, 16, 8 * M, 64 },
206 { 0xE5, CACHE, L3, 16, 16 * M, 64 },
207 { 0xE6, CACHE, L3, 16, 24 * M, 64 },
208 { 0xF0, PREFETCH, NA, NA, 64, NA },
209 { 0xF1, PREFETCH, NA, NA, 128, NA },
210 { 0xFF, CACHE, NA, NA, 0, NA }
211 };
212 #define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
213 sizeof(cpuid_cache_descriptor_t))
214
215 static void do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave);
216
217 static inline cpuid_cache_descriptor_t *
218 cpuid_leaf2_find(uint8_t value)
219 {
220 unsigned int i;
221
222 for (i = 0; i < INTEL_LEAF2_DESC_NUM; i++) {
223 if (intel_cpuid_leaf2_descriptor_table[i].value == value) {
224 return &intel_cpuid_leaf2_descriptor_table[i];
225 }
226 }
227 return NULL;
228 }
229
230 /*
231 * CPU identification routines.
232 */
233
234 static i386_cpu_info_t cpuid_cpu_info;
235 static i386_cpu_info_t *cpuid_cpu_infop = NULL;
236
237 static void
238 cpuid_fn(uint32_t selector, uint32_t *result)
239 {
240 do_cpuid(selector, result);
241 DBG("cpuid_fn(0x%08x) eax:0x%08x ebx:0x%08x ecx:0x%08x edx:0x%08x\n",
242 selector, result[0], result[1], result[2], result[3]);
243 }
244
245 static const char *cache_type_str[LCACHE_MAX] = {
246 "Lnone", "L1I", "L1D", "L2U", "L3U"
247 };
248
249 static void
250 do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave)
251 {
252 extern int force_thread_policy_tecs;
253
254 /*
255 * Workaround for reclaiming perf counter 3 due to TSX memory ordering erratum.
256 * This workaround does not support being forcibly set (since an MSR must be
257 * enumerated, lest we #GP when forced to access it.)
258 */
259 if (cpuid_wa_required(CPU_INTEL_TSXFA) == CWA_ON) {
260 wrmsr64(MSR_IA32_TSX_FORCE_ABORT,
261 rdmsr64(MSR_IA32_TSX_FORCE_ABORT) | MSR_IA32_TSXFA_RTM_FORCE_ABORT);
262 }
263
264 if (on_slave) {
265 return;
266 }
267
268 switch (cpuid_wa_required(CPU_INTEL_SEGCHK)) {
269 case CWA_FORCE_ON:
270 force_thread_policy_tecs = 1;
271
272 /* If hyperthreaded, enable idle workaround */
273 if (cpuinfo->thread_count > cpuinfo->core_count) {
274 force_tecs_at_idle = 1;
275 }
276
277 /*FALLTHROUGH*/
278 case CWA_ON:
279 tecs_mode_supported = 1;
280 break;
281
282 case CWA_FORCE_OFF:
283 case CWA_OFF:
284 tecs_mode_supported = 0;
285 force_tecs_at_idle = 0;
286 force_thread_policy_tecs = 0;
287 break;
288
289 default:
290 break;
291 }
292 }
293
294 void
295 cpuid_do_was(void)
296 {
297 do_cwas(cpuid_info(), TRUE);
298 }
299
300 /* this function is Intel-specific */
301 static void
302 cpuid_set_cache_info( i386_cpu_info_t * info_p )
303 {
304 uint32_t cpuid_result[4];
305 uint32_t reg[4];
306 uint32_t index;
307 uint32_t linesizes[LCACHE_MAX];
308 unsigned int i;
309 unsigned int j;
310 boolean_t cpuid_deterministic_supported = FALSE;
311
312 DBG("cpuid_set_cache_info(%p)\n", info_p);
313
314 bzero( linesizes, sizeof(linesizes));
315
316 /* Get processor cache descriptor info using leaf 2. We don't use
317 * this internally, but must publish it for KEXTs.
318 */
319 cpuid_fn(2, cpuid_result);
320 for (j = 0; j < 4; j++) {
321 		if ((cpuid_result[j] >> 31) == 1) { /* bit 31 set: register holds no valid descriptors */
322 continue;
323 }
324 ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j];
325 }
326 /* first byte gives number of cpuid calls to get all descriptors */
327 for (i = 1; i < info_p->cache_info[0]; i++) {
328 if (i * 16 > sizeof(info_p->cache_info)) {
329 break;
330 }
331 cpuid_fn(2, cpuid_result);
332 for (j = 0; j < 4; j++) {
333 if ((cpuid_result[j] >> 31) == 1) {
334 continue;
335 }
336 ((uint32_t *) info_p->cache_info)[4 * i + j] =
337 cpuid_result[j];
338 }
339 }
340
341 /*
342 * Get cache info using leaf 4, the "deterministic cache parameters."
343 * Most processors Mac OS X supports implement this flavor of CPUID.
344 * Loop over each cache on the processor.
345 */
346 cpuid_fn(0, cpuid_result);
347 if (cpuid_result[eax] >= 4) {
348 cpuid_deterministic_supported = TRUE;
349 }
350
351 for (index = 0; cpuid_deterministic_supported; index++) {
352 cache_type_t type = Lnone;
353 uint32_t cache_type;
354 uint32_t cache_level;
355 uint32_t cache_sharing;
356 uint32_t cache_linesize;
357 uint32_t cache_sets;
358 uint32_t cache_associativity;
359 uint32_t cache_size;
360 uint32_t cache_partitions;
361 uint32_t colors;
362
363 reg[eax] = 4; /* cpuid request 4 */
364 reg[ecx] = index; /* index starting at 0 */
365 cpuid(reg);
366 DBG("cpuid(4) index=%d eax=0x%x\n", index, reg[eax]);
367 cache_type = bitfield32(reg[eax], 4, 0);
368 if (cache_type == 0) {
369 break; /* no more caches */
370 }
371 cache_level = bitfield32(reg[eax], 7, 5);
372 cache_sharing = bitfield32(reg[eax], 25, 14) + 1;
373 info_p->cpuid_cores_per_package
374 = bitfield32(reg[eax], 31, 26) + 1;
375 cache_linesize = bitfield32(reg[ebx], 11, 0) + 1;
376 cache_partitions = bitfield32(reg[ebx], 21, 12) + 1;
377 cache_associativity = bitfield32(reg[ebx], 31, 22) + 1;
378 cache_sets = bitfield32(reg[ecx], 31, 0) + 1;
379
380 /* Map type/levels returned by CPUID into cache_type_t */
381 switch (cache_level) {
382 case 1:
383 type = cache_type == 1 ? L1D :
384 cache_type == 2 ? L1I :
385 Lnone;
386 break;
387 case 2:
388 type = cache_type == 3 ? L2U :
389 Lnone;
390 break;
391 case 3:
392 type = cache_type == 3 ? L3U :
393 Lnone;
394 break;
395 default:
396 type = Lnone;
397 }
398
399 /* The total size of a cache is:
400 * ( linesize * sets * associativity * partitions )
401 */
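		/*
		 * Illustration: a cache reporting 64-byte lines, 64 sets,
		 * 8-way associativity and a single partition works out to
		 * 64 * 64 * 8 * 1 = 32 KB.
		 */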
402 if (type != Lnone) {
403 cache_size = cache_linesize * cache_sets *
404 cache_associativity * cache_partitions;
405 info_p->cache_size[type] = cache_size;
406 info_p->cache_sharing[type] = cache_sharing;
407 info_p->cache_partitions[type] = cache_partitions;
408 linesizes[type] = cache_linesize;
409
410 DBG(" cache_size[%s] : %d\n",
411 cache_type_str[type], cache_size);
412 DBG(" cache_sharing[%s] : %d\n",
413 cache_type_str[type], cache_sharing);
414 DBG(" cache_partitions[%s]: %d\n",
415 cache_type_str[type], cache_partitions);
416
417 /*
418 * Overwrite associativity determined via
419 * CPUID.0x80000006 -- this leaf is more
420 * accurate
421 */
422 if (type == L2U) {
423 info_p->cpuid_cache_L2_associativity = cache_associativity;
424 }
425 /*
426 * Adjust #sets to account for the N CBos
427 * This is because addresses are hashed across CBos
428 */
429 if (type == L3U && info_p->core_count) {
430 cache_sets = cache_sets / info_p->core_count;
431 }
432
433 /* Compute the number of page colors for this cache,
434 * which is:
435 * ( linesize * sets ) / page_size
436 *
437 * To help visualize this, consider two views of a
438 * physical address. To the cache, it is composed
439 * of a line offset, a set selector, and a tag.
440 * To VM, it is composed of a page offset, a page
441 * color, and other bits in the pageframe number:
442 *
443 * +-----------------+---------+--------+
444 * cache: | tag | set | offset |
445 * +-----------------+---------+--------+
446 *
447 * +-----------------+-------+----------+
448 * VM: | don't care | color | pg offset|
449 * +-----------------+-------+----------+
450 *
451 * The color is those bits in (set+offset) not covered
452 * by the page offset.
453 */
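			/*
			 * Worked example: with 64-byte lines and 8192 sets,
			 * set+offset spans 64 * 8192 = 512 KB, so the 4 KB page
			 * shift yields (64 * 8192) >> 12 = 128 page colors.
			 */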
454 colors = (cache_linesize * cache_sets) >> 12;
455
456 if (colors > vm_cache_geometry_colors) {
457 vm_cache_geometry_colors = colors;
458 }
459 }
460 }
461 DBG(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
462
463 /*
464 	 * If deterministic cache parameters are not available, fall back
465 	 * to the L2 values read earlier from CPUID leaf 0x80000006.
466 */
467 if (info_p->cpuid_cores_per_package == 0) {
468 info_p->cpuid_cores_per_package = 1;
469
470 		/* cpuid reports this size in KB (1024-byte) quantities */
471 info_p->cache_size[L2U] = info_p->cpuid_cache_size * 1024;
472 info_p->cache_sharing[L2U] = 1;
473 info_p->cache_partitions[L2U] = 1;
474
475 linesizes[L2U] = info_p->cpuid_cache_linesize;
476
477 DBG(" cache_size[L2U] : %d\n",
478 info_p->cache_size[L2U]);
479 DBG(" cache_sharing[L2U] : 1\n");
480 DBG(" cache_partitions[L2U]: 1\n");
481 DBG(" linesizes[L2U] : %d\n",
482 info_p->cpuid_cache_linesize);
483 }
484
485 /*
486 * What linesize to publish? We use the L2 linesize if any,
487 * else the L1D.
488 */
489 if (linesizes[L2U]) {
490 info_p->cache_linesize = linesizes[L2U];
491 } else if (linesizes[L1D]) {
492 info_p->cache_linesize = linesizes[L1D];
493 } else {
494 panic("no linesize");
495 }
496 DBG(" cache_linesize : %d\n", info_p->cache_linesize);
497
498 /*
499 * Extract and publish TLB information from Leaf 2 descriptors.
500 */
501 DBG(" %ld leaf2 descriptors:\n", sizeof(info_p->cache_info));
502 for (i = 1; i < sizeof(info_p->cache_info); i++) {
503 cpuid_cache_descriptor_t *descp;
504 int id;
505 int level;
506 int page;
507
508 DBG(" 0x%02x", info_p->cache_info[i]);
509 descp = cpuid_leaf2_find(info_p->cache_info[i]);
510 if (descp == NULL) {
511 continue;
512 }
513
514 switch (descp->type) {
515 case TLB:
516 page = (descp->size == SMALL) ? TLB_SMALL : TLB_LARGE;
517 /* determine I or D: */
518 switch (descp->level) {
519 case INST:
520 id = TLB_INST;
521 break;
522 case DATA:
523 case DATA0:
524 case DATA1:
525 id = TLB_DATA;
526 break;
527 default:
528 continue;
529 }
530 /* determine level: */
531 switch (descp->level) {
532 case DATA1:
533 level = 1;
534 break;
535 default:
536 level = 0;
537 }
538 info_p->cpuid_tlb[id][page][level] = descp->entries;
539 break;
540 case STLB:
541 info_p->cpuid_stlb = descp->entries;
542 }
543 }
544 DBG("\n");
545 }
546
547 static void
548 cpuid_set_generic_info(i386_cpu_info_t *info_p)
549 {
550 uint32_t reg[4];
551 char str[128], *p;
552
553 DBG("cpuid_set_generic_info(%p)\n", info_p);
554
555 /* do cpuid 0 to get vendor */
556 cpuid_fn(0, reg);
557 info_p->cpuid_max_basic = reg[eax];
558 bcopy((char *)&reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
559 bcopy((char *)&reg[ecx], &info_p->cpuid_vendor[8], 4);
560 bcopy((char *)&reg[edx], &info_p->cpuid_vendor[4], 4);
561 info_p->cpuid_vendor[12] = 0;
562
563 /* get extended cpuid results */
564 cpuid_fn(0x80000000, reg);
565 info_p->cpuid_max_ext = reg[eax];
566
567 /* check to see if we can get brand string */
568 if (info_p->cpuid_max_ext >= 0x80000004) {
569 /*
570 		 * The brand string is 48 bytes (max), guaranteed to
571 * be NUL terminated.
572 */
573 cpuid_fn(0x80000002, reg);
574 bcopy((char *)reg, &str[0], 16);
575 cpuid_fn(0x80000003, reg);
576 bcopy((char *)reg, &str[16], 16);
577 cpuid_fn(0x80000004, reg);
578 bcopy((char *)reg, &str[32], 16);
579 for (p = str; *p != '\0'; p++) {
580 if (*p != ' ') {
581 break;
582 }
583 }
584 strlcpy(info_p->cpuid_brand_string,
585 p, sizeof(info_p->cpuid_brand_string));
586
587 if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN,
588 min(sizeof(info_p->cpuid_brand_string),
589 strlen(CPUID_STRING_UNKNOWN) + 1))) {
590 /*
591 * This string means we have a firmware-programmable brand string,
592 * and the firmware couldn't figure out what sort of CPU we have.
593 */
594 info_p->cpuid_brand_string[0] = '\0';
595 }
596 }
597
598 /* Get cache and addressing info. */
599 if (info_p->cpuid_max_ext >= 0x80000006) {
600 uint32_t assoc;
601 cpuid_fn(0x80000006, reg);
602 info_p->cpuid_cache_linesize = bitfield32(reg[ecx], 7, 0);
603 assoc = bitfield32(reg[ecx], 15, 12);
604 /*
605 * L2 associativity is encoded, though in an insufficiently
606 * descriptive fashion, e.g. 24-way is mapped to 16-way.
607 * Represent a fully associative cache as 0xFFFF.
608 * Overwritten by associativity as determined via CPUID.4
609 * if available.
610 */
611 if (assoc == 6) {
612 assoc = 8;
613 } else if (assoc == 8) {
614 assoc = 16;
615 } else if (assoc == 0xF) {
616 assoc = 0xFFFF;
617 }
618 info_p->cpuid_cache_L2_associativity = assoc;
619 info_p->cpuid_cache_size = bitfield32(reg[ecx], 31, 16);
620 cpuid_fn(0x80000008, reg);
621 info_p->cpuid_address_bits_physical =
622 bitfield32(reg[eax], 7, 0);
623 info_p->cpuid_address_bits_virtual =
624 bitfield32(reg[eax], 15, 8);
625 }
626
627 /*
628 	 * Get the processor signature and decode it, bracketing this with
629 	 * the approved procedure for reading the microcode version
630 	 * number a.k.a. signature a.k.a. BIOS ID
631 */
632 wrmsr64(MSR_IA32_BIOS_SIGN_ID, 0);
633 cpuid_fn(1, reg);
634 info_p->cpuid_microcode_version =
635 (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
636 info_p->cpuid_signature = reg[eax];
637 info_p->cpuid_stepping = bitfield32(reg[eax], 3, 0);
638 info_p->cpuid_model = bitfield32(reg[eax], 7, 4);
639 info_p->cpuid_family = bitfield32(reg[eax], 11, 8);
640 info_p->cpuid_type = bitfield32(reg[eax], 13, 12);
641 info_p->cpuid_extmodel = bitfield32(reg[eax], 19, 16);
642 info_p->cpuid_extfamily = bitfield32(reg[eax], 27, 20);
643 info_p->cpuid_brand = bitfield32(reg[ebx], 7, 0);
644 info_p->cpuid_features = quad(reg[ecx], reg[edx]);
645
646 /* Get "processor flag"; necessary for microcode update matching */
647 info_p->cpuid_processor_flag = (rdmsr64(MSR_IA32_PLATFORM_ID) >> 50) & 0x7;
648
649 /* Fold extensions into family/model */
650 if (info_p->cpuid_family == 0x0f) {
651 info_p->cpuid_family += info_p->cpuid_extfamily;
652 }
653 if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06) {
654 info_p->cpuid_model += (info_p->cpuid_extmodel << 4);
655 }
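	/*
	 * For example, a CPUID signature of 0x000906EA decodes to family 0x6,
	 * base model 0xE, extended model 0x9 and stepping 0xA; the fold above
	 * produces the effective model 0x9E.
	 */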
656
657 if (info_p->cpuid_features & CPUID_FEATURE_HTT) {
658 info_p->cpuid_logical_per_package =
659 bitfield32(reg[ebx], 23, 16);
660 } else {
661 info_p->cpuid_logical_per_package = 1;
662 }
663
664 if (info_p->cpuid_max_ext >= 0x80000001) {
665 cpuid_fn(0x80000001, reg);
666 info_p->cpuid_extfeatures =
667 quad(reg[ecx], reg[edx]);
668 }
669
670 DBG(" max_basic : %d\n", info_p->cpuid_max_basic);
671 DBG(" max_ext : 0x%08x\n", info_p->cpuid_max_ext);
672 DBG(" vendor : %s\n", info_p->cpuid_vendor);
673 DBG(" brand_string : %s\n", info_p->cpuid_brand_string);
674 DBG(" signature : 0x%08x\n", info_p->cpuid_signature);
675 DBG(" stepping : %d\n", info_p->cpuid_stepping);
676 DBG(" model : %d\n", info_p->cpuid_model);
677 DBG(" family : %d\n", info_p->cpuid_family);
678 DBG(" type : %d\n", info_p->cpuid_type);
679 DBG(" extmodel : %d\n", info_p->cpuid_extmodel);
680 DBG(" extfamily : %d\n", info_p->cpuid_extfamily);
681 DBG(" brand : %d\n", info_p->cpuid_brand);
682 DBG(" features : 0x%016llx\n", info_p->cpuid_features);
683 DBG(" extfeatures : 0x%016llx\n", info_p->cpuid_extfeatures);
684 DBG(" logical_per_package : %d\n", info_p->cpuid_logical_per_package);
685 DBG(" microcode_version : 0x%08x\n", info_p->cpuid_microcode_version);
686
687 /* Fold in the Invariant TSC feature bit, if present */
688 if (info_p->cpuid_max_ext >= 0x80000007) {
689 cpuid_fn(0x80000007, reg);
690 info_p->cpuid_extfeatures |=
691 reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
692 DBG(" extfeatures : 0x%016llx\n",
693 info_p->cpuid_extfeatures);
694 }
695
696 if (info_p->cpuid_max_basic >= 0x5) {
697 cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf;
698
699 /*
700 * Extract the Monitor/Mwait Leaf info:
701 */
702 cpuid_fn(5, reg);
703 cmp->linesize_min = reg[eax];
704 cmp->linesize_max = reg[ebx];
705 cmp->extensions = reg[ecx];
706 cmp->sub_Cstates = reg[edx];
707 info_p->cpuid_mwait_leafp = cmp;
708
709 DBG(" Monitor/Mwait Leaf:\n");
710 DBG(" linesize_min : %d\n", cmp->linesize_min);
711 DBG(" linesize_max : %d\n", cmp->linesize_max);
712 DBG(" extensions : %d\n", cmp->extensions);
713 DBG(" sub_Cstates : 0x%08x\n", cmp->sub_Cstates);
714 }
715
716 if (info_p->cpuid_max_basic >= 0x6) {
717 cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf;
718
719 /*
720 * The thermal and Power Leaf:
721 */
722 cpuid_fn(6, reg);
723 ctp->sensor = bitfield32(reg[eax], 0, 0);
724 ctp->dynamic_acceleration = bitfield32(reg[eax], 1, 1);
725 ctp->invariant_APIC_timer = bitfield32(reg[eax], 2, 2);
726 ctp->core_power_limits = bitfield32(reg[eax], 4, 4);
727 ctp->fine_grain_clock_mod = bitfield32(reg[eax], 5, 5);
728 ctp->package_thermal_intr = bitfield32(reg[eax], 6, 6);
729 ctp->thresholds = bitfield32(reg[ebx], 3, 0);
730 ctp->ACNT_MCNT = bitfield32(reg[ecx], 0, 0);
731 ctp->hardware_feedback = bitfield32(reg[ecx], 1, 1);
732 ctp->energy_policy = bitfield32(reg[ecx], 3, 3);
733 info_p->cpuid_thermal_leafp = ctp;
734
735 DBG(" Thermal/Power Leaf:\n");
736 DBG(" sensor : %d\n", ctp->sensor);
737 DBG(" dynamic_acceleration : %d\n", ctp->dynamic_acceleration);
738 DBG(" invariant_APIC_timer : %d\n", ctp->invariant_APIC_timer);
739 DBG(" core_power_limits : %d\n", ctp->core_power_limits);
740 DBG(" fine_grain_clock_mod : %d\n", ctp->fine_grain_clock_mod);
741 DBG(" package_thermal_intr : %d\n", ctp->package_thermal_intr);
742 DBG(" thresholds : %d\n", ctp->thresholds);
743 DBG(" ACNT_MCNT : %d\n", ctp->ACNT_MCNT);
744 DBG(" ACNT2 : %d\n", ctp->hardware_feedback);
745 DBG(" energy_policy : %d\n", ctp->energy_policy);
746 }
747
748 if (info_p->cpuid_max_basic >= 0xa) {
749 cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf;
750
751 /*
752 * Architectural Performance Monitoring Leaf:
753 */
754 cpuid_fn(0xa, reg);
755 capp->version = bitfield32(reg[eax], 7, 0);
756 capp->number = bitfield32(reg[eax], 15, 8);
757 capp->width = bitfield32(reg[eax], 23, 16);
758 capp->events_number = bitfield32(reg[eax], 31, 24);
759 capp->events = reg[ebx];
760 capp->fixed_number = bitfield32(reg[edx], 4, 0);
761 capp->fixed_width = bitfield32(reg[edx], 12, 5);
762 info_p->cpuid_arch_perf_leafp = capp;
763
764 DBG(" Architectural Performance Monitoring Leaf:\n");
765 DBG(" version : %d\n", capp->version);
766 DBG(" number : %d\n", capp->number);
767 DBG(" width : %d\n", capp->width);
768 DBG(" events_number : %d\n", capp->events_number);
769 DBG(" events : %d\n", capp->events);
770 DBG(" fixed_number : %d\n", capp->fixed_number);
771 DBG(" fixed_width : %d\n", capp->fixed_width);
772 }
773
774 if (info_p->cpuid_max_basic >= 0xd) {
775 cpuid_xsave_leaf_t *xsp;
776 /*
777 * XSAVE Features:
778 */
779 xsp = &info_p->cpuid_xsave_leaf[0];
780 info_p->cpuid_xsave_leafp = xsp;
781 xsp->extended_state[eax] = 0xd;
782 xsp->extended_state[ecx] = 0;
783 cpuid(xsp->extended_state);
784 DBG(" XSAVE Main leaf:\n");
785 DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
786 DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
787 DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
788 DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
789
790 xsp = &info_p->cpuid_xsave_leaf[1];
791 xsp->extended_state[eax] = 0xd;
792 xsp->extended_state[ecx] = 1;
793 cpuid(xsp->extended_state);
794 DBG(" XSAVE Sub-leaf1:\n");
795 DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
796 DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
797 DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
798 DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
799 }
800
801 if (info_p->cpuid_model >= CPUID_MODEL_IVYBRIDGE) {
802 /*
803 * Leaf7 Features:
804 */
805 cpuid_fn(0x7, reg);
806 info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]);
807 info_p->cpuid_leaf7_extfeatures = reg[edx];
808
809 DBG(" Feature Leaf7:\n");
810 DBG(" EBX : 0x%x\n", reg[ebx]);
811 DBG(" ECX : 0x%x\n", reg[ecx]);
812 DBG(" EDX : 0x%x\n", reg[edx]);
813 }
814
815 if (info_p->cpuid_max_basic >= 0x15) {
816 /*
817 		 * TSC/CCC frequency leaf:
818 */
819 cpuid_fn(0x15, reg);
820 info_p->cpuid_tsc_leaf.denominator = reg[eax];
821 info_p->cpuid_tsc_leaf.numerator = reg[ebx];
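		/*
		 * When both values are non-zero, the TSC frequency is the core
		 * crystal clock frequency scaled by numerator / denominator
		 * (see the Intel SDM description of CPUID leaf 0x15).
		 */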
822
823 DBG(" TSC/CCC Information Leaf:\n");
824 DBG(" numerator : 0x%x\n", reg[ebx]);
825 DBG(" denominator : 0x%x\n", reg[eax]);
826 }
827
828 return;
829 }
830
831 static uint32_t
832 cpuid_set_cpufamily(i386_cpu_info_t *info_p)
833 {
834 uint32_t cpufamily = CPUFAMILY_UNKNOWN;
835
836 switch (info_p->cpuid_family) {
837 case 6:
838 switch (info_p->cpuid_model) {
839 case 23:
840 cpufamily = CPUFAMILY_INTEL_PENRYN;
841 break;
842 case CPUID_MODEL_NEHALEM:
843 case CPUID_MODEL_FIELDS:
844 case CPUID_MODEL_DALES:
845 case CPUID_MODEL_NEHALEM_EX:
846 cpufamily = CPUFAMILY_INTEL_NEHALEM;
847 break;
848 case CPUID_MODEL_DALES_32NM:
849 case CPUID_MODEL_WESTMERE:
850 case CPUID_MODEL_WESTMERE_EX:
851 cpufamily = CPUFAMILY_INTEL_WESTMERE;
852 break;
853 case CPUID_MODEL_SANDYBRIDGE:
854 case CPUID_MODEL_JAKETOWN:
855 cpufamily = CPUFAMILY_INTEL_SANDYBRIDGE;
856 break;
857 case CPUID_MODEL_IVYBRIDGE:
858 case CPUID_MODEL_IVYBRIDGE_EP:
859 cpufamily = CPUFAMILY_INTEL_IVYBRIDGE;
860 break;
861 case CPUID_MODEL_HASWELL:
862 case CPUID_MODEL_HASWELL_EP:
863 case CPUID_MODEL_HASWELL_ULT:
864 case CPUID_MODEL_CRYSTALWELL:
865 cpufamily = CPUFAMILY_INTEL_HASWELL;
866 break;
867 case CPUID_MODEL_BROADWELL:
868 case CPUID_MODEL_BRYSTALWELL:
869 cpufamily = CPUFAMILY_INTEL_BROADWELL;
870 break;
871 case CPUID_MODEL_SKYLAKE:
872 case CPUID_MODEL_SKYLAKE_DT:
873 #if !defined(RC_HIDE_XNU_J137)
874 case CPUID_MODEL_SKYLAKE_W:
875 #endif
876 cpufamily = CPUFAMILY_INTEL_SKYLAKE;
877 break;
878 case CPUID_MODEL_KABYLAKE:
879 case CPUID_MODEL_KABYLAKE_DT:
880 cpufamily = CPUFAMILY_INTEL_KABYLAKE;
881 break;
882 }
883 break;
884 }
885
886 info_p->cpuid_cpufamily = cpufamily;
887 DBG("cpuid_set_cpufamily(%p) returning 0x%x\n", info_p, cpufamily);
888 return cpufamily;
889 }
890 /*
891 * Must be invoked either when executing single threaded, or with
892 * independent synchronization.
893 */
894 void
895 cpuid_set_info(void)
896 {
897 i386_cpu_info_t *info_p = &cpuid_cpu_info;
898 boolean_t enable_x86_64h = TRUE;
899
900 cpuid_set_generic_info(info_p);
901
902 /* verify we are running on a supported CPU */
903 if ((strncmp(CPUID_VID_INTEL, info_p->cpuid_vendor,
904 min(strlen(CPUID_STRING_UNKNOWN) + 1,
905 sizeof(info_p->cpuid_vendor)))) ||
906 (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN)) {
907 panic("Unsupported CPU");
908 }
909
910 info_p->cpuid_cpu_type = CPU_TYPE_X86;
911
912 if (!PE_parse_boot_argn("-enable_x86_64h", &enable_x86_64h, sizeof(enable_x86_64h))) {
913 boolean_t disable_x86_64h = FALSE;
914
915 if (PE_parse_boot_argn("-disable_x86_64h", &disable_x86_64h, sizeof(disable_x86_64h))) {
916 enable_x86_64h = FALSE;
917 }
918 }
919
920 if (enable_x86_64h &&
921 ((info_p->cpuid_features & CPUID_X86_64_H_FEATURE_SUBSET) == CPUID_X86_64_H_FEATURE_SUBSET) &&
922 ((info_p->cpuid_extfeatures & CPUID_X86_64_H_EXTFEATURE_SUBSET) == CPUID_X86_64_H_EXTFEATURE_SUBSET) &&
923 ((info_p->cpuid_leaf7_features & CPUID_X86_64_H_LEAF7_FEATURE_SUBSET) == CPUID_X86_64_H_LEAF7_FEATURE_SUBSET)) {
924 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_64_H;
925 } else {
926 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
927 }
928 /* cpuid_set_cache_info must be invoked after set_generic_info */
929
930 /*
931 * Find the number of enabled cores and threads
932 * (which determines whether SMT/Hyperthreading is active).
933 */
934
935 if (0 != (info_p->cpuid_features & CPUID_FEATURE_VMM) &&
936 PE_parse_boot_argn("-nomsr35h", NULL, 0)) {
937 info_p->core_count = 1;
938 info_p->thread_count = 1;
939 cpuid_set_cache_info(info_p);
940 } else {
941 switch (info_p->cpuid_cpufamily) {
942 case CPUFAMILY_INTEL_PENRYN:
943 cpuid_set_cache_info(info_p);
944 info_p->core_count = info_p->cpuid_cores_per_package;
945 info_p->thread_count = info_p->cpuid_logical_per_package;
946 break;
947 case CPUFAMILY_INTEL_WESTMERE: {
948 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
949 if (0 == msr) {
950 /* Provide a non-zero default for some VMMs */
951 msr = (1 << 16) | 1;
952 }
953 info_p->core_count = bitfield32((uint32_t)msr, 19, 16);
954 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
955 cpuid_set_cache_info(info_p);
956 break;
957 }
958 default: {
959 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
960 if (0 == msr) {
961 /* Provide a non-zero default for some VMMs */
962 msr = (1 << 16) | 1;
963 }
964 info_p->core_count = bitfield32((uint32_t)msr, 31, 16);
965 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
966 cpuid_set_cache_info(info_p);
967 break;
968 }
969 }
970 }
971
972 DBG("cpuid_set_info():\n");
973 DBG(" core_count : %d\n", info_p->core_count);
974 DBG(" thread_count : %d\n", info_p->thread_count);
975 DBG(" cpu_type: 0x%08x\n", info_p->cpuid_cpu_type);
976 DBG(" cpu_subtype: 0x%08x\n", info_p->cpuid_cpu_subtype);
977
978 info_p->cpuid_model_string = ""; /* deprecated */
979
980 do_cwas(info_p, FALSE);
981 }
982
983 static struct table {
984 uint64_t mask;
985 const char *name;
986 } feature_map[] = {
987 {CPUID_FEATURE_FPU, "FPU"},
988 {CPUID_FEATURE_VME, "VME"},
989 {CPUID_FEATURE_DE, "DE"},
990 {CPUID_FEATURE_PSE, "PSE"},
991 {CPUID_FEATURE_TSC, "TSC"},
992 {CPUID_FEATURE_MSR, "MSR"},
993 {CPUID_FEATURE_PAE, "PAE"},
994 {CPUID_FEATURE_MCE, "MCE"},
995 {CPUID_FEATURE_CX8, "CX8"},
996 {CPUID_FEATURE_APIC, "APIC"},
997 {CPUID_FEATURE_SEP, "SEP"},
998 {CPUID_FEATURE_MTRR, "MTRR"},
999 {CPUID_FEATURE_PGE, "PGE"},
1000 {CPUID_FEATURE_MCA, "MCA"},
1001 {CPUID_FEATURE_CMOV, "CMOV"},
1002 {CPUID_FEATURE_PAT, "PAT"},
1003 {CPUID_FEATURE_PSE36, "PSE36"},
1004 {CPUID_FEATURE_PSN, "PSN"},
1005 {CPUID_FEATURE_CLFSH, "CLFSH"},
1006 {CPUID_FEATURE_DS, "DS"},
1007 {CPUID_FEATURE_ACPI, "ACPI"},
1008 {CPUID_FEATURE_MMX, "MMX"},
1009 {CPUID_FEATURE_FXSR, "FXSR"},
1010 {CPUID_FEATURE_SSE, "SSE"},
1011 {CPUID_FEATURE_SSE2, "SSE2"},
1012 {CPUID_FEATURE_SS, "SS"},
1013 {CPUID_FEATURE_HTT, "HTT"},
1014 {CPUID_FEATURE_TM, "TM"},
1015 {CPUID_FEATURE_PBE, "PBE"},
1016 {CPUID_FEATURE_SSE3, "SSE3"},
1017 {CPUID_FEATURE_PCLMULQDQ, "PCLMULQDQ"},
1018 {CPUID_FEATURE_DTES64, "DTES64"},
1019 {CPUID_FEATURE_MONITOR, "MON"},
1020 {CPUID_FEATURE_DSCPL, "DSCPL"},
1021 {CPUID_FEATURE_VMX, "VMX"},
1022 {CPUID_FEATURE_SMX, "SMX"},
1023 {CPUID_FEATURE_EST, "EST"},
1024 {CPUID_FEATURE_TM2, "TM2"},
1025 {CPUID_FEATURE_SSSE3, "SSSE3"},
1026 {CPUID_FEATURE_CID, "CID"},
1027 {CPUID_FEATURE_FMA, "FMA"},
1028 {CPUID_FEATURE_CX16, "CX16"},
1029 {CPUID_FEATURE_xTPR, "TPR"},
1030 {CPUID_FEATURE_PDCM, "PDCM"},
1031 {CPUID_FEATURE_SSE4_1, "SSE4.1"},
1032 {CPUID_FEATURE_SSE4_2, "SSE4.2"},
1033 {CPUID_FEATURE_x2APIC, "x2APIC"},
1034 {CPUID_FEATURE_MOVBE, "MOVBE"},
1035 {CPUID_FEATURE_POPCNT, "POPCNT"},
1036 {CPUID_FEATURE_AES, "AES"},
1037 {CPUID_FEATURE_VMM, "VMM"},
1038 {CPUID_FEATURE_PCID, "PCID"},
1039 {CPUID_FEATURE_XSAVE, "XSAVE"},
1040 {CPUID_FEATURE_OSXSAVE, "OSXSAVE"},
1041 {CPUID_FEATURE_SEGLIM64, "SEGLIM64"},
1042 {CPUID_FEATURE_TSCTMR, "TSCTMR"},
1043 {CPUID_FEATURE_AVX1_0, "AVX1.0"},
1044 {CPUID_FEATURE_RDRAND, "RDRAND"},
1045 {CPUID_FEATURE_F16C, "F16C"},
1046 {0, 0}
1047 },
1048 extfeature_map[] = {
1049 {CPUID_EXTFEATURE_SYSCALL, "SYSCALL"},
1050 {CPUID_EXTFEATURE_XD, "XD"},
1051 {CPUID_EXTFEATURE_1GBPAGE, "1GBPAGE"},
1052 {CPUID_EXTFEATURE_EM64T, "EM64T"},
1053 {CPUID_EXTFEATURE_LAHF, "LAHF"},
1054 {CPUID_EXTFEATURE_LZCNT, "LZCNT"},
1055 {CPUID_EXTFEATURE_PREFETCHW, "PREFETCHW"},
1056 {CPUID_EXTFEATURE_RDTSCP, "RDTSCP"},
1057 {CPUID_EXTFEATURE_TSCI, "TSCI"},
1058 {0, 0}
1059 },
1060 leaf7_feature_map[] = {
1061 {CPUID_LEAF7_FEATURE_RDWRFSGS, "RDWRFSGS"},
1062 {CPUID_LEAF7_FEATURE_TSCOFF, "TSC_THREAD_OFFSET"},
1063 {CPUID_LEAF7_FEATURE_SGX, "SGX"},
1064 {CPUID_LEAF7_FEATURE_BMI1, "BMI1"},
1065 {CPUID_LEAF7_FEATURE_HLE, "HLE"},
1066 {CPUID_LEAF7_FEATURE_AVX2, "AVX2"},
1067 {CPUID_LEAF7_FEATURE_FDPEO, "FDPEO"},
1068 {CPUID_LEAF7_FEATURE_SMEP, "SMEP"},
1069 {CPUID_LEAF7_FEATURE_BMI2, "BMI2"},
1070 {CPUID_LEAF7_FEATURE_ERMS, "ERMS"},
1071 {CPUID_LEAF7_FEATURE_INVPCID, "INVPCID"},
1072 {CPUID_LEAF7_FEATURE_RTM, "RTM"},
1073 {CPUID_LEAF7_FEATURE_PQM, "PQM"},
1074 {CPUID_LEAF7_FEATURE_FPU_CSDS, "FPU_CSDS"},
1075 {CPUID_LEAF7_FEATURE_MPX, "MPX"},
1076 {CPUID_LEAF7_FEATURE_PQE, "PQE"},
1077 {CPUID_LEAF7_FEATURE_AVX512F, "AVX512F"},
1078 {CPUID_LEAF7_FEATURE_AVX512DQ, "AVX512DQ"},
1079 {CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"},
1080 {CPUID_LEAF7_FEATURE_ADX, "ADX"},
1081 {CPUID_LEAF7_FEATURE_SMAP, "SMAP"},
1082 {CPUID_LEAF7_FEATURE_AVX512IFMA, "AVX512IFMA"},
1083 {CPUID_LEAF7_FEATURE_CLFSOPT, "CLFSOPT"},
1084 {CPUID_LEAF7_FEATURE_CLWB, "CLWB"},
1085 {CPUID_LEAF7_FEATURE_IPT, "IPT"},
1086 {CPUID_LEAF7_FEATURE_AVX512CD, "AVX512CD"},
1087 {CPUID_LEAF7_FEATURE_SHA, "SHA"},
1088 {CPUID_LEAF7_FEATURE_AVX512BW, "AVX512BW"},
1089 {CPUID_LEAF7_FEATURE_AVX512VL, "AVX512VL"},
1090 {CPUID_LEAF7_FEATURE_PREFETCHWT1, "PREFETCHWT1"},
1091 {CPUID_LEAF7_FEATURE_AVX512VBMI, "AVX512VBMI"},
1092 {CPUID_LEAF7_FEATURE_UMIP, "UMIP"},
1093 {CPUID_LEAF7_FEATURE_PKU, "PKU"},
1094 {CPUID_LEAF7_FEATURE_OSPKE, "OSPKE"},
1095 {CPUID_LEAF7_FEATURE_WAITPKG, "WAITPKG"},
1096 {CPUID_LEAF7_FEATURE_GFNI, "GFNI"},
1097 {CPUID_LEAF7_FEATURE_VAES, "VAES"},
1098 {CPUID_LEAF7_FEATURE_VPCLMULQDQ, "VPCLMULQDQ"},
1099 {CPUID_LEAF7_FEATURE_AVX512VNNI, "AVX512VNNI"},
1100 {CPUID_LEAF7_FEATURE_AVX512BITALG, "AVX512BITALG"},
1101 {CPUID_LEAF7_FEATURE_AVX512VPCDQ, "AVX512VPOPCNTDQ"},
1102 {CPUID_LEAF7_FEATURE_RDPID, "RDPID"},
1103 {CPUID_LEAF7_FEATURE_CLDEMOTE, "CLDEMOTE"},
1104 {CPUID_LEAF7_FEATURE_MOVDIRI, "MOVDIRI"},
1105 {CPUID_LEAF7_FEATURE_MOVDIRI64B, "MOVDIRI64B"},
1106 {CPUID_LEAF7_FEATURE_SGXLC, "SGXLC"},
1107 {0, 0}
1108 },
1109 leaf7_extfeature_map[] = {
1110 { CPUID_LEAF7_EXTFEATURE_AVX5124VNNIW, "AVX5124VNNIW" },
1111 { CPUID_LEAF7_EXTFEATURE_AVX5124FMAPS, "AVX5124FMAPS" },
1112 { CPUID_LEAF7_EXTFEATURE_FSREPMOV, "FSREPMOV" },
1113 { CPUID_LEAF7_EXTFEATURE_MDCLEAR, "MDCLEAR" },
1114 { CPUID_LEAF7_EXTFEATURE_TSXFA, "TSXFA" },
1115 { CPUID_LEAF7_EXTFEATURE_IBRS, "IBRS" },
1116 { CPUID_LEAF7_EXTFEATURE_STIBP, "STIBP" },
1117 { CPUID_LEAF7_EXTFEATURE_L1DF, "L1DF" },
1118 { CPUID_LEAF7_EXTFEATURE_ACAPMSR, "ACAPMSR" },
1119 { CPUID_LEAF7_EXTFEATURE_CCAPMSR, "CCAPMSR" },
1120 { CPUID_LEAF7_EXTFEATURE_SSBD, "SSBD" },
1121 {0, 0}
1122 };
1123
1124 static char *
1125 cpuid_get_names(struct table *map, uint64_t bits, char *buf, unsigned buf_len)
1126 {
1127 size_t len = 0;
1128 char *p = buf;
1129 int i;
1130
1131 for (i = 0; map[i].mask != 0; i++) {
1132 if ((bits & map[i].mask) == 0) {
1133 continue;
1134 }
1135 if (len && ((size_t) (p - buf) < (buf_len - 1))) {
1136 *p++ = ' ';
1137 }
1138 len = min(strlen(map[i].name), (size_t)((buf_len - 1) - (p - buf)));
1139 if (len == 0) {
1140 break;
1141 }
1142 bcopy(map[i].name, p, len);
1143 p += len;
1144 }
1145 *p = '\0';
1146 return buf;
1147 }
1148
1149 i386_cpu_info_t *
1150 cpuid_info(void)
1151 {
1152 	/* Set up the cpuid_info structure lazily */
1153 if (cpuid_cpu_infop == NULL) {
1154 PE_parse_boot_argn("-cpuid", &cpuid_dbg, sizeof(cpuid_dbg));
1155 cpuid_set_info();
1156 cpuid_cpu_infop = &cpuid_cpu_info;
1157 }
1158 return cpuid_cpu_infop;
1159 }
1160
1161 char *
1162 cpuid_get_feature_names(uint64_t features, char *buf, unsigned buf_len)
1163 {
1164 return cpuid_get_names(feature_map, features, buf, buf_len);
1165 }
1166
1167 char *
1168 cpuid_get_extfeature_names(uint64_t extfeatures, char *buf, unsigned buf_len)
1169 {
1170 return cpuid_get_names(extfeature_map, extfeatures, buf, buf_len);
1171 }
1172
1173 char *
1174 cpuid_get_leaf7_feature_names(uint64_t features, char *buf, unsigned buf_len)
1175 {
1176 return cpuid_get_names(leaf7_feature_map, features, buf, buf_len);
1177 }
1178
1179 char *
1180 cpuid_get_leaf7_extfeature_names(uint64_t features, char *buf, unsigned buf_len)
1181 {
1182 return cpuid_get_names(leaf7_extfeature_map, features, buf, buf_len);
1183 }
1184
1185 void
1186 cpuid_feature_display(
1187 const char *header)
1188 {
1189 char buf[320];
1190
1191 kprintf("%s: %s", header,
1192 cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf)));
1193 if (cpuid_leaf7_features()) {
1194 kprintf(" %s", cpuid_get_leaf7_feature_names(
1195 cpuid_leaf7_features(), buf, sizeof(buf)));
1196 }
1197 if (cpuid_leaf7_extfeatures()) {
1198 kprintf(" %s", cpuid_get_leaf7_extfeature_names(
1199 cpuid_leaf7_extfeatures(), buf, sizeof(buf)));
1200 }
1201 kprintf("\n");
1202 if (cpuid_features() & CPUID_FEATURE_HTT) {
1203 #define s_if_plural(n) ((n > 1) ? "s" : "")
1204 kprintf(" HTT: %d core%s per package;"
1205 " %d logical cpu%s per package\n",
1206 cpuid_cpu_infop->cpuid_cores_per_package,
1207 s_if_plural(cpuid_cpu_infop->cpuid_cores_per_package),
1208 cpuid_cpu_infop->cpuid_logical_per_package,
1209 s_if_plural(cpuid_cpu_infop->cpuid_logical_per_package));
1210 }
1211 }
1212
1213 void
1214 cpuid_extfeature_display(
1215 const char *header)
1216 {
1217 char buf[256];
1218
1219 kprintf("%s: %s\n", header,
1220 cpuid_get_extfeature_names(cpuid_extfeatures(),
1221 buf, sizeof(buf)));
1222 }
1223
1224 void
1225 cpuid_cpu_display(
1226 const char *header)
1227 {
1228 if (cpuid_cpu_infop->cpuid_brand_string[0] != '\0') {
1229 kprintf("%s: %s\n", header, cpuid_cpu_infop->cpuid_brand_string);
1230 }
1231 }
1232
1233 unsigned int
1234 cpuid_family(void)
1235 {
1236 return cpuid_info()->cpuid_family;
1237 }
1238
1239 uint32_t
1240 cpuid_cpufamily(void)
1241 {
1242 return cpuid_info()->cpuid_cpufamily;
1243 }
1244
1245 cpu_type_t
1246 cpuid_cputype(void)
1247 {
1248 return cpuid_info()->cpuid_cpu_type;
1249 }
1250
1251 cpu_subtype_t
1252 cpuid_cpusubtype(void)
1253 {
1254 return cpuid_info()->cpuid_cpu_subtype;
1255 }
1256
1257 uint64_t
1258 cpuid_features(void)
1259 {
1260 static int checked = 0;
1261 char fpu_arg[20] = { 0 };
1262
1263 (void) cpuid_info();
1264 if (!checked) {
1265 /* check for boot-time fpu limitations */
1266 if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof(fpu_arg))) {
1267 printf("limiting fpu features to: %s\n", fpu_arg);
1268 if (!strncmp("387", fpu_arg, sizeof("387")) || !strncmp("mmx", fpu_arg, sizeof("mmx"))) {
1269 printf("no sse or sse2\n");
1270 cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE | CPUID_FEATURE_SSE2 | CPUID_FEATURE_FXSR);
1271 } else if (!strncmp("sse", fpu_arg, sizeof("sse"))) {
1272 printf("no sse2\n");
1273 cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE2);
1274 }
1275 }
1276 checked = 1;
1277 }
1278 return cpuid_cpu_infop->cpuid_features;
1279 }
1280
1281 uint64_t
1282 cpuid_extfeatures(void)
1283 {
1284 return cpuid_info()->cpuid_extfeatures;
1285 }
1286
1287 uint64_t
1288 cpuid_leaf7_features(void)
1289 {
1290 return cpuid_info()->cpuid_leaf7_features;
1291 }
1292
1293 uint64_t
1294 cpuid_leaf7_extfeatures(void)
1295 {
1296 return cpuid_info()->cpuid_leaf7_extfeatures;
1297 }
1298
1299 static i386_vmm_info_t *_cpuid_vmm_infop = NULL;
1300 static i386_vmm_info_t _cpuid_vmm_info;
1301
1302 static void
1303 cpuid_init_vmm_info(i386_vmm_info_t *info_p)
1304 {
1305 uint32_t reg[4];
1306 uint32_t max_vmm_leaf;
1307
1308 bzero(info_p, sizeof(*info_p));
1309
1310 if (!cpuid_vmm_present()) {
1311 return;
1312 }
1313
1314 DBG("cpuid_init_vmm_info(%p)\n", info_p);
1315
1316 /* do cpuid 0x40000000 to get VMM vendor */
1317 cpuid_fn(0x40000000, reg);
1318 max_vmm_leaf = reg[eax];
1319 bcopy((char *)&reg[ebx], &info_p->cpuid_vmm_vendor[0], 4);
1320 bcopy((char *)&reg[ecx], &info_p->cpuid_vmm_vendor[4], 4);
1321 bcopy((char *)&reg[edx], &info_p->cpuid_vmm_vendor[8], 4);
1322 info_p->cpuid_vmm_vendor[12] = '\0';
1323
1324 if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_VMWARE)) {
1325 /* VMware identification string: kb.vmware.com/kb/1009458 */
1326 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_VMWARE;
1327 } else if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_PARALLELS)) {
1328 /* Parallels identification string */
1329 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_PARALLELS;
1330 } else {
1331 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_UNKNOWN;
1332 }
1333
1334 /* VMM generic leaves: https://lkml.org/lkml/2008/10/1/246 */
1335 if (max_vmm_leaf >= 0x40000010) {
1336 cpuid_fn(0x40000010, reg);
1337
1338 info_p->cpuid_vmm_tsc_frequency = reg[eax];
1339 info_p->cpuid_vmm_bus_frequency = reg[ebx];
1340 }
1341
1342 DBG(" vmm_vendor : %s\n", info_p->cpuid_vmm_vendor);
1343 DBG(" vmm_family : %u\n", info_p->cpuid_vmm_family);
1344 DBG(" vmm_bus_frequency : %u\n", info_p->cpuid_vmm_bus_frequency);
1345 DBG(" vmm_tsc_frequency : %u\n", info_p->cpuid_vmm_tsc_frequency);
1346 }
1347
1348 boolean_t
1349 cpuid_vmm_present(void)
1350 {
1351 return (cpuid_features() & CPUID_FEATURE_VMM) ? TRUE : FALSE;
1352 }
1353
1354 i386_vmm_info_t *
1355 cpuid_vmm_info(void)
1356 {
1357 if (_cpuid_vmm_infop == NULL) {
1358 cpuid_init_vmm_info(&_cpuid_vmm_info);
1359 _cpuid_vmm_infop = &_cpuid_vmm_info;
1360 }
1361 return _cpuid_vmm_infop;
1362 }
1363
1364 uint32_t
1365 cpuid_vmm_family(void)
1366 {
1367 return cpuid_vmm_info()->cpuid_vmm_family;
1368 }
1369
1370 cwa_classifier_e
1371 cpuid_wa_required(cpu_wa_e wa)
1372 {
1373 static uint64_t bootarg_cpu_wa_enables = 0;
1374 static uint64_t bootarg_cpu_wa_disables = 0;
1375 static int bootargs_overrides_processed = 0;
1376 i386_cpu_info_t *info_p = &cpuid_cpu_info;
1377
1378 if (!bootargs_overrides_processed) {
1379 if (!PE_parse_boot_argn("cwae", &bootarg_cpu_wa_enables, sizeof(bootarg_cpu_wa_enables))) {
1380 bootarg_cpu_wa_enables = 0;
1381 }
1382
1383 if (!PE_parse_boot_argn("cwad", &bootarg_cpu_wa_disables, sizeof(bootarg_cpu_wa_disables))) {
1384 bootarg_cpu_wa_disables = 0;
1385 }
1386 bootargs_overrides_processed = 1;
1387 }
1388
1389 if (bootarg_cpu_wa_enables & (1 << wa)) {
1390 return CWA_FORCE_ON;
1391 }
1392
1393 if (bootarg_cpu_wa_disables & (1 << wa)) {
1394 return CWA_FORCE_OFF;
1395 }
1396
1397 switch (wa) {
1398 case CPU_INTEL_SEGCHK:
1399 /* First, check to see if this CPU requires the workaround */
1400 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_ACAPMSR) != 0) {
1401 /* We have ARCHCAP, so check it for either RDCL_NO or MDS_NO */
1402 uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES);
1403 if ((archcap_msr & (MSR_IA32_ARCH_CAPABILITIES_RDCL_NO | MSR_IA32_ARCH_CAPABILITIES_MDS_NO)) != 0) {
1404 /* Workaround not needed */
1405 return CWA_OFF;
1406 }
1407 }
1408
1409 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_MDCLEAR) != 0) {
1410 return CWA_ON;
1411 }
1412
1413 /*
1414 * If the CPU supports the ARCHCAP MSR and neither the RDCL_NO bit nor the MDS_NO
1415 * bit are set, OR the CPU does not support the ARCHCAP MSR and the CPU does
1416 * not enumerate the presence of the enhanced VERW instruction, report
1417 * that the workaround should not be enabled.
1418 */
1419 break;
1420
1421 case CPU_INTEL_TSXFA:
1422 /*
1423 * If this CPU supports RTM and supports FORCE_ABORT, return that
1424 * the workaround should be enabled.
1425 */
1426 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_TSXFA) != 0 &&
1427 (info_p->cpuid_leaf7_features & CPUID_LEAF7_FEATURE_RTM) != 0) {
1428 return CWA_ON;
1429 }
1430 break;
1431
1432 default:
1433 break;
1434 }
1435
1436 return CWA_OFF;
1437 }