[apple/xnu.git] / osfmk / i386 / cpuid.c
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 #include <vm/vm_page.h>
32 #include <pexpert/pexpert.h>
33
34 #include <i386/cpu_threads.h>
35 #include <i386/cpuid.h>
36
37 int force_tecs_at_idle;
38 int tecs_mode_supported;
39
40 static boolean_t cpuid_dbg
41 #if DEBUG
42 = TRUE;
43 #else
44 = FALSE;
45 #endif
46 #define DBG(x...) \
47 do { \
48 if (cpuid_dbg) \
49 kprintf(x); \
50 } while (0) \
51
52 #define min(a, b) ((a) < (b) ? (a) : (b))
53 #define quad(hi, lo) (((uint64_t)(hi)) << 32 | (lo))
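/* Illustrative (editorial) example: quad(0x1, 0x80000000) == 0x0000000180000000ULL. */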
54
55 /*
56 * Leaf 2 cache descriptor encodings.
57 */
58 typedef enum {
59 _NULL_, /* NULL (empty) descriptor */
60 CACHE, /* Cache */
61 TLB, /* TLB */
62 STLB, /* Shared second-level unified TLB */
63 PREFETCH /* Prefetch size */
64 } cpuid_leaf2_desc_type_t;
65
66 typedef enum {
67 NA, /* Not Applicable */
68 FULLY, /* Fully-associative */
69 TRACE, /* Trace Cache (P4 only) */
70 INST, /* Instruction TLB */
71 DATA, /* Data TLB */
72 DATA0, /* Data TLB, 1st level */
73 DATA1, /* Data TLB, 2nd level */
74 L1, /* L1 (unified) cache */
75 L1_INST, /* L1 Instruction cache */
76 L1_DATA, /* L1 Data cache */
77 L2, /* L2 (unified) cache */
78 L3, /* L3 (unified) cache */
79 L2_2LINESECTOR, /* L2 (unified) cache with 2 lines per sector */
80 L3_2LINESECTOR, /* L3 (unified) cache with 2 lines per sector */
81 SMALL, /* Small page TLB */
82 LARGE, /* Large page TLB */
83 BOTH /* Small and Large page TLB */
84 } cpuid_leaf2_qualifier_t;
85
86 typedef struct cpuid_cache_descriptor {
87 uint8_t value; /* descriptor code */
88 uint8_t type; /* cpuid_leaf2_desc_type_t */
89 uint8_t level; /* level of cache/TLB hierarchy */
90 uint8_t ways; /* wayness of cache */
91 uint16_t size; /* cachesize or TLB pagesize */
92 uint16_t entries; /* number of TLB entries or linesize */
93 } cpuid_cache_descriptor_t;
94
95 /*
96 * These multipliers are used to encode 1*K .. 64*M in a 16 bit size field
97 */
98 #define K (1)
99 #define M (1024)
100
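/*
 * Editorial note (not in the original source): sizes are stored in units of
 * 1KB, so a value such as 24 * M is kept as 24576 and still fits the 16-bit
 * size field; e.g. the entry { 0x2C, CACHE, L1_DATA, 8, 32 * K, 64 } in the
 * table below describes a 32KB, 8-way L1 data cache with 64-byte lines.
 */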
101 /*
102 * Intel cache descriptor table:
103 */
104 static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table[] = {
105 // -------------------------------------------------------
106 // value type level ways size entries
107 // -------------------------------------------------------
108 { 0x00, _NULL_, NA, NA, NA, NA },
109 { 0x01, TLB, INST, 4, SMALL, 32 },
110 { 0x02, TLB, INST, FULLY, LARGE, 2 },
111 { 0x03, TLB, DATA, 4, SMALL, 64 },
112 { 0x04, TLB, DATA, 4, LARGE, 8 },
113 { 0x05, TLB, DATA1, 4, LARGE, 32 },
114 { 0x06, CACHE, L1_INST, 4, 8 * K, 32 },
115 { 0x08, CACHE, L1_INST, 4, 16 * K, 32 },
116 { 0x09, CACHE, L1_INST, 4, 32 * K, 64 },
117 { 0x0A, CACHE, L1_DATA, 2, 8 * K, 32 },
118 { 0x0B, TLB, INST, 4, LARGE, 4 },
119 { 0x0C, CACHE, L1_DATA, 4, 16 * K, 32 },
120 { 0x0D, CACHE, L1_DATA, 4, 16 * K, 64 },
121 { 0x0E, CACHE, L1_DATA, 6, 24 * K, 64 },
122 { 0x21, CACHE, L2, 8, 256 * K, 64 },
123 { 0x22, CACHE, L3_2LINESECTOR, 4, 512 * K, 64 },
124 { 0x23, CACHE, L3_2LINESECTOR, 8, 1 * M, 64 },
125 { 0x25, CACHE, L3_2LINESECTOR, 8, 2 * M, 64 },
126 { 0x29, CACHE, L3_2LINESECTOR, 8, 4 * M, 64 },
127 { 0x2C, CACHE, L1_DATA, 8, 32 * K, 64 },
128 { 0x30, CACHE, L1_INST, 8, 32 * K, 64 },
129 { 0x40, CACHE, L2, NA, 0, NA },
130 { 0x41, CACHE, L2, 4, 128 * K, 32 },
131 { 0x42, CACHE, L2, 4, 256 * K, 32 },
132 { 0x43, CACHE, L2, 4, 512 * K, 32 },
133 { 0x44, CACHE, L2, 4, 1 * M, 32 },
134 { 0x45, CACHE, L2, 4, 2 * M, 32 },
135 { 0x46, CACHE, L3, 4, 4 * M, 64 },
136 { 0x47, CACHE, L3, 8, 8 * M, 64 },
137 { 0x48, CACHE, L2, 12, 3 * M, 64 },
138 { 0x49, CACHE, L2, 16, 4 * M, 64 },
139 { 0x4A, CACHE, L3, 12, 6 * M, 64 },
140 { 0x4B, CACHE, L3, 16, 8 * M, 64 },
141 { 0x4C, CACHE, L3, 12, 12 * M, 64 },
142 { 0x4D, CACHE, L3, 16, 16 * M, 64 },
143 { 0x4E, CACHE, L2, 24, 6 * M, 64 },
144 { 0x4F, TLB, INST, NA, SMALL, 32 },
145 { 0x50, TLB, INST, NA, BOTH, 64 },
146 { 0x51, TLB, INST, NA, BOTH, 128 },
147 { 0x52, TLB, INST, NA, BOTH, 256 },
148 { 0x55, TLB, INST, FULLY, BOTH, 7 },
149 { 0x56, TLB, DATA0, 4, LARGE, 16 },
150 { 0x57, TLB, DATA0, 4, SMALL, 16 },
151 { 0x59, TLB, DATA0, FULLY, SMALL, 16 },
152 { 0x5A, TLB, DATA0, 4, LARGE, 32 },
153 { 0x5B, TLB, DATA, NA, BOTH, 64 },
154 { 0x5C, TLB, DATA, NA, BOTH, 128 },
155 { 0x5D, TLB, DATA, NA, BOTH, 256 },
156 { 0x60, CACHE, L1, 8, 16 * K, 64 },
157 { 0x61, CACHE, L1, 4, 8 * K, 64 },
158 { 0x62, CACHE, L1, 4, 16 * K, 64 },
159 { 0x63, CACHE, L1, 4, 32 * K, 64 },
160 { 0x70, CACHE, TRACE, 8, 12 * K, NA },
161 { 0x71, CACHE, TRACE, 8, 16 * K, NA },
162 { 0x72, CACHE, TRACE, 8, 32 * K, NA },
163 { 0x76, TLB, INST, NA, BOTH, 8 },
164 { 0x78, CACHE, L2, 4, 1 * M, 64 },
165 { 0x79, CACHE, L2_2LINESECTOR, 8, 128 * K, 64 },
166 { 0x7A, CACHE, L2_2LINESECTOR, 8, 256 * K, 64 },
167 { 0x7B, CACHE, L2_2LINESECTOR, 8, 512 * K, 64 },
168 { 0x7C, CACHE, L2_2LINESECTOR, 8, 1 * M, 64 },
169 { 0x7D, CACHE, L2, 8, 2 * M, 64 },
170 { 0x7F, CACHE, L2, 2, 512 * K, 64 },
171 { 0x80, CACHE, L2, 8, 512 * K, 64 },
172 { 0x82, CACHE, L2, 8, 256 * K, 32 },
173 { 0x83, CACHE, L2, 8, 512 * K, 32 },
174 { 0x84, CACHE, L2, 8, 1 * M, 32 },
175 { 0x85, CACHE, L2, 8, 2 * M, 32 },
176 { 0x86, CACHE, L2, 4, 512 * K, 64 },
177 { 0x87, CACHE, L2, 8, 1 * M, 64 },
178 { 0xB0, TLB, INST, 4, SMALL, 128 },
179 { 0xB1, TLB, INST, 4, LARGE, 8 },
180 { 0xB2, TLB, INST, 4, SMALL, 64 },
181 { 0xB3, TLB, DATA, 4, SMALL, 128 },
182 { 0xB4, TLB, DATA1, 4, SMALL, 256 },
183 { 0xB5, TLB, DATA1, 8, SMALL, 64 },
184 { 0xB6, TLB, DATA1, 8, SMALL, 128 },
185 { 0xBA, TLB, DATA1, 4, BOTH, 64 },
186 { 0xC1, STLB, DATA1, 8, SMALL, 1024},
187 { 0xCA, STLB, DATA1, 4, SMALL, 512 },
188 { 0xD0, CACHE, L3, 4, 512 * K, 64 },
189 { 0xD1, CACHE, L3, 4, 1 * M, 64 },
190 { 0xD2, CACHE, L3, 4, 2 * M, 64 },
191 { 0xD3, CACHE, L3, 4, 4 * M, 64 },
192 { 0xD4, CACHE, L3, 4, 8 * M, 64 },
193 { 0xD6, CACHE, L3, 8, 1 * M, 64 },
194 { 0xD7, CACHE, L3, 8, 2 * M, 64 },
195 { 0xD8, CACHE, L3, 8, 4 * M, 64 },
196 { 0xD9, CACHE, L3, 8, 8 * M, 64 },
197 { 0xDA, CACHE, L3, 8, 12 * M, 64 },
198 { 0xDC, CACHE, L3, 12, 1536 * K, 64 },
199 { 0xDD, CACHE, L3, 12, 3 * M, 64 },
200 { 0xDE, CACHE, L3, 12, 6 * M, 64 },
201 { 0xDF, CACHE, L3, 12, 12 * M, 64 },
202 { 0xE0, CACHE, L3, 12, 18 * M, 64 },
203 { 0xE2, CACHE, L3, 16, 2 * M, 64 },
204 { 0xE3, CACHE, L3, 16, 4 * M, 64 },
205 { 0xE4, CACHE, L3, 16, 8 * M, 64 },
206 { 0xE5, CACHE, L3, 16, 16 * M, 64 },
207 { 0xE6, CACHE, L3, 16, 24 * M, 64 },
208 { 0xF0, PREFETCH, NA, NA, 64, NA },
209 { 0xF1, PREFETCH, NA, NA, 128, NA },
210 { 0xFF, CACHE, NA, NA, 0, NA }
211 };
212 #define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
213 sizeof(cpuid_cache_descriptor_t))
214
215 static void do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave);
216 static void cpuid_do_precpuid_was(void);
217
218 static inline cpuid_cache_descriptor_t *
219 cpuid_leaf2_find(uint8_t value)
220 {
221 unsigned int i;
222
223 for (i = 0; i < INTEL_LEAF2_DESC_NUM; i++) {
224 if (intel_cpuid_leaf2_descriptor_table[i].value == value) {
225 return &intel_cpuid_leaf2_descriptor_table[i];
226 }
227 }
228 return NULL;
229 }
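/*
 * Usage sketch (editorial, illustrative only -- the variable name below is
 * hypothetical): each non-zero descriptor byte returned by CPUID leaf 2 can
 * be looked up and interpreted like this:
 *
 *     cpuid_cache_descriptor_t *d = cpuid_leaf2_find(0xB2);
 *     if (d != NULL && d->type == TLB) {
 *         // 0xB2: 4-way instruction TLB, small pages, 64 entries
 *     }
 */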
230
231 /*
232 * CPU identification routines.
233 */
234
235 static i386_cpu_info_t cpuid_cpu_info;
236 static i386_cpu_info_t *cpuid_cpu_infop = NULL;
237
238 static void
239 cpuid_fn(uint32_t selector, uint32_t *result)
240 {
241 do_cpuid(selector, result);
242 DBG("cpuid_fn(0x%08x) eax:0x%08x ebx:0x%08x ecx:0x%08x edx:0x%08x\n",
243 selector, result[0], result[1], result[2], result[3]);
244 }
245
246 static const char *cache_type_str[LCACHE_MAX] = {
247 "Lnone", "L1I", "L1D", "L2U", "L3U"
248 };
249
250 static void
251 do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave)
252 {
253 extern int force_thread_policy_tecs;
254
255 /*
256 * Workaround for reclaiming perf counter 3 due to TSX memory ordering erratum.
257 * This workaround does not support being forcibly set (since an MSR must be
258 * enumerated, lest we #GP when forced to access it.)
259 */
260 if (cpuid_wa_required(CPU_INTEL_TSXFA) == CWA_ON) {
261 /* This must be executed on all logical processors */
262 wrmsr64(MSR_IA32_TSX_FORCE_ABORT,
263 rdmsr64(MSR_IA32_TSX_FORCE_ABORT) | MSR_IA32_TSXFA_RTM_FORCE_ABORT);
264 }
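/*
 * Editorial note: setting MSR_IA32_TSXFA_RTM_FORCE_ABORT makes all RTM
 * transactions abort immediately, which releases performance counter 3
 * (PMC3) from the erratum so the kernel can use it for profiling again.
 */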
265
266 if (on_slave) {
267 return;
268 }
269
270 switch (cpuid_wa_required(CPU_INTEL_SEGCHK)) {
271 case CWA_FORCE_ON:
272 force_thread_policy_tecs = 1;
273
274 /* If hyperthreaded, enable idle workaround */
275 if (cpuinfo->thread_count > cpuinfo->core_count) {
276 force_tecs_at_idle = 1;
277 }
278
279 /*FALLTHROUGH*/
280 case CWA_ON:
281 tecs_mode_supported = 1;
282 break;
283
284 case CWA_FORCE_OFF:
285 case CWA_OFF:
286 tecs_mode_supported = 0;
287 force_tecs_at_idle = 0;
288 force_thread_policy_tecs = 0;
289 break;
290
291 default:
292 break;
293 }
294 }
295
296 void
297 cpuid_do_was(void)
298 {
299 do_cwas(cpuid_info(), TRUE);
300 }
301
302 /* this function is Intel-specific */
303 static void
304 cpuid_set_cache_info( i386_cpu_info_t * info_p )
305 {
306 uint32_t cpuid_result[4];
307 uint32_t reg[4];
308 uint32_t index;
309 uint32_t linesizes[LCACHE_MAX];
310 unsigned int i;
311 unsigned int j;
312 boolean_t cpuid_deterministic_supported = FALSE;
313
314 DBG("cpuid_set_cache_info(%p)\n", info_p);
315
316 bzero( linesizes, sizeof(linesizes));
317
318 /* Get processor cache descriptor info using leaf 2. We don't use
319 * this internally, but must publish it for KEXTs.
320 */
321 cpuid_fn(2, cpuid_result);
322 for (j = 0; j < 4; j++) {
323 if ((cpuid_result[j] >> 31) == 1) { /* bit31 is validity */
324 continue;
325 }
326 ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j];
327 }
328 /* first byte gives number of cpuid calls to get all descriptors */
329 for (i = 1; i < info_p->cache_info[0]; i++) {
330 if (i * 16 > sizeof(info_p->cache_info)) {
331 break;
332 }
333 cpuid_fn(2, cpuid_result);
334 for (j = 0; j < 4; j++) {
335 if ((cpuid_result[j] >> 31) == 1) {
336 continue;
337 }
338 ((uint32_t *) info_p->cache_info)[4 * i + j] =
339 cpuid_result[j];
340 }
341 }
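/*
 * Editorial note: cache_info[0] (the low byte of EAX from CPUID leaf 2) is
 * the number of times the leaf must be executed to obtain every descriptor;
 * recent Intel processors report 0x01, so the loop above typically runs once.
 */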
342
343 /*
344 * Get cache info using leaf 4, the "deterministic cache parameters."
345 * Most processors Mac OS X supports implement this flavor of CPUID.
346 * Loop over each cache on the processor.
347 */
348 cpuid_fn(0, cpuid_result);
349 if (cpuid_result[eax] >= 4) {
350 cpuid_deterministic_supported = TRUE;
351 }
352
353 for (index = 0; cpuid_deterministic_supported; index++) {
354 cache_type_t type = Lnone;
355 uint32_t cache_type;
356 uint32_t cache_level;
357 uint32_t cache_sharing;
358 uint32_t cache_linesize;
359 uint32_t cache_sets;
360 uint32_t cache_associativity;
361 uint32_t cache_size;
362 uint32_t cache_partitions;
363 uint32_t colors;
364
365 reg[eax] = 4; /* cpuid request 4 */
366 reg[ecx] = index; /* index starting at 0 */
367 cpuid(reg);
368 DBG("cpuid(4) index=%d eax=0x%x\n", index, reg[eax]);
369 cache_type = bitfield32(reg[eax], 4, 0);
370 if (cache_type == 0) {
371 break; /* no more caches */
372 }
373 cache_level = bitfield32(reg[eax], 7, 5);
374 cache_sharing = bitfield32(reg[eax], 25, 14) + 1;
375 info_p->cpuid_cores_per_package
376 = bitfield32(reg[eax], 31, 26) + 1;
377 cache_linesize = bitfield32(reg[ebx], 11, 0) + 1;
378 cache_partitions = bitfield32(reg[ebx], 21, 12) + 1;
379 cache_associativity = bitfield32(reg[ebx], 31, 22) + 1;
380 cache_sets = bitfield32(reg[ecx], 31, 0) + 1;
381
382 /* Map type/levels returned by CPUID into cache_type_t */
383 switch (cache_level) {
384 case 1:
385 type = cache_type == 1 ? L1D :
386 cache_type == 2 ? L1I :
387 Lnone;
388 break;
389 case 2:
390 type = cache_type == 3 ? L2U :
391 Lnone;
392 break;
393 case 3:
394 type = cache_type == 3 ? L3U :
395 Lnone;
396 break;
397 default:
398 type = Lnone;
399 }
400
401 /* The total size of a cache is:
402 * ( linesize * sets * associativity * partitions )
403 */
404 if (type != Lnone) {
405 cache_size = cache_linesize * cache_sets *
406 cache_associativity * cache_partitions;
407 info_p->cache_size[type] = cache_size;
408 info_p->cache_sharing[type] = cache_sharing;
409 info_p->cache_partitions[type] = cache_partitions;
410 linesizes[type] = cache_linesize;
411
412 DBG(" cache_size[%s] : %d\n",
413 cache_type_str[type], cache_size);
414 DBG(" cache_sharing[%s] : %d\n",
415 cache_type_str[type], cache_sharing);
416 DBG(" cache_partitions[%s]: %d\n",
417 cache_type_str[type], cache_partitions);
418
419 /*
420 * Overwrite associativity determined via
421 * CPUID.0x80000006 -- this leaf is more
422 * accurate
423 */
424 if (type == L2U) {
425 info_p->cpuid_cache_L2_associativity = cache_associativity;
426 }
427 /*
428 * Adjust #sets to account for the N CBos
429 * This is because addresses are hashed across CBos
430 */
431 if (type == L3U && info_p->core_count) {
432 cache_sets = cache_sets / info_p->core_count;
433 }
434
435 /* Compute the number of page colors for this cache,
436 * which is:
437 * ( linesize * sets ) / page_size
438 *
439 * To help visualize this, consider two views of a
440 * physical address. To the cache, it is composed
441 * of a line offset, a set selector, and a tag.
442 * To VM, it is composed of a page offset, a page
443 * color, and other bits in the pageframe number:
444 *
445 * +-----------------+---------+--------+
446 * cache: | tag | set | offset |
447 * +-----------------+---------+--------+
448 *
449 * +-----------------+-------+----------+
450 * VM: | don't care | color | pg offset|
451 * +-----------------+-------+----------+
452 *
453 * The color is those bits in (set+offset) not covered
454 * by the page offset.
455 */
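/*
 * Worked example (editorial): with a 64-byte linesize and 8192 sets,
 * linesize * sets = 512KB of index span, so with 4KB pages there are
 * 512K / 4K = 128 page colors -- exactly what the >> 12 below computes.
 */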
456 colors = (cache_linesize * cache_sets) >> 12;
457
458 if (colors > vm_cache_geometry_colors) {
459 vm_cache_geometry_colors = colors;
460 }
461 }
462 }
463 DBG(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
464
465 /*
466 * If deterministic cache parameters are not available, use
467 * something else
468 */
469 if (info_p->cpuid_cores_per_package == 0) {
470 info_p->cpuid_cores_per_package = 1;
471
472 /* cpuid reports this cache size in 1K (1024-byte) units */
473 info_p->cache_size[L2U] = info_p->cpuid_cache_size * 1024;
474 info_p->cache_sharing[L2U] = 1;
475 info_p->cache_partitions[L2U] = 1;
476
477 linesizes[L2U] = info_p->cpuid_cache_linesize;
478
479 DBG(" cache_size[L2U] : %d\n",
480 info_p->cache_size[L2U]);
481 DBG(" cache_sharing[L2U] : 1\n");
482 DBG(" cache_partitions[L2U]: 1\n");
483 DBG(" linesizes[L2U] : %d\n",
484 info_p->cpuid_cache_linesize);
485 }
486
487 /*
488 * What linesize to publish? We use the L2 linesize if any,
489 * else the L1D.
490 */
491 if (linesizes[L2U]) {
492 info_p->cache_linesize = linesizes[L2U];
493 } else if (linesizes[L1D]) {
494 info_p->cache_linesize = linesizes[L1D];
495 } else {
496 panic("no linesize");
497 }
498 DBG(" cache_linesize : %d\n", info_p->cache_linesize);
499
500 /*
501 * Extract and publish TLB information from Leaf 2 descriptors.
502 */
503 DBG(" %ld leaf2 descriptors:\n", sizeof(info_p->cache_info));
504 for (i = 1; i < sizeof(info_p->cache_info); i++) {
505 cpuid_cache_descriptor_t *descp;
506 int id;
507 int level;
508 int page;
509
510 DBG(" 0x%02x", info_p->cache_info[i]);
511 descp = cpuid_leaf2_find(info_p->cache_info[i]);
512 if (descp == NULL) {
513 continue;
514 }
515
516 switch (descp->type) {
517 case TLB:
518 page = (descp->size == SMALL) ? TLB_SMALL : TLB_LARGE;
519 /* determine I or D: */
520 switch (descp->level) {
521 case INST:
522 id = TLB_INST;
523 break;
524 case DATA:
525 case DATA0:
526 case DATA1:
527 id = TLB_DATA;
528 break;
529 default:
530 continue;
531 }
532 /* determine level: */
533 switch (descp->level) {
534 case DATA1:
535 level = 1;
536 break;
537 default:
538 level = 0;
539 }
540 info_p->cpuid_tlb[id][page][level] = descp->entries;
541 break;
542 case STLB:
543 info_p->cpuid_stlb = descp->entries;
544 }
545 }
546 DBG("\n");
547 }
548
549 static void
550 cpuid_set_generic_info(i386_cpu_info_t *info_p)
551 {
552 uint32_t reg[4];
553 char str[128], *p;
554
555 DBG("cpuid_set_generic_info(%p)\n", info_p);
556
557 /* do cpuid 0 to get vendor */
558 cpuid_fn(0, reg);
559 info_p->cpuid_max_basic = reg[eax];
560 bcopy((char *)&reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
561 bcopy((char *)&reg[ecx], &info_p->cpuid_vendor[8], 4);
562 bcopy((char *)&reg[edx], &info_p->cpuid_vendor[4], 4);
563 info_p->cpuid_vendor[12] = 0;
564
565 /* get extended cpuid results */
566 cpuid_fn(0x80000000, reg);
567 info_p->cpuid_max_ext = reg[eax];
568
569 /* check to see if we can get brand string */
570 if (info_p->cpuid_max_ext >= 0x80000004) {
571 /*
572 * The brand string is 48 bytes (max), guaranteed to
573 * be NUL terminated.
574 */
575 cpuid_fn(0x80000002, reg);
576 bcopy((char *)reg, &str[0], 16);
577 cpuid_fn(0x80000003, reg);
578 bcopy((char *)reg, &str[16], 16);
579 cpuid_fn(0x80000004, reg);
580 bcopy((char *)reg, &str[32], 16);
581 for (p = str; *p != '\0'; p++) {
582 if (*p != ' ') {
583 break;
584 }
585 }
586 strlcpy(info_p->cpuid_brand_string,
587 p, sizeof(info_p->cpuid_brand_string));
588
589 if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN,
590 min(sizeof(info_p->cpuid_brand_string),
591 strlen(CPUID_STRING_UNKNOWN) + 1))) {
592 /*
593 * This string means we have a firmware-programmable brand string,
594 * and the firmware couldn't figure out what sort of CPU we have.
595 */
596 info_p->cpuid_brand_string[0] = '\0';
597 }
598 }
599
600 /* Get cache and addressing info. */
601 if (info_p->cpuid_max_ext >= 0x80000006) {
602 uint32_t assoc;
603 cpuid_fn(0x80000006, reg);
604 info_p->cpuid_cache_linesize = bitfield32(reg[ecx], 7, 0);
605 assoc = bitfield32(reg[ecx], 15, 12);
606 /*
607 * L2 associativity is encoded, though in an insufficiently
608 * descriptive fashion, e.g. 24-way is mapped to 16-way.
609 * Represent a fully associative cache as 0xFFFF.
610 * Overwritten by associativity as determined via CPUID.4
611 * if available.
612 */
613 if (assoc == 6) {
614 assoc = 8;
615 } else if (assoc == 8) {
616 assoc = 16;
617 } else if (assoc == 0xF) {
618 assoc = 0xFFFF;
619 }
620 info_p->cpuid_cache_L2_associativity = assoc;
621 info_p->cpuid_cache_size = bitfield32(reg[ecx], 31, 16);
622 cpuid_fn(0x80000008, reg);
623 info_p->cpuid_address_bits_physical =
624 bitfield32(reg[eax], 7, 0);
625 info_p->cpuid_address_bits_virtual =
626 bitfield32(reg[eax], 15, 8);
627 }
628
629 /*
630 * Get the processor signature and decode it, bracketing this
631 * with the approved procedure for reading the microcode version
632 * number, a.k.a. signature, a.k.a. BIOS ID.
633 */
634 wrmsr64(MSR_IA32_BIOS_SIGN_ID, 0);
635 cpuid_fn(1, reg);
636 info_p->cpuid_microcode_version =
637 (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
638 info_p->cpuid_signature = reg[eax];
639 info_p->cpuid_stepping = bitfield32(reg[eax], 3, 0);
640 info_p->cpuid_model = bitfield32(reg[eax], 7, 4);
641 info_p->cpuid_family = bitfield32(reg[eax], 11, 8);
642 info_p->cpuid_type = bitfield32(reg[eax], 13, 12);
643 info_p->cpuid_extmodel = bitfield32(reg[eax], 19, 16);
644 info_p->cpuid_extfamily = bitfield32(reg[eax], 27, 20);
645 info_p->cpuid_brand = bitfield32(reg[ebx], 7, 0);
646 info_p->cpuid_features = quad(reg[ecx], reg[edx]);
647
648 /* Get "processor flag"; necessary for microcode update matching */
649 info_p->cpuid_processor_flag = (rdmsr64(MSR_IA32_PLATFORM_ID) >> 50) & 0x7;
650
651 /* Fold extensions into family/model */
652 if (info_p->cpuid_family == 0x0f) {
653 info_p->cpuid_family += info_p->cpuid_extfamily;
654 }
655 if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06) {
656 info_p->cpuid_model += (info_p->cpuid_extmodel << 4);
657 }
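/*
 * Worked example (editorial): a signature of 0x000906EA decodes to stepping
 * 0xA, model 0xE, family 0x6, extmodel 0x9, extfamily 0x0; the folding above
 * therefore yields family 6 and model 0x9E (a Kaby Lake derivative).
 */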
658
659 if (info_p->cpuid_features & CPUID_FEATURE_HTT) {
660 info_p->cpuid_logical_per_package =
661 bitfield32(reg[ebx], 23, 16);
662 } else {
663 info_p->cpuid_logical_per_package = 1;
664 }
665
666 if (info_p->cpuid_max_ext >= 0x80000001) {
667 cpuid_fn(0x80000001, reg);
668 info_p->cpuid_extfeatures =
669 quad(reg[ecx], reg[edx]);
670 }
671
672 DBG(" max_basic : %d\n", info_p->cpuid_max_basic);
673 DBG(" max_ext : 0x%08x\n", info_p->cpuid_max_ext);
674 DBG(" vendor : %s\n", info_p->cpuid_vendor);
675 DBG(" brand_string : %s\n", info_p->cpuid_brand_string);
676 DBG(" signature : 0x%08x\n", info_p->cpuid_signature);
677 DBG(" stepping : %d\n", info_p->cpuid_stepping);
678 DBG(" model : %d\n", info_p->cpuid_model);
679 DBG(" family : %d\n", info_p->cpuid_family);
680 DBG(" type : %d\n", info_p->cpuid_type);
681 DBG(" extmodel : %d\n", info_p->cpuid_extmodel);
682 DBG(" extfamily : %d\n", info_p->cpuid_extfamily);
683 DBG(" brand : %d\n", info_p->cpuid_brand);
684 DBG(" features : 0x%016llx\n", info_p->cpuid_features);
685 DBG(" extfeatures : 0x%016llx\n", info_p->cpuid_extfeatures);
686 DBG(" logical_per_package : %d\n", info_p->cpuid_logical_per_package);
687 DBG(" microcode_version : 0x%08x\n", info_p->cpuid_microcode_version);
688
689 /* Fold in the Invariant TSC feature bit, if present */
690 if (info_p->cpuid_max_ext >= 0x80000007) {
691 cpuid_fn(0x80000007, reg);
692 info_p->cpuid_extfeatures |=
693 reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
694 DBG(" extfeatures : 0x%016llx\n",
695 info_p->cpuid_extfeatures);
696 }
697
698 if (info_p->cpuid_max_basic >= 0x5) {
699 cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf;
700
701 /*
702 * Extract the Monitor/Mwait Leaf info:
703 */
704 cpuid_fn(5, reg);
705 cmp->linesize_min = reg[eax];
706 cmp->linesize_max = reg[ebx];
707 cmp->extensions = reg[ecx];
708 cmp->sub_Cstates = reg[edx];
709 info_p->cpuid_mwait_leafp = cmp;
710
711 DBG(" Monitor/Mwait Leaf:\n");
712 DBG(" linesize_min : %d\n", cmp->linesize_min);
713 DBG(" linesize_max : %d\n", cmp->linesize_max);
714 DBG(" extensions : %d\n", cmp->extensions);
715 DBG(" sub_Cstates : 0x%08x\n", cmp->sub_Cstates);
716 }
717
718 if (info_p->cpuid_max_basic >= 0x6) {
719 cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf;
720
721 /*
722 * The thermal and Power Leaf:
723 */
724 cpuid_fn(6, reg);
725 ctp->sensor = bitfield32(reg[eax], 0, 0);
726 ctp->dynamic_acceleration = bitfield32(reg[eax], 1, 1);
727 ctp->invariant_APIC_timer = bitfield32(reg[eax], 2, 2);
728 ctp->core_power_limits = bitfield32(reg[eax], 4, 4);
729 ctp->fine_grain_clock_mod = bitfield32(reg[eax], 5, 5);
730 ctp->package_thermal_intr = bitfield32(reg[eax], 6, 6);
731 ctp->thresholds = bitfield32(reg[ebx], 3, 0);
732 ctp->ACNT_MCNT = bitfield32(reg[ecx], 0, 0);
733 ctp->hardware_feedback = bitfield32(reg[ecx], 1, 1);
734 ctp->energy_policy = bitfield32(reg[ecx], 3, 3);
735 info_p->cpuid_thermal_leafp = ctp;
736
737 DBG(" Thermal/Power Leaf:\n");
738 DBG(" sensor : %d\n", ctp->sensor);
739 DBG(" dynamic_acceleration : %d\n", ctp->dynamic_acceleration);
740 DBG(" invariant_APIC_timer : %d\n", ctp->invariant_APIC_timer);
741 DBG(" core_power_limits : %d\n", ctp->core_power_limits);
742 DBG(" fine_grain_clock_mod : %d\n", ctp->fine_grain_clock_mod);
743 DBG(" package_thermal_intr : %d\n", ctp->package_thermal_intr);
744 DBG(" thresholds : %d\n", ctp->thresholds);
745 DBG(" ACNT_MCNT : %d\n", ctp->ACNT_MCNT);
746 DBG(" ACNT2 : %d\n", ctp->hardware_feedback);
747 DBG(" energy_policy : %d\n", ctp->energy_policy);
748 }
749
750 if (info_p->cpuid_max_basic >= 0xa) {
751 cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf;
752
753 /*
754 * Architectural Performance Monitoring Leaf:
755 */
756 cpuid_fn(0xa, reg);
757 capp->version = bitfield32(reg[eax], 7, 0);
758 capp->number = bitfield32(reg[eax], 15, 8);
759 capp->width = bitfield32(reg[eax], 23, 16);
760 capp->events_number = bitfield32(reg[eax], 31, 24);
761 capp->events = reg[ebx];
762 capp->fixed_number = bitfield32(reg[edx], 4, 0);
763 capp->fixed_width = bitfield32(reg[edx], 12, 5);
764 info_p->cpuid_arch_perf_leafp = capp;
765
766 DBG(" Architectural Performance Monitoring Leaf:\n");
767 DBG(" version : %d\n", capp->version);
768 DBG(" number : %d\n", capp->number);
769 DBG(" width : %d\n", capp->width);
770 DBG(" events_number : %d\n", capp->events_number);
771 DBG(" events : %d\n", capp->events);
772 DBG(" fixed_number : %d\n", capp->fixed_number);
773 DBG(" fixed_width : %d\n", capp->fixed_width);
774 }
775
776 if (info_p->cpuid_max_basic >= 0xd) {
777 cpuid_xsave_leaf_t *xsp;
778 /*
779 * XSAVE Features:
780 */
781 xsp = &info_p->cpuid_xsave_leaf[0];
782 info_p->cpuid_xsave_leafp = xsp;
783 xsp->extended_state[eax] = 0xd;
784 xsp->extended_state[ecx] = 0;
785 cpuid(xsp->extended_state);
786 DBG(" XSAVE Main leaf:\n");
787 DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
788 DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
789 DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
790 DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
791
792 xsp = &info_p->cpuid_xsave_leaf[1];
793 xsp->extended_state[eax] = 0xd;
794 xsp->extended_state[ecx] = 1;
795 cpuid(xsp->extended_state);
796 DBG(" XSAVE Sub-leaf1:\n");
797 DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
798 DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
799 DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
800 DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
801 }
802
803 if (info_p->cpuid_model >= CPUID_MODEL_IVYBRIDGE) {
804 /*
805 * Leaf7 Features:
806 */
807 cpuid_fn(0x7, reg);
808 info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]);
809 info_p->cpuid_leaf7_extfeatures = reg[edx];
810
811 DBG(" Feature Leaf7:\n");
812 DBG(" EBX : 0x%x\n", reg[ebx]);
813 DBG(" ECX : 0x%x\n", reg[ecx]);
814 DBG(" EDX : 0x%x\n", reg[edx]);
815 }
816
817 if (info_p->cpuid_max_basic >= 0x15) {
818 /*
819 * TSC/CCC frequency leaf:
820 */
821 cpuid_fn(0x15, reg);
822 info_p->cpuid_tsc_leaf.denominator = reg[eax];
823 info_p->cpuid_tsc_leaf.numerator = reg[ebx];
824
825 DBG(" TSC/CCC Information Leaf:\n");
826 DBG(" numerator : 0x%x\n", reg[ebx]);
827 DBG(" denominator : 0x%x\n", reg[eax]);
828 }
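/*
 * Editorial note: per the Intel SDM, when leaf 0x15 reports non-zero values
 * the TSC frequency is the core crystal (ART) frequency scaled by
 * numerator / denominator; the ratio captured here allows that derivation.
 */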
829
830 return;
831 }
832
833 static uint32_t
834 cpuid_set_cpufamily(i386_cpu_info_t *info_p)
835 {
836 uint32_t cpufamily = CPUFAMILY_UNKNOWN;
837
838 switch (info_p->cpuid_family) {
839 case 6:
840 switch (info_p->cpuid_model) {
841 case 23:
842 cpufamily = CPUFAMILY_INTEL_PENRYN;
843 break;
844 case CPUID_MODEL_NEHALEM:
845 case CPUID_MODEL_FIELDS:
846 case CPUID_MODEL_DALES:
847 case CPUID_MODEL_NEHALEM_EX:
848 cpufamily = CPUFAMILY_INTEL_NEHALEM;
849 break;
850 case CPUID_MODEL_DALES_32NM:
851 case CPUID_MODEL_WESTMERE:
852 case CPUID_MODEL_WESTMERE_EX:
853 cpufamily = CPUFAMILY_INTEL_WESTMERE;
854 break;
855 case CPUID_MODEL_SANDYBRIDGE:
856 case CPUID_MODEL_JAKETOWN:
857 cpufamily = CPUFAMILY_INTEL_SANDYBRIDGE;
858 break;
859 case CPUID_MODEL_IVYBRIDGE:
860 case CPUID_MODEL_IVYBRIDGE_EP:
861 cpufamily = CPUFAMILY_INTEL_IVYBRIDGE;
862 break;
863 case CPUID_MODEL_HASWELL:
864 case CPUID_MODEL_HASWELL_EP:
865 case CPUID_MODEL_HASWELL_ULT:
866 case CPUID_MODEL_CRYSTALWELL:
867 cpufamily = CPUFAMILY_INTEL_HASWELL;
868 break;
869 case CPUID_MODEL_BROADWELL:
870 case CPUID_MODEL_BRYSTALWELL:
871 cpufamily = CPUFAMILY_INTEL_BROADWELL;
872 break;
873 case CPUID_MODEL_SKYLAKE:
874 case CPUID_MODEL_SKYLAKE_DT:
875 #if !defined(RC_HIDE_XNU_J137)
876 case CPUID_MODEL_SKYLAKE_W:
877 #endif
878 cpufamily = CPUFAMILY_INTEL_SKYLAKE;
879 break;
880 case CPUID_MODEL_KABYLAKE:
881 case CPUID_MODEL_KABYLAKE_DT:
882 cpufamily = CPUFAMILY_INTEL_KABYLAKE;
883 break;
884 }
885 break;
886 }
887
888 info_p->cpuid_cpufamily = cpufamily;
889 DBG("cpuid_set_cpufamily(%p) returning 0x%x\n", info_p, cpufamily);
890 return cpufamily;
891 }
892 /*
893 * Must be invoked either when executing single threaded, or with
894 * independent synchronization.
895 */
896 void
897 cpuid_set_info(void)
898 {
899 i386_cpu_info_t *info_p = &cpuid_cpu_info;
900 boolean_t enable_x86_64h = TRUE;
901
902 /* Perform pre-cpuid workarounds (since their effects impact values returned via cpuid) */
903 cpuid_do_precpuid_was();
904
905 cpuid_set_generic_info(info_p);
906
907 /* verify we are running on a supported CPU */
908 if ((strncmp(CPUID_VID_INTEL, info_p->cpuid_vendor,
909 min(strlen(CPUID_STRING_UNKNOWN) + 1,
910 sizeof(info_p->cpuid_vendor)))) ||
911 (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN)) {
912 panic("Unsupported CPU");
913 }
914
915 info_p->cpuid_cpu_type = CPU_TYPE_X86;
916
917 if (!PE_parse_boot_argn("-enable_x86_64h", &enable_x86_64h, sizeof(enable_x86_64h))) {
918 boolean_t disable_x86_64h = FALSE;
919
920 if (PE_parse_boot_argn("-disable_x86_64h", &disable_x86_64h, sizeof(disable_x86_64h))) {
921 enable_x86_64h = FALSE;
922 }
923 }
924
925 if (enable_x86_64h &&
926 ((info_p->cpuid_features & CPUID_X86_64_H_FEATURE_SUBSET) == CPUID_X86_64_H_FEATURE_SUBSET) &&
927 ((info_p->cpuid_extfeatures & CPUID_X86_64_H_EXTFEATURE_SUBSET) == CPUID_X86_64_H_EXTFEATURE_SUBSET) &&
928 ((info_p->cpuid_leaf7_features & CPUID_X86_64_H_LEAF7_FEATURE_SUBSET) == CPUID_X86_64_H_LEAF7_FEATURE_SUBSET)) {
929 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_64_H;
930 } else {
931 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
932 }
933 /* cpuid_set_cache_info must be invoked after set_generic_info */
934
935 /*
936 * Find the number of enabled cores and threads
937 * (which determines whether SMT/Hyperthreading is active).
938 */
939
940 if (0 != (info_p->cpuid_features & CPUID_FEATURE_VMM) &&
941 PE_parse_boot_argn("-nomsr35h", NULL, 0)) {
942 info_p->core_count = 1;
943 info_p->thread_count = 1;
944 cpuid_set_cache_info(info_p);
945 } else {
946 switch (info_p->cpuid_cpufamily) {
947 case CPUFAMILY_INTEL_PENRYN:
948 cpuid_set_cache_info(info_p);
949 info_p->core_count = info_p->cpuid_cores_per_package;
950 info_p->thread_count = info_p->cpuid_logical_per_package;
951 break;
952 case CPUFAMILY_INTEL_WESTMERE: {
953 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
954 if (0 == msr) {
955 /* Provide a non-zero default for some VMMs */
956 msr = (1 << 16) | 1;
957 }
958 info_p->core_count = bitfield32((uint32_t)msr, 19, 16);
959 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
960 cpuid_set_cache_info(info_p);
961 break;
962 }
963 default: {
964 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
965 if (0 == msr) {
966 /* Provide a non-zero default for some VMMs */
967 msr = (1 << 16) | 1;
968 }
969 info_p->core_count = bitfield32((uint32_t)msr, 31, 16);
970 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
971 cpuid_set_cache_info(info_p);
972 break;
973 }
974 }
975 }
976
977 DBG("cpuid_set_info():\n");
978 DBG(" core_count : %d\n", info_p->core_count);
979 DBG(" thread_count : %d\n", info_p->thread_count);
980 DBG(" cpu_type: 0x%08x\n", info_p->cpuid_cpu_type);
981 DBG(" cpu_subtype: 0x%08x\n", info_p->cpuid_cpu_subtype);
982
983 info_p->cpuid_model_string = ""; /* deprecated */
984
985 do_cwas(info_p, FALSE);
986 }
987
988 static struct table {
989 uint64_t mask;
990 const char *name;
991 } feature_map[] = {
992 {CPUID_FEATURE_FPU, "FPU"},
993 {CPUID_FEATURE_VME, "VME"},
994 {CPUID_FEATURE_DE, "DE"},
995 {CPUID_FEATURE_PSE, "PSE"},
996 {CPUID_FEATURE_TSC, "TSC"},
997 {CPUID_FEATURE_MSR, "MSR"},
998 {CPUID_FEATURE_PAE, "PAE"},
999 {CPUID_FEATURE_MCE, "MCE"},
1000 {CPUID_FEATURE_CX8, "CX8"},
1001 {CPUID_FEATURE_APIC, "APIC"},
1002 {CPUID_FEATURE_SEP, "SEP"},
1003 {CPUID_FEATURE_MTRR, "MTRR"},
1004 {CPUID_FEATURE_PGE, "PGE"},
1005 {CPUID_FEATURE_MCA, "MCA"},
1006 {CPUID_FEATURE_CMOV, "CMOV"},
1007 {CPUID_FEATURE_PAT, "PAT"},
1008 {CPUID_FEATURE_PSE36, "PSE36"},
1009 {CPUID_FEATURE_PSN, "PSN"},
1010 {CPUID_FEATURE_CLFSH, "CLFSH"},
1011 {CPUID_FEATURE_DS, "DS"},
1012 {CPUID_FEATURE_ACPI, "ACPI"},
1013 {CPUID_FEATURE_MMX, "MMX"},
1014 {CPUID_FEATURE_FXSR, "FXSR"},
1015 {CPUID_FEATURE_SSE, "SSE"},
1016 {CPUID_FEATURE_SSE2, "SSE2"},
1017 {CPUID_FEATURE_SS, "SS"},
1018 {CPUID_FEATURE_HTT, "HTT"},
1019 {CPUID_FEATURE_TM, "TM"},
1020 {CPUID_FEATURE_PBE, "PBE"},
1021 {CPUID_FEATURE_SSE3, "SSE3"},
1022 {CPUID_FEATURE_PCLMULQDQ, "PCLMULQDQ"},
1023 {CPUID_FEATURE_DTES64, "DTES64"},
1024 {CPUID_FEATURE_MONITOR, "MON"},
1025 {CPUID_FEATURE_DSCPL, "DSCPL"},
1026 {CPUID_FEATURE_VMX, "VMX"},
1027 {CPUID_FEATURE_SMX, "SMX"},
1028 {CPUID_FEATURE_EST, "EST"},
1029 {CPUID_FEATURE_TM2, "TM2"},
1030 {CPUID_FEATURE_SSSE3, "SSSE3"},
1031 {CPUID_FEATURE_CID, "CID"},
1032 {CPUID_FEATURE_FMA, "FMA"},
1033 {CPUID_FEATURE_CX16, "CX16"},
1034 {CPUID_FEATURE_xTPR, "TPR"},
1035 {CPUID_FEATURE_PDCM, "PDCM"},
1036 {CPUID_FEATURE_SSE4_1, "SSE4.1"},
1037 {CPUID_FEATURE_SSE4_2, "SSE4.2"},
1038 {CPUID_FEATURE_x2APIC, "x2APIC"},
1039 {CPUID_FEATURE_MOVBE, "MOVBE"},
1040 {CPUID_FEATURE_POPCNT, "POPCNT"},
1041 {CPUID_FEATURE_AES, "AES"},
1042 {CPUID_FEATURE_VMM, "VMM"},
1043 {CPUID_FEATURE_PCID, "PCID"},
1044 {CPUID_FEATURE_XSAVE, "XSAVE"},
1045 {CPUID_FEATURE_OSXSAVE, "OSXSAVE"},
1046 {CPUID_FEATURE_SEGLIM64, "SEGLIM64"},
1047 {CPUID_FEATURE_TSCTMR, "TSCTMR"},
1048 {CPUID_FEATURE_AVX1_0, "AVX1.0"},
1049 {CPUID_FEATURE_RDRAND, "RDRAND"},
1050 {CPUID_FEATURE_F16C, "F16C"},
1051 {0, 0}
1052 },
1053 extfeature_map[] = {
1054 {CPUID_EXTFEATURE_SYSCALL, "SYSCALL"},
1055 {CPUID_EXTFEATURE_XD, "XD"},
1056 {CPUID_EXTFEATURE_1GBPAGE, "1GBPAGE"},
1057 {CPUID_EXTFEATURE_EM64T, "EM64T"},
1058 {CPUID_EXTFEATURE_LAHF, "LAHF"},
1059 {CPUID_EXTFEATURE_LZCNT, "LZCNT"},
1060 {CPUID_EXTFEATURE_PREFETCHW, "PREFETCHW"},
1061 {CPUID_EXTFEATURE_RDTSCP, "RDTSCP"},
1062 {CPUID_EXTFEATURE_TSCI, "TSCI"},
1063 {0, 0}
1064 },
1065 leaf7_feature_map[] = {
1066 {CPUID_LEAF7_FEATURE_RDWRFSGS, "RDWRFSGS"},
1067 {CPUID_LEAF7_FEATURE_TSCOFF, "TSC_THREAD_OFFSET"},
1068 {CPUID_LEAF7_FEATURE_SGX, "SGX"},
1069 {CPUID_LEAF7_FEATURE_BMI1, "BMI1"},
1070 {CPUID_LEAF7_FEATURE_HLE, "HLE"},
1071 {CPUID_LEAF7_FEATURE_AVX2, "AVX2"},
1072 {CPUID_LEAF7_FEATURE_FDPEO, "FDPEO"},
1073 {CPUID_LEAF7_FEATURE_SMEP, "SMEP"},
1074 {CPUID_LEAF7_FEATURE_BMI2, "BMI2"},
1075 {CPUID_LEAF7_FEATURE_ERMS, "ERMS"},
1076 {CPUID_LEAF7_FEATURE_INVPCID, "INVPCID"},
1077 {CPUID_LEAF7_FEATURE_RTM, "RTM"},
1078 {CPUID_LEAF7_FEATURE_PQM, "PQM"},
1079 {CPUID_LEAF7_FEATURE_FPU_CSDS, "FPU_CSDS"},
1080 {CPUID_LEAF7_FEATURE_MPX, "MPX"},
1081 {CPUID_LEAF7_FEATURE_PQE, "PQE"},
1082 {CPUID_LEAF7_FEATURE_AVX512F, "AVX512F"},
1083 {CPUID_LEAF7_FEATURE_AVX512DQ, "AVX512DQ"},
1084 {CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"},
1085 {CPUID_LEAF7_FEATURE_ADX, "ADX"},
1086 {CPUID_LEAF7_FEATURE_SMAP, "SMAP"},
1087 {CPUID_LEAF7_FEATURE_AVX512IFMA, "AVX512IFMA"},
1088 {CPUID_LEAF7_FEATURE_CLFSOPT, "CLFSOPT"},
1089 {CPUID_LEAF7_FEATURE_CLWB, "CLWB"},
1090 {CPUID_LEAF7_FEATURE_IPT, "IPT"},
1091 {CPUID_LEAF7_FEATURE_AVX512CD, "AVX512CD"},
1092 {CPUID_LEAF7_FEATURE_SHA, "SHA"},
1093 {CPUID_LEAF7_FEATURE_AVX512BW, "AVX512BW"},
1094 {CPUID_LEAF7_FEATURE_AVX512VL, "AVX512VL"},
1095 {CPUID_LEAF7_FEATURE_PREFETCHWT1, "PREFETCHWT1"},
1096 {CPUID_LEAF7_FEATURE_AVX512VBMI, "AVX512VBMI"},
1097 {CPUID_LEAF7_FEATURE_UMIP, "UMIP"},
1098 {CPUID_LEAF7_FEATURE_PKU, "PKU"},
1099 {CPUID_LEAF7_FEATURE_OSPKE, "OSPKE"},
1100 {CPUID_LEAF7_FEATURE_WAITPKG, "WAITPKG"},
1101 {CPUID_LEAF7_FEATURE_GFNI, "GFNI"},
1102 {CPUID_LEAF7_FEATURE_VAES, "VAES"},
1103 {CPUID_LEAF7_FEATURE_VPCLMULQDQ, "VPCLMULQDQ"},
1104 {CPUID_LEAF7_FEATURE_AVX512VNNI, "AVX512VNNI"},
1105 {CPUID_LEAF7_FEATURE_AVX512BITALG, "AVX512BITALG"},
1106 {CPUID_LEAF7_FEATURE_AVX512VPCDQ, "AVX512VPOPCNTDQ"},
1107 {CPUID_LEAF7_FEATURE_RDPID, "RDPID"},
1108 {CPUID_LEAF7_FEATURE_CLDEMOTE, "CLDEMOTE"},
1109 {CPUID_LEAF7_FEATURE_MOVDIRI, "MOVDIRI"},
1110 {CPUID_LEAF7_FEATURE_MOVDIRI64B, "MOVDIRI64B"},
1111 {CPUID_LEAF7_FEATURE_SGXLC, "SGXLC"},
1112 {0, 0}
1113 },
1114 leaf7_extfeature_map[] = {
1115 { CPUID_LEAF7_EXTFEATURE_AVX5124VNNIW, "AVX5124VNNIW" },
1116 { CPUID_LEAF7_EXTFEATURE_AVX5124FMAPS, "AVX5124FMAPS" },
1117 { CPUID_LEAF7_EXTFEATURE_FSREPMOV, "FSREPMOV" },
1118 { CPUID_LEAF7_EXTFEATURE_MDCLEAR, "MDCLEAR" },
1119 { CPUID_LEAF7_EXTFEATURE_TSXFA, "TSXFA" },
1120 { CPUID_LEAF7_EXTFEATURE_IBRS, "IBRS" },
1121 { CPUID_LEAF7_EXTFEATURE_STIBP, "STIBP" },
1122 { CPUID_LEAF7_EXTFEATURE_L1DF, "L1DF" },
1123 { CPUID_LEAF7_EXTFEATURE_ACAPMSR, "ACAPMSR" },
1124 { CPUID_LEAF7_EXTFEATURE_CCAPMSR, "CCAPMSR" },
1125 { CPUID_LEAF7_EXTFEATURE_SSBD, "SSBD" },
1126 {0, 0}
1127 };
1128
1129 static char *
1130 cpuid_get_names(struct table *map, uint64_t bits, char *buf, unsigned buf_len)
1131 {
1132 size_t len = 0;
1133 char *p = buf;
1134 int i;
1135
1136 for (i = 0; map[i].mask != 0; i++) {
1137 if ((bits & map[i].mask) == 0) {
1138 continue;
1139 }
1140 if (len && ((size_t) (p - buf) < (buf_len - 1))) {
1141 *p++ = ' ';
1142 }
1143 len = min(strlen(map[i].name), (size_t)((buf_len - 1) - (p - buf)));
1144 if (len == 0) {
1145 break;
1146 }
1147 bcopy(map[i].name, p, len);
1148 p += len;
1149 }
1150 *p = '\0';
1151 return buf;
1152 }
1153
1154 i386_cpu_info_t *
1155 cpuid_info(void)
1156 {
1157 /* Set up the cpuid_info structure lazily */
1158 if (cpuid_cpu_infop == NULL) {
1159 PE_parse_boot_argn("-cpuid", &cpuid_dbg, sizeof(cpuid_dbg));
1160 cpuid_set_info();
1161 cpuid_cpu_infop = &cpuid_cpu_info;
1162 }
1163 return cpuid_cpu_infop;
1164 }
1165
1166 char *
1167 cpuid_get_feature_names(uint64_t features, char *buf, unsigned buf_len)
1168 {
1169 return cpuid_get_names(feature_map, features, buf, buf_len);
1170 }
1171
1172 char *
1173 cpuid_get_extfeature_names(uint64_t extfeatures, char *buf, unsigned buf_len)
1174 {
1175 return cpuid_get_names(extfeature_map, extfeatures, buf, buf_len);
1176 }
1177
1178 char *
1179 cpuid_get_leaf7_feature_names(uint64_t features, char *buf, unsigned buf_len)
1180 {
1181 return cpuid_get_names(leaf7_feature_map, features, buf, buf_len);
1182 }
1183
1184 char *
1185 cpuid_get_leaf7_extfeature_names(uint64_t features, char *buf, unsigned buf_len)
1186 {
1187 return cpuid_get_names(leaf7_extfeature_map, features, buf, buf_len);
1188 }
1189
1190 void
1191 cpuid_feature_display(
1192 const char *header)
1193 {
1194 char buf[320];
1195
1196 kprintf("%s: %s", header,
1197 cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf)));
1198 if (cpuid_leaf7_features()) {
1199 kprintf(" %s", cpuid_get_leaf7_feature_names(
1200 cpuid_leaf7_features(), buf, sizeof(buf)));
1201 }
1202 if (cpuid_leaf7_extfeatures()) {
1203 kprintf(" %s", cpuid_get_leaf7_extfeature_names(
1204 cpuid_leaf7_extfeatures(), buf, sizeof(buf)));
1205 }
1206 kprintf("\n");
1207 if (cpuid_features() & CPUID_FEATURE_HTT) {
1208 #define s_if_plural(n) ((n > 1) ? "s" : "")
1209 kprintf(" HTT: %d core%s per package;"
1210 " %d logical cpu%s per package\n",
1211 cpuid_cpu_infop->cpuid_cores_per_package,
1212 s_if_plural(cpuid_cpu_infop->cpuid_cores_per_package),
1213 cpuid_cpu_infop->cpuid_logical_per_package,
1214 s_if_plural(cpuid_cpu_infop->cpuid_logical_per_package));
1215 }
1216 }
1217
1218 void
1219 cpuid_extfeature_display(
1220 const char *header)
1221 {
1222 char buf[256];
1223
1224 kprintf("%s: %s\n", header,
1225 cpuid_get_extfeature_names(cpuid_extfeatures(),
1226 buf, sizeof(buf)));
1227 }
1228
1229 void
1230 cpuid_cpu_display(
1231 const char *header)
1232 {
1233 if (cpuid_cpu_infop->cpuid_brand_string[0] != '\0') {
1234 kprintf("%s: %s\n", header, cpuid_cpu_infop->cpuid_brand_string);
1235 }
1236 }
1237
1238 unsigned int
1239 cpuid_family(void)
1240 {
1241 return cpuid_info()->cpuid_family;
1242 }
1243
1244 uint32_t
1245 cpuid_cpufamily(void)
1246 {
1247 return cpuid_info()->cpuid_cpufamily;
1248 }
1249
1250 cpu_type_t
1251 cpuid_cputype(void)
1252 {
1253 return cpuid_info()->cpuid_cpu_type;
1254 }
1255
1256 cpu_subtype_t
1257 cpuid_cpusubtype(void)
1258 {
1259 return cpuid_info()->cpuid_cpu_subtype;
1260 }
1261
1262 uint64_t
1263 cpuid_features(void)
1264 {
1265 static int checked = 0;
1266 char fpu_arg[20] = { 0 };
1267
1268 (void) cpuid_info();
1269 if (!checked) {
1270 /* check for boot-time fpu limitations */
1271 if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof(fpu_arg))) {
1272 printf("limiting fpu features to: %s\n", fpu_arg);
1273 if (!strncmp("387", fpu_arg, sizeof("387")) || !strncmp("mmx", fpu_arg, sizeof("mmx"))) {
1274 printf("no sse or sse2\n");
1275 cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE | CPUID_FEATURE_SSE2 | CPUID_FEATURE_FXSR);
1276 } else if (!strncmp("sse", fpu_arg, sizeof("sse"))) {
1277 printf("no sse2\n");
1278 cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE2);
1279 }
1280 }
1281 checked = 1;
1282 }
1283 return cpuid_cpu_infop->cpuid_features;
1284 }
1285
1286 uint64_t
1287 cpuid_extfeatures(void)
1288 {
1289 return cpuid_info()->cpuid_extfeatures;
1290 }
1291
1292 uint64_t
1293 cpuid_leaf7_features(void)
1294 {
1295 return cpuid_info()->cpuid_leaf7_features;
1296 }
1297
1298 uint64_t
1299 cpuid_leaf7_extfeatures(void)
1300 {
1301 return cpuid_info()->cpuid_leaf7_extfeatures;
1302 }
1303
1304 static i386_vmm_info_t *_cpuid_vmm_infop = NULL;
1305 static i386_vmm_info_t _cpuid_vmm_info;
1306
1307 static void
1308 cpuid_init_vmm_info(i386_vmm_info_t *info_p)
1309 {
1310 uint32_t reg[4];
1311 uint32_t max_vmm_leaf;
1312
1313 bzero(info_p, sizeof(*info_p));
1314
1315 if (!cpuid_vmm_present()) {
1316 return;
1317 }
1318
1319 DBG("cpuid_init_vmm_info(%p)\n", info_p);
1320
1321 /* do cpuid 0x40000000 to get VMM vendor */
1322 cpuid_fn(0x40000000, reg);
1323 max_vmm_leaf = reg[eax];
1324 bcopy((char *)&reg[ebx], &info_p->cpuid_vmm_vendor[0], 4);
1325 bcopy((char *)&reg[ecx], &info_p->cpuid_vmm_vendor[4], 4);
1326 bcopy((char *)&reg[edx], &info_p->cpuid_vmm_vendor[8], 4);
1327 info_p->cpuid_vmm_vendor[12] = '\0';
1328
1329 if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_VMWARE)) {
1330 /* VMware identification string: kb.vmware.com/kb/1009458 */
1331 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_VMWARE;
1332 } else if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_PARALLELS)) {
1333 /* Parallels identification string */
1334 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_PARALLELS;
1335 } else {
1336 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_UNKNOWN;
1337 }
1338
1339 /* VMM generic leaves: https://lkml.org/lkml/2008/10/1/246 */
1340 if (max_vmm_leaf >= 0x40000010) {
1341 cpuid_fn(0x40000010, reg);
1342
1343 info_p->cpuid_vmm_tsc_frequency = reg[eax];
1344 info_p->cpuid_vmm_bus_frequency = reg[ebx];
1345 }
1346
1347 DBG(" vmm_vendor : %s\n", info_p->cpuid_vmm_vendor);
1348 DBG(" vmm_family : %u\n", info_p->cpuid_vmm_family);
1349 DBG(" vmm_bus_frequency : %u\n", info_p->cpuid_vmm_bus_frequency);
1350 DBG(" vmm_tsc_frequency : %u\n", info_p->cpuid_vmm_tsc_frequency);
1351 }
1352
1353 boolean_t
1354 cpuid_vmm_present(void)
1355 {
1356 return (cpuid_features() & CPUID_FEATURE_VMM) ? TRUE : FALSE;
1357 }
1358
1359 i386_vmm_info_t *
1360 cpuid_vmm_info(void)
1361 {
1362 if (_cpuid_vmm_infop == NULL) {
1363 cpuid_init_vmm_info(&_cpuid_vmm_info);
1364 _cpuid_vmm_infop = &_cpuid_vmm_info;
1365 }
1366 return _cpuid_vmm_infop;
1367 }
1368
1369 uint32_t
1370 cpuid_vmm_family(void)
1371 {
1372 return cpuid_vmm_info()->cpuid_vmm_family;
1373 }
1374
1375 cwa_classifier_e
1376 cpuid_wa_required(cpu_wa_e wa)
1377 {
1378 i386_cpu_info_t *info_p = &cpuid_cpu_info;
1379 static uint64_t bootarg_cpu_wa_enables = 0;
1380 static uint64_t bootarg_cpu_wa_disables = 0;
1381 static int bootargs_overrides_processed = 0;
1382
1383 if (!bootargs_overrides_processed) {
1384 if (!PE_parse_boot_argn("cwae", &bootarg_cpu_wa_enables, sizeof(bootarg_cpu_wa_enables))) {
1385 bootarg_cpu_wa_enables = 0;
1386 }
1387
1388 if (!PE_parse_boot_argn("cwad", &bootarg_cpu_wa_disables, sizeof(bootarg_cpu_wa_disables))) {
1389 bootarg_cpu_wa_disables = 0;
1390 }
1391 bootargs_overrides_processed = 1;
1392 }
1393
1394 if (bootarg_cpu_wa_enables & (1 << wa)) {
1395 return CWA_FORCE_ON;
1396 }
1397
1398 if (bootarg_cpu_wa_disables & (1 << wa)) {
1399 return CWA_FORCE_OFF;
1400 }
1401
1402 switch (wa) {
1403 case CPU_INTEL_SEGCHK:
1404 /* First, check to see if this CPU requires the workaround */
1405 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_ACAPMSR) != 0) {
1406 /* We have ARCHCAP, so check it for either RDCL_NO or MDS_NO */
1407 uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES);
1408 if ((archcap_msr & (MSR_IA32_ARCH_CAPABILITIES_RDCL_NO | MSR_IA32_ARCH_CAPABILITIES_MDS_NO)) != 0) {
1409 /* Workaround not needed */
1410 return CWA_OFF;
1411 }
1412 }
1413
1414 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_MDCLEAR) != 0) {
1415 return CWA_ON;
1416 }
1417
1418 /*
1419 * Otherwise the CPU either supports the ARCHCAP MSR but sets neither the
1420 * RDCL_NO nor the MDS_NO bit, or it does not support the ARCHCAP MSR at all;
1421 * in either case it also does not enumerate the enhanced VERW (MD_CLEAR)
1422 * instruction, so report that the workaround should not be enabled.
1423 */
1424 break;
1425
1426 case CPU_INTEL_TSXFA:
1427 /*
1428 * If the CPU supports both TSX (RTM) and the FORCE_ABORT MSR, return that
1429 * the workaround should be enabled.
1430 */
1431 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_TSXFA) != 0 &&
1432 (info_p->cpuid_leaf7_features & CPUID_LEAF7_FEATURE_RTM) != 0) {
1433 return CWA_ON;
1434 }
1435 break;
1436
1437 default:
1438 break;
1439 }
1440
1441 return CWA_OFF;
1442 }
1443
1444 static void
1445 cpuid_do_precpuid_was(void)
1446 {
1447 /*
1448 * Note that care must be taken not to use any data from the cached cpuid data since it is
1449 * likely uninitialized at this point. That includes calling functions that make use of
1450 * that data as well.
1451 */
1452
1453 }