1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 #include <vm/vm_page.h>
32 #include <pexpert/pexpert.h>
33
34 #include <i386/cpu_threads.h>
35 #include <i386/cpuid.h>
36
37 int force_tecs_at_idle;
38 int tecs_mode_supported;
39
40 static boolean_t cpuid_dbg
41 #if DEBUG
42 = TRUE;
43 #else
44 = FALSE;
45 #endif
46 #define DBG(x...) \
47 do { \
48 if (cpuid_dbg) \
49 kprintf(x); \
50	} while (0)
51
52 #define min(a, b) ((a) < (b) ? (a) : (b))
53 #define quad(hi, lo) (((uint64_t)(hi)) << 32 | (lo))
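/*
 * For illustration: quad() puts its first argument in the upper 32 bits, so
 * quad(reg[ecx], reg[edx]) below yields a 64-bit feature word with the CPUID
 * ECX bits above the EDX bits, e.g. quad(0x1, 0x2) == 0x0000000100000002ULL.
 */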
54
55 /*
56 * Leaf 2 cache descriptor encodings.
57 */
58 typedef enum {
59 _NULL_, /* NULL (empty) descriptor */
60 CACHE, /* Cache */
61 TLB, /* TLB */
62 STLB, /* Shared second-level unified TLB */
63 PREFETCH /* Prefetch size */
64 } cpuid_leaf2_desc_type_t;
65
66 typedef enum {
67 NA, /* Not Applicable */
68 FULLY, /* Fully-associative */
69 TRACE, /* Trace Cache (P4 only) */
70 INST, /* Instruction TLB */
71 DATA, /* Data TLB */
72 DATA0, /* Data TLB, 1st level */
73 DATA1, /* Data TLB, 2nd level */
74 L1, /* L1 (unified) cache */
75 L1_INST, /* L1 Instruction cache */
76 L1_DATA, /* L1 Data cache */
77 L2, /* L2 (unified) cache */
78 L3, /* L3 (unified) cache */
79 L2_2LINESECTOR, /* L2 (unified) cache with 2 lines per sector */
80	L3_2LINESECTOR, /* L3 (unified) cache with 2 lines per sector */
81 SMALL, /* Small page TLB */
82 LARGE, /* Large page TLB */
83 BOTH /* Small and Large page TLB */
84 } cpuid_leaf2_qualifier_t;
85
86 typedef struct cpuid_cache_descriptor {
87 uint8_t value; /* descriptor code */
88 uint8_t type; /* cpuid_leaf2_desc_type_t */
89	uint8_t level;          /* level of cache/TLB hierarchy */
90 uint8_t ways; /* wayness of cache */
91 uint16_t size; /* cachesize or TLB pagesize */
92 uint16_t entries; /* number of TLB entries or linesize */
93 } cpuid_cache_descriptor_t;
94
95 /*
96 * These multipliers are used to encode 1*K .. 64*M in a 16-bit size field
97 */
98 #define K (1)
99 #define M (1024)
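/*
 * For example, with these multipliers a 24 KB cache is stored as 24 * K == 24
 * and a 12 MB cache as 12 * M == 12288, i.e. the 16-bit size field always
 * holds the size in kilobytes.
 */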
100
101 /*
102 * Intel cache descriptor table:
103 */
104 static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table[] = {
105 // -------------------------------------------------------
106 // value type level ways size entries
107 // -------------------------------------------------------
108 { 0x00, _NULL_, NA, NA, NA, NA },
109 { 0x01, TLB, INST, 4, SMALL, 32 },
110 { 0x02, TLB, INST, FULLY, LARGE, 2 },
111 { 0x03, TLB, DATA, 4, SMALL, 64 },
112 { 0x04, TLB, DATA, 4, LARGE, 8 },
113 { 0x05, TLB, DATA1, 4, LARGE, 32 },
114 { 0x06, CACHE, L1_INST, 4, 8 * K, 32 },
115 { 0x08, CACHE, L1_INST, 4, 16 * K, 32 },
116 { 0x09, CACHE, L1_INST, 4, 32 * K, 64 },
117 { 0x0A, CACHE, L1_DATA, 2, 8 * K, 32 },
118 { 0x0B, TLB, INST, 4, LARGE, 4 },
119 { 0x0C, CACHE, L1_DATA, 4, 16 * K, 32 },
120 { 0x0D, CACHE, L1_DATA, 4, 16 * K, 64 },
121 { 0x0E, CACHE, L1_DATA, 6, 24 * K, 64 },
122 { 0x21, CACHE, L2, 8, 256 * K, 64 },
123 { 0x22, CACHE, L3_2LINESECTOR, 4, 512 * K, 64 },
124 { 0x23, CACHE, L3_2LINESECTOR, 8, 1 * M, 64 },
125 { 0x25, CACHE, L3_2LINESECTOR, 8, 2 * M, 64 },
126 { 0x29, CACHE, L3_2LINESECTOR, 8, 4 * M, 64 },
127 { 0x2C, CACHE, L1_DATA, 8, 32 * K, 64 },
128 { 0x30, CACHE, L1_INST, 8, 32 * K, 64 },
129 { 0x40, CACHE, L2, NA, 0, NA },
130 { 0x41, CACHE, L2, 4, 128 * K, 32 },
131 { 0x42, CACHE, L2, 4, 256 * K, 32 },
132 { 0x43, CACHE, L2, 4, 512 * K, 32 },
133 { 0x44, CACHE, L2, 4, 1 * M, 32 },
134 { 0x45, CACHE, L2, 4, 2 * M, 32 },
135 { 0x46, CACHE, L3, 4, 4 * M, 64 },
136 { 0x47, CACHE, L3, 8, 8 * M, 64 },
137 { 0x48, CACHE, L2, 12, 3 * M, 64 },
138 { 0x49, CACHE, L2, 16, 4 * M, 64 },
139 { 0x4A, CACHE, L3, 12, 6 * M, 64 },
140 { 0x4B, CACHE, L3, 16, 8 * M, 64 },
141 { 0x4C, CACHE, L3, 12, 12 * M, 64 },
142 { 0x4D, CACHE, L3, 16, 16 * M, 64 },
143 { 0x4E, CACHE, L2, 24, 6 * M, 64 },
144 { 0x4F, TLB, INST, NA, SMALL, 32 },
145 { 0x50, TLB, INST, NA, BOTH, 64 },
146 { 0x51, TLB, INST, NA, BOTH, 128 },
147 { 0x52, TLB, INST, NA, BOTH, 256 },
148 { 0x55, TLB, INST, FULLY, BOTH, 7 },
149 { 0x56, TLB, DATA0, 4, LARGE, 16 },
150 { 0x57, TLB, DATA0, 4, SMALL, 16 },
151 { 0x59, TLB, DATA0, FULLY, SMALL, 16 },
152 { 0x5A, TLB, DATA0, 4, LARGE, 32 },
153 { 0x5B, TLB, DATA, NA, BOTH, 64 },
154 { 0x5C, TLB, DATA, NA, BOTH, 128 },
155 { 0x5D, TLB, DATA, NA, BOTH, 256 },
156	{ 0x60, CACHE, L1, 8, 16 * K, 64 },
157 { 0x61, CACHE, L1, 4, 8 * K, 64 },
158 { 0x62, CACHE, L1, 4, 16 * K, 64 },
159 { 0x63, CACHE, L1, 4, 32 * K, 64 },
160 { 0x70, CACHE, TRACE, 8, 12 * K, NA },
161 { 0x71, CACHE, TRACE, 8, 16 * K, NA },
162 { 0x72, CACHE, TRACE, 8, 32 * K, NA },
163 { 0x76, TLB, INST, NA, BOTH, 8 },
164 { 0x78, CACHE, L2, 4, 1 * M, 64 },
165 { 0x79, CACHE, L2_2LINESECTOR, 8, 128 * K, 64 },
166 { 0x7A, CACHE, L2_2LINESECTOR, 8, 256 * K, 64 },
167 { 0x7B, CACHE, L2_2LINESECTOR, 8, 512 * K, 64 },
168 { 0x7C, CACHE, L2_2LINESECTOR, 8, 1 * M, 64 },
169 { 0x7D, CACHE, L2, 8, 2 * M, 64 },
170 { 0x7F, CACHE, L2, 2, 512 * K, 64 },
171 { 0x80, CACHE, L2, 8, 512 * K, 64 },
172 { 0x82, CACHE, L2, 8, 256 * K, 32 },
173 { 0x83, CACHE, L2, 8, 512 * K, 32 },
174 { 0x84, CACHE, L2, 8, 1 * M, 32 },
175 { 0x85, CACHE, L2, 8, 2 * M, 32 },
176 { 0x86, CACHE, L2, 4, 512 * K, 64 },
177 { 0x87, CACHE, L2, 8, 1 * M, 64 },
178 { 0xB0, TLB, INST, 4, SMALL, 128 },
179 { 0xB1, TLB, INST, 4, LARGE, 8 },
180 { 0xB2, TLB, INST, 4, SMALL, 64 },
181 { 0xB3, TLB, DATA, 4, SMALL, 128 },
182 { 0xB4, TLB, DATA1, 4, SMALL, 256 },
183 { 0xB5, TLB, DATA1, 8, SMALL, 64 },
184 { 0xB6, TLB, DATA1, 8, SMALL, 128 },
185 { 0xBA, TLB, DATA1, 4, BOTH, 64 },
186 { 0xC1, STLB, DATA1, 8, SMALL, 1024},
187 { 0xCA, STLB, DATA1, 4, SMALL, 512 },
188 { 0xD0, CACHE, L3, 4, 512 * K, 64 },
189 { 0xD1, CACHE, L3, 4, 1 * M, 64 },
190 { 0xD2, CACHE, L3, 4, 2 * M, 64 },
191 { 0xD3, CACHE, L3, 4, 4 * M, 64 },
192 { 0xD4, CACHE, L3, 4, 8 * M, 64 },
193 { 0xD6, CACHE, L3, 8, 1 * M, 64 },
194 { 0xD7, CACHE, L3, 8, 2 * M, 64 },
195 { 0xD8, CACHE, L3, 8, 4 * M, 64 },
196 { 0xD9, CACHE, L3, 8, 8 * M, 64 },
197 { 0xDA, CACHE, L3, 8, 12 * M, 64 },
198 { 0xDC, CACHE, L3, 12, 1536 * K, 64 },
199 { 0xDD, CACHE, L3, 12, 3 * M, 64 },
200 { 0xDE, CACHE, L3, 12, 6 * M, 64 },
201 { 0xDF, CACHE, L3, 12, 12 * M, 64 },
202 { 0xE0, CACHE, L3, 12, 18 * M, 64 },
203 { 0xE2, CACHE, L3, 16, 2 * M, 64 },
204 { 0xE3, CACHE, L3, 16, 4 * M, 64 },
205 { 0xE4, CACHE, L3, 16, 8 * M, 64 },
206 { 0xE5, CACHE, L3, 16, 16 * M, 64 },
207 { 0xE6, CACHE, L3, 16, 24 * M, 64 },
208 { 0xF0, PREFETCH, NA, NA, 64, NA },
209 { 0xF1, PREFETCH, NA, NA, 128, NA },
210 { 0xFF, CACHE, NA, NA, 0, NA }
211 };
212 #define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
213 sizeof(cpuid_cache_descriptor_t))
214
215 static void do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave);
216
217 static inline cpuid_cache_descriptor_t *
218 cpuid_leaf2_find(uint8_t value)
219 {
220 unsigned int i;
221
222 for (i = 0; i < INTEL_LEAF2_DESC_NUM; i++) {
223 if (intel_cpuid_leaf2_descriptor_table[i].value == value) {
224 return &intel_cpuid_leaf2_descriptor_table[i];
225 }
226 }
227 return NULL;
228 }
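/*
 * For example, cpuid_leaf2_find(0xB2) returns the table entry describing a
 * 4-way instruction TLB for small pages with 64 entries, while
 * cpuid_leaf2_find(0x77) returns NULL because 0x77 is not in the table.
 */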
229
230 /*
231 * CPU identification routines.
232 */
233
234 static i386_cpu_info_t cpuid_cpu_info;
235 static i386_cpu_info_t *cpuid_cpu_infop = NULL;
236
237 static void
238 cpuid_fn(uint32_t selector, uint32_t *result)
239 {
240 do_cpuid(selector, result);
241 DBG("cpuid_fn(0x%08x) eax:0x%08x ebx:0x%08x ecx:0x%08x edx:0x%08x\n",
242 selector, result[0], result[1], result[2], result[3]);
243 }
244
245 static const char *cache_type_str[LCACHE_MAX] = {
246 "Lnone", "L1I", "L1D", "L2U", "L3U"
247 };
248
249 static void
250 do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave)
251 {
252 extern int force_thread_policy_tecs;
253
254 /*
255 * Workaround for reclaiming perf counter 3 due to TSX memory ordering erratum.
256 * This workaround does not support being forcibly set (since an MSR must be
257 * enumerated, lest we #GP when forced to access it.)
258  * When RTM_FORCE_ABORT is enabled, all RTM transactions on the logical CPU will
259 * forcefully abort, but the general purpose counter 3 will report correct values.
260 */
261 if (cpuid_wa_required(CPU_INTEL_TSXFA) == CWA_ON) {
262 wrmsr64(MSR_IA32_TSX_FORCE_ABORT,
263 rdmsr64(MSR_IA32_TSX_FORCE_ABORT) | MSR_IA32_TSXFA_RTM_FORCE_ABORT);
264 }
265
266 if (on_slave) {
267 return;
268 }
269
270 switch (cpuid_wa_required(CPU_INTEL_SEGCHK)) {
271 case CWA_FORCE_ON:
272 force_thread_policy_tecs = 1;
273
274 /* If hyperthreaded, enable idle workaround */
275 if (cpuinfo->thread_count > cpuinfo->core_count) {
276 force_tecs_at_idle = 1;
277 }
278
279 /*FALLTHROUGH*/
280 case CWA_ON:
281 tecs_mode_supported = 1;
282 break;
283
284 case CWA_FORCE_OFF:
285 case CWA_OFF:
286 tecs_mode_supported = 0;
287 force_tecs_at_idle = 0;
288 force_thread_policy_tecs = 0;
289 break;
290
291 default:
292 break;
293 }
294 }
295
296 void
297 cpuid_do_was(void)
298 {
299 do_cwas(cpuid_info(), TRUE);
300 }
301
302 /* this function is Intel-specific */
303 static void
304 cpuid_set_cache_info( i386_cpu_info_t * info_p )
305 {
306 uint32_t cpuid_result[4];
307 uint32_t reg[4];
308 uint32_t index;
309 uint32_t linesizes[LCACHE_MAX];
310 unsigned int i;
311 unsigned int j;
312 boolean_t cpuid_deterministic_supported = FALSE;
313
314 DBG("cpuid_set_cache_info(%p)\n", info_p);
315
316 bzero( linesizes, sizeof(linesizes));
317
318 /* Get processor cache descriptor info using leaf 2. We don't use
319 * this internally, but must publish it for KEXTs.
320 */
321 cpuid_fn(2, cpuid_result);
322 for (j = 0; j < 4; j++) {
323 if ((cpuid_result[j] >> 31) == 1) { /* bit31 is validity */
324 continue;
325 }
326 ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j];
327 }
328 /* first byte gives number of cpuid calls to get all descriptors */
329 for (i = 1; i < info_p->cache_info[0]; i++) {
330		if (i * 16 >= sizeof(info_p->cache_info)) {
331 break;
332 }
333 cpuid_fn(2, cpuid_result);
334 for (j = 0; j < 4; j++) {
335 if ((cpuid_result[j] >> 31) == 1) {
336 continue;
337 }
338 ((uint32_t *) info_p->cache_info)[4 * i + j] =
339 cpuid_result[j];
340 }
341 }
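	/*
	 * For example (hypothetical value): if the first CPUID(2) call returns
	 * EAX = 0x76036301, the low byte (0x01) indicates a single call suffices,
	 * and the remaining descriptor bytes (0x63, 0x03, 0x76) are each looked up
	 * individually in the table above.
	 */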
342
343 /*
344 * Get cache info using leaf 4, the "deterministic cache parameters."
345 * Most processors Mac OS X supports implement this flavor of CPUID.
346 * Loop over each cache on the processor.
347 */
348 cpuid_fn(0, cpuid_result);
349 if (cpuid_result[eax] >= 4) {
350 cpuid_deterministic_supported = TRUE;
351 }
352
353 for (index = 0; cpuid_deterministic_supported; index++) {
354 cache_type_t type = Lnone;
355 uint32_t cache_type;
356 uint32_t cache_level;
357 uint32_t cache_sharing;
358 uint32_t cache_linesize;
359 uint32_t cache_sets;
360 uint32_t cache_associativity;
361 uint32_t cache_size;
362 uint32_t cache_partitions;
363 uint32_t colors;
364
365 reg[eax] = 4; /* cpuid request 4 */
366 reg[ecx] = index; /* index starting at 0 */
367 cpuid(reg);
368 DBG("cpuid(4) index=%d eax=0x%x\n", index, reg[eax]);
369 cache_type = bitfield32(reg[eax], 4, 0);
370 if (cache_type == 0) {
371 break; /* no more caches */
372 }
373 cache_level = bitfield32(reg[eax], 7, 5);
374 cache_sharing = bitfield32(reg[eax], 25, 14) + 1;
375 info_p->cpuid_cores_per_package
376 = bitfield32(reg[eax], 31, 26) + 1;
377 cache_linesize = bitfield32(reg[ebx], 11, 0) + 1;
378 cache_partitions = bitfield32(reg[ebx], 21, 12) + 1;
379 cache_associativity = bitfield32(reg[ebx], 31, 22) + 1;
380 cache_sets = bitfield32(reg[ecx], 31, 0) + 1;
381
382 /* Map type/levels returned by CPUID into cache_type_t */
383 switch (cache_level) {
384 case 1:
385 type = cache_type == 1 ? L1D :
386 cache_type == 2 ? L1I :
387 Lnone;
388 break;
389 case 2:
390 type = cache_type == 3 ? L2U :
391 Lnone;
392 break;
393 case 3:
394 type = cache_type == 3 ? L3U :
395 Lnone;
396 break;
397 default:
398 type = Lnone;
399 }
400
401 /* The total size of a cache is:
402 * ( linesize * sets * associativity * partitions )
403 */
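		/*
		 * For example (illustrative values): a 64-byte linesize, 64 sets,
		 * 8-way associativity and 1 partition give 64 * 64 * 8 * 1 = 32 KB,
		 * a typical L1 data cache size.
		 */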
404 if (type != Lnone) {
405 cache_size = cache_linesize * cache_sets *
406 cache_associativity * cache_partitions;
407 info_p->cache_size[type] = cache_size;
408 info_p->cache_sharing[type] = cache_sharing;
409 info_p->cache_partitions[type] = cache_partitions;
410 linesizes[type] = cache_linesize;
411
412 DBG(" cache_size[%s] : %d\n",
413 cache_type_str[type], cache_size);
414 DBG(" cache_sharing[%s] : %d\n",
415 cache_type_str[type], cache_sharing);
416 DBG(" cache_partitions[%s]: %d\n",
417 cache_type_str[type], cache_partitions);
418
419 /*
420 * Overwrite associativity determined via
421 * CPUID.0x80000006 -- this leaf is more
422 * accurate
423 */
424 if (type == L2U) {
425 info_p->cpuid_cache_L2_associativity = cache_associativity;
426 }
427 /*
428			 * Adjust #sets to account for the N CBos,
429			 * because addresses are hashed across the CBos.
430 */
431 if (type == L3U && info_p->core_count) {
432 cache_sets = cache_sets / info_p->core_count;
433 }
434
435 /* Compute the number of page colors for this cache,
436 * which is:
437 * ( linesize * sets ) / page_size
438 *
439 * To help visualize this, consider two views of a
440 * physical address. To the cache, it is composed
441 * of a line offset, a set selector, and a tag.
442 * To VM, it is composed of a page offset, a page
443 * color, and other bits in the pageframe number:
444 *
445 * +-----------------+---------+--------+
446 * cache: | tag | set | offset |
447 * +-----------------+---------+--------+
448 *
449 * +-----------------+-------+----------+
450 * VM: | don't care | color | pg offset|
451 * +-----------------+-------+----------+
452 *
453 * The color is those bits in (set+offset) not covered
454 * by the page offset.
455 */
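			/*
			 * For example (illustrative values): an L3 with 64-byte lines
			 * and 8192 sets spans 64 * 8192 = 512 KB of (set, offset) bits;
			 * with 4 KB pages that yields 512 KB / 4 KB = 128 page colors.
			 */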
456 colors = (cache_linesize * cache_sets) >> 12;
457
458 if (colors > vm_cache_geometry_colors) {
459 vm_cache_geometry_colors = colors;
460 }
461 }
462 }
463 DBG(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
464
465 /*
466 * If deterministic cache parameters are not available, use
467 * something else
468 */
469 if (info_p->cpuid_cores_per_package == 0) {
470 info_p->cpuid_cores_per_package = 1;
471
472		/* CPUID reports this cache size in KB (1024-byte) quantities */
473 info_p->cache_size[L2U] = info_p->cpuid_cache_size * 1024;
474 info_p->cache_sharing[L2U] = 1;
475 info_p->cache_partitions[L2U] = 1;
476
477 linesizes[L2U] = info_p->cpuid_cache_linesize;
478
479 DBG(" cache_size[L2U] : %d\n",
480 info_p->cache_size[L2U]);
481 DBG(" cache_sharing[L2U] : 1\n");
482 DBG(" cache_partitions[L2U]: 1\n");
483 DBG(" linesizes[L2U] : %d\n",
484 info_p->cpuid_cache_linesize);
485 }
486
487 /*
488 * What linesize to publish? We use the L2 linesize if any,
489 * else the L1D.
490 */
491 if (linesizes[L2U]) {
492 info_p->cache_linesize = linesizes[L2U];
493 } else if (linesizes[L1D]) {
494 info_p->cache_linesize = linesizes[L1D];
495 } else {
496 panic("no linesize");
497 }
498 DBG(" cache_linesize : %d\n", info_p->cache_linesize);
499
500 /*
501 * Extract and publish TLB information from Leaf 2 descriptors.
502 */
503 DBG(" %ld leaf2 descriptors:\n", sizeof(info_p->cache_info));
504 for (i = 1; i < sizeof(info_p->cache_info); i++) {
505 cpuid_cache_descriptor_t *descp;
506 int id;
507 int level;
508 int page;
509
510 DBG(" 0x%02x", info_p->cache_info[i]);
511 descp = cpuid_leaf2_find(info_p->cache_info[i]);
512 if (descp == NULL) {
513 continue;
514 }
515
516 switch (descp->type) {
517 case TLB:
518 page = (descp->size == SMALL) ? TLB_SMALL : TLB_LARGE;
519 /* determine I or D: */
520 switch (descp->level) {
521 case INST:
522 id = TLB_INST;
523 break;
524 case DATA:
525 case DATA0:
526 case DATA1:
527 id = TLB_DATA;
528 break;
529 default:
530 continue;
531 }
532 /* determine level: */
533 switch (descp->level) {
534 case DATA1:
535 level = 1;
536 break;
537 default:
538 level = 0;
539 }
540 info_p->cpuid_tlb[id][page][level] = descp->entries;
541 break;
542 case STLB:
543 info_p->cpuid_stlb = descp->entries;
544 }
545 }
546 DBG("\n");
547 }
548
549 static void
550 cpuid_set_generic_info(i386_cpu_info_t *info_p)
551 {
552 uint32_t reg[4];
553 char str[128], *p;
554
555 DBG("cpuid_set_generic_info(%p)\n", info_p);
556
557 /* do cpuid 0 to get vendor */
558 cpuid_fn(0, reg);
559 info_p->cpuid_max_basic = reg[eax];
560 bcopy((char *)&reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
561 bcopy((char *)&reg[ecx], &info_p->cpuid_vendor[8], 4);
562 bcopy((char *)&reg[edx], &info_p->cpuid_vendor[4], 4);
563 info_p->cpuid_vendor[12] = 0;
564
565 /* get extended cpuid results */
566 cpuid_fn(0x80000000, reg);
567 info_p->cpuid_max_ext = reg[eax];
568
569 /* check to see if we can get brand string */
570 if (info_p->cpuid_max_ext >= 0x80000004) {
571 /*
572		 * The brand string is 48 bytes (max), guaranteed to
573 * be NUL terminated.
574 */
575 cpuid_fn(0x80000002, reg);
576 bcopy((char *)reg, &str[0], 16);
577 cpuid_fn(0x80000003, reg);
578 bcopy((char *)reg, &str[16], 16);
579 cpuid_fn(0x80000004, reg);
580 bcopy((char *)reg, &str[32], 16);
581 for (p = str; *p != '\0'; p++) {
582 if (*p != ' ') {
583 break;
584 }
585 }
586 strlcpy(info_p->cpuid_brand_string,
587 p, sizeof(info_p->cpuid_brand_string));
588
589 if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN,
590 min(sizeof(info_p->cpuid_brand_string),
591 strlen(CPUID_STRING_UNKNOWN) + 1))) {
592 /*
593 * This string means we have a firmware-programmable brand string,
594 * and the firmware couldn't figure out what sort of CPU we have.
595 */
596 info_p->cpuid_brand_string[0] = '\0';
597 }
598 }
599
600 /* Get cache and addressing info. */
601 if (info_p->cpuid_max_ext >= 0x80000006) {
602 uint32_t assoc;
603 cpuid_fn(0x80000006, reg);
604 info_p->cpuid_cache_linesize = bitfield32(reg[ecx], 7, 0);
605 assoc = bitfield32(reg[ecx], 15, 12);
606 /*
607 * L2 associativity is encoded, though in an insufficiently
608 * descriptive fashion, e.g. 24-way is mapped to 16-way.
609 * Represent a fully associative cache as 0xFFFF.
610 * Overwritten by associativity as determined via CPUID.4
611 * if available.
612 */
613 if (assoc == 6) {
614 assoc = 8;
615 } else if (assoc == 8) {
616 assoc = 16;
617 } else if (assoc == 0xF) {
618 assoc = 0xFFFF;
619 }
620 info_p->cpuid_cache_L2_associativity = assoc;
621 info_p->cpuid_cache_size = bitfield32(reg[ecx], 31, 16);
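		/*
		 * For example (hypothetical register value): ECX = 0x04008040 from
		 * CPUID.0x80000006 decodes as a 64-byte line size (bits 7:0 = 0x40),
		 * associativity code 8 (bits 15:12), which the mapping above widens
		 * to 16-way, and a 0x0400 = 1024 KB L2 cache (bits 31:16).
		 */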
622 cpuid_fn(0x80000008, reg);
623 info_p->cpuid_address_bits_physical =
624 bitfield32(reg[eax], 7, 0);
625 info_p->cpuid_address_bits_virtual =
626 bitfield32(reg[eax], 15, 8);
627 }
628
629 /*
630	 * Get the processor signature and decode it,
631	 * bracketing this with the approved procedure for reading
632	 * the microcode version number a.k.a. signature a.k.a. BIOS ID
633 */
634 wrmsr64(MSR_IA32_BIOS_SIGN_ID, 0);
635 cpuid_fn(1, reg);
636 info_p->cpuid_microcode_version =
637 (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
638 info_p->cpuid_signature = reg[eax];
639 info_p->cpuid_stepping = bitfield32(reg[eax], 3, 0);
640 info_p->cpuid_model = bitfield32(reg[eax], 7, 4);
641 info_p->cpuid_family = bitfield32(reg[eax], 11, 8);
642 info_p->cpuid_type = bitfield32(reg[eax], 13, 12);
643 info_p->cpuid_extmodel = bitfield32(reg[eax], 19, 16);
644 info_p->cpuid_extfamily = bitfield32(reg[eax], 27, 20);
645 info_p->cpuid_brand = bitfield32(reg[ebx], 7, 0);
646 info_p->cpuid_features = quad(reg[ecx], reg[edx]);
647
648 /* Get "processor flag"; necessary for microcode update matching */
649 info_p->cpuid_processor_flag = (rdmsr64(MSR_IA32_PLATFORM_ID) >> 50) & 0x7;
650
651 /* Fold extensions into family/model */
652 if (info_p->cpuid_family == 0x0f) {
653 info_p->cpuid_family += info_p->cpuid_extfamily;
654 }
655 if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06) {
656 info_p->cpuid_model += (info_p->cpuid_extmodel << 4);
657 }
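	/*
	 * For example, a hypothetical signature of 0x000906E9 (family 6,
	 * extended model 9, model 0xE, stepping 9) folds to
	 * cpuid_model = 0x0E + (9 << 4) = 0x9E.
	 */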
658
659 if (info_p->cpuid_features & CPUID_FEATURE_HTT) {
660 info_p->cpuid_logical_per_package =
661 bitfield32(reg[ebx], 23, 16);
662 } else {
663 info_p->cpuid_logical_per_package = 1;
664 }
665
666 if (info_p->cpuid_max_ext >= 0x80000001) {
667 cpuid_fn(0x80000001, reg);
668 info_p->cpuid_extfeatures =
669 quad(reg[ecx], reg[edx]);
670 }
671
672 DBG(" max_basic : %d\n", info_p->cpuid_max_basic);
673 DBG(" max_ext : 0x%08x\n", info_p->cpuid_max_ext);
674 DBG(" vendor : %s\n", info_p->cpuid_vendor);
675 DBG(" brand_string : %s\n", info_p->cpuid_brand_string);
676 DBG(" signature : 0x%08x\n", info_p->cpuid_signature);
677 DBG(" stepping : %d\n", info_p->cpuid_stepping);
678 DBG(" model : %d\n", info_p->cpuid_model);
679 DBG(" family : %d\n", info_p->cpuid_family);
680 DBG(" type : %d\n", info_p->cpuid_type);
681 DBG(" extmodel : %d\n", info_p->cpuid_extmodel);
682 DBG(" extfamily : %d\n", info_p->cpuid_extfamily);
683 DBG(" brand : %d\n", info_p->cpuid_brand);
684 DBG(" features : 0x%016llx\n", info_p->cpuid_features);
685 DBG(" extfeatures : 0x%016llx\n", info_p->cpuid_extfeatures);
686 DBG(" logical_per_package : %d\n", info_p->cpuid_logical_per_package);
687 DBG(" microcode_version : 0x%08x\n", info_p->cpuid_microcode_version);
688
689 /* Fold in the Invariant TSC feature bit, if present */
690 if (info_p->cpuid_max_ext >= 0x80000007) {
691 cpuid_fn(0x80000007, reg);
692 info_p->cpuid_extfeatures |=
693 reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
694 DBG(" extfeatures : 0x%016llx\n",
695 info_p->cpuid_extfeatures);
696 }
697
698 if (info_p->cpuid_max_basic >= 0x5) {
699 cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf;
700
701 /*
702 * Extract the Monitor/Mwait Leaf info:
703 */
704 cpuid_fn(5, reg);
705 cmp->linesize_min = reg[eax];
706 cmp->linesize_max = reg[ebx];
707 cmp->extensions = reg[ecx];
708 cmp->sub_Cstates = reg[edx];
709 info_p->cpuid_mwait_leafp = cmp;
710
711 DBG(" Monitor/Mwait Leaf:\n");
712 DBG(" linesize_min : %d\n", cmp->linesize_min);
713 DBG(" linesize_max : %d\n", cmp->linesize_max);
714 DBG(" extensions : %d\n", cmp->extensions);
715 DBG(" sub_Cstates : 0x%08x\n", cmp->sub_Cstates);
716 }
717
718 if (info_p->cpuid_max_basic >= 0x6) {
719 cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf;
720
721 /*
722 * The thermal and Power Leaf:
723 */
724 cpuid_fn(6, reg);
725 ctp->sensor = bitfield32(reg[eax], 0, 0);
726 ctp->dynamic_acceleration = bitfield32(reg[eax], 1, 1);
727 ctp->invariant_APIC_timer = bitfield32(reg[eax], 2, 2);
728 ctp->core_power_limits = bitfield32(reg[eax], 4, 4);
729 ctp->fine_grain_clock_mod = bitfield32(reg[eax], 5, 5);
730 ctp->package_thermal_intr = bitfield32(reg[eax], 6, 6);
731 ctp->thresholds = bitfield32(reg[ebx], 3, 0);
732 ctp->ACNT_MCNT = bitfield32(reg[ecx], 0, 0);
733 ctp->hardware_feedback = bitfield32(reg[ecx], 1, 1);
734 ctp->energy_policy = bitfield32(reg[ecx], 3, 3);
735 info_p->cpuid_thermal_leafp = ctp;
736
737 DBG(" Thermal/Power Leaf:\n");
738 DBG(" sensor : %d\n", ctp->sensor);
739 DBG(" dynamic_acceleration : %d\n", ctp->dynamic_acceleration);
740 DBG(" invariant_APIC_timer : %d\n", ctp->invariant_APIC_timer);
741 DBG(" core_power_limits : %d\n", ctp->core_power_limits);
742 DBG(" fine_grain_clock_mod : %d\n", ctp->fine_grain_clock_mod);
743 DBG(" package_thermal_intr : %d\n", ctp->package_thermal_intr);
744 DBG(" thresholds : %d\n", ctp->thresholds);
745 DBG(" ACNT_MCNT : %d\n", ctp->ACNT_MCNT);
746 DBG(" ACNT2 : %d\n", ctp->hardware_feedback);
747 DBG(" energy_policy : %d\n", ctp->energy_policy);
748 }
749
750 if (info_p->cpuid_max_basic >= 0xa) {
751 cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf;
752
753 /*
754 * Architectural Performance Monitoring Leaf:
755 */
756 cpuid_fn(0xa, reg);
757 capp->version = bitfield32(reg[eax], 7, 0);
758 capp->number = bitfield32(reg[eax], 15, 8);
759 capp->width = bitfield32(reg[eax], 23, 16);
760 capp->events_number = bitfield32(reg[eax], 31, 24);
761 capp->events = reg[ebx];
762 capp->fixed_number = bitfield32(reg[edx], 4, 0);
763 capp->fixed_width = bitfield32(reg[edx], 12, 5);
764 info_p->cpuid_arch_perf_leafp = capp;
765
766 DBG(" Architectural Performance Monitoring Leaf:\n");
767 DBG(" version : %d\n", capp->version);
768 DBG(" number : %d\n", capp->number);
769 DBG(" width : %d\n", capp->width);
770 DBG(" events_number : %d\n", capp->events_number);
771 DBG(" events : %d\n", capp->events);
772 DBG(" fixed_number : %d\n", capp->fixed_number);
773 DBG(" fixed_width : %d\n", capp->fixed_width);
774 }
775
776 if (info_p->cpuid_max_basic >= 0xd) {
777 cpuid_xsave_leaf_t *xsp;
778 /*
779 * XSAVE Features:
780 */
781 xsp = &info_p->cpuid_xsave_leaf[0];
782 info_p->cpuid_xsave_leafp = xsp;
783 xsp->extended_state[eax] = 0xd;
784 xsp->extended_state[ecx] = 0;
785 cpuid(xsp->extended_state);
786 DBG(" XSAVE Main leaf:\n");
787 DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
788 DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
789 DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
790 DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
791
792 xsp = &info_p->cpuid_xsave_leaf[1];
793 xsp->extended_state[eax] = 0xd;
794 xsp->extended_state[ecx] = 1;
795 cpuid(xsp->extended_state);
796 DBG(" XSAVE Sub-leaf1:\n");
797 DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
798 DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
799 DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
800 DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
801 }
802
803 if (info_p->cpuid_model >= CPUID_MODEL_IVYBRIDGE) {
804 /*
805 * Leaf7 Features:
806 */
807 cpuid_fn(0x7, reg);
808 info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]);
809 info_p->cpuid_leaf7_extfeatures = reg[edx];
810
811 DBG(" Feature Leaf7:\n");
812 DBG(" EBX : 0x%x\n", reg[ebx]);
813 DBG(" ECX : 0x%x\n", reg[ecx]);
814 DBG(" EDX : 0x%x\n", reg[edx]);
815 }
816
817 if (info_p->cpuid_max_basic >= 0x15) {
818 /*
819		 * TSC/CCC frequency leaf:
820 */
821 cpuid_fn(0x15, reg);
822 info_p->cpuid_tsc_leaf.denominator = reg[eax];
823 info_p->cpuid_tsc_leaf.numerator = reg[ebx];
824
825 DBG(" TSC/CCC Information Leaf:\n");
826 DBG(" numerator : 0x%x\n", reg[ebx]);
827 DBG(" denominator : 0x%x\n", reg[eax]);
828 }
829
830 return;
831 }
832
833 static uint32_t
834 cpuid_set_cpufamily(i386_cpu_info_t *info_p)
835 {
836 uint32_t cpufamily = CPUFAMILY_UNKNOWN;
837
838 switch (info_p->cpuid_family) {
839 case 6:
840 switch (info_p->cpuid_model) {
841 case 23:
842 cpufamily = CPUFAMILY_INTEL_PENRYN;
843 break;
844 case CPUID_MODEL_NEHALEM:
845 case CPUID_MODEL_FIELDS:
846 case CPUID_MODEL_DALES:
847 case CPUID_MODEL_NEHALEM_EX:
848 cpufamily = CPUFAMILY_INTEL_NEHALEM;
849 break;
850 case CPUID_MODEL_DALES_32NM:
851 case CPUID_MODEL_WESTMERE:
852 case CPUID_MODEL_WESTMERE_EX:
853 cpufamily = CPUFAMILY_INTEL_WESTMERE;
854 break;
855 case CPUID_MODEL_SANDYBRIDGE:
856 case CPUID_MODEL_JAKETOWN:
857 cpufamily = CPUFAMILY_INTEL_SANDYBRIDGE;
858 break;
859 case CPUID_MODEL_IVYBRIDGE:
860 case CPUID_MODEL_IVYBRIDGE_EP:
861 cpufamily = CPUFAMILY_INTEL_IVYBRIDGE;
862 break;
863 case CPUID_MODEL_HASWELL:
864 case CPUID_MODEL_HASWELL_EP:
865 case CPUID_MODEL_HASWELL_ULT:
866 case CPUID_MODEL_CRYSTALWELL:
867 cpufamily = CPUFAMILY_INTEL_HASWELL;
868 break;
869 case CPUID_MODEL_BROADWELL:
870 case CPUID_MODEL_BRYSTALWELL:
871 cpufamily = CPUFAMILY_INTEL_BROADWELL;
872 break;
873 case CPUID_MODEL_SKYLAKE:
874 case CPUID_MODEL_SKYLAKE_DT:
875 #if !defined(RC_HIDE_XNU_J137)
876 case CPUID_MODEL_SKYLAKE_W:
877 #endif
878 cpufamily = CPUFAMILY_INTEL_SKYLAKE;
879 break;
880 case CPUID_MODEL_KABYLAKE:
881 case CPUID_MODEL_KABYLAKE_DT:
882 cpufamily = CPUFAMILY_INTEL_KABYLAKE;
883 break;
884 }
885 break;
886 }
887
888 info_p->cpuid_cpufamily = cpufamily;
889 DBG("cpuid_set_cpufamily(%p) returning 0x%x\n", info_p, cpufamily);
890 return cpufamily;
891 }
892 /*
893 * Must be invoked either when executing single threaded, or with
894 * independent synchronization.
895 */
896 void
897 cpuid_set_info(void)
898 {
899 i386_cpu_info_t *info_p = &cpuid_cpu_info;
900 boolean_t enable_x86_64h = TRUE;
901
902 cpuid_set_generic_info(info_p);
903
904 /* verify we are running on a supported CPU */
905 if ((strncmp(CPUID_VID_INTEL, info_p->cpuid_vendor,
906 min(strlen(CPUID_STRING_UNKNOWN) + 1,
907 sizeof(info_p->cpuid_vendor)))) ||
908 (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN)) {
909 panic("Unsupported CPU");
910 }
911
912 info_p->cpuid_cpu_type = CPU_TYPE_X86;
913
914 if (!PE_parse_boot_argn("-enable_x86_64h", &enable_x86_64h, sizeof(enable_x86_64h))) {
915 boolean_t disable_x86_64h = FALSE;
916
917 if (PE_parse_boot_argn("-disable_x86_64h", &disable_x86_64h, sizeof(disable_x86_64h))) {
918 enable_x86_64h = FALSE;
919 }
920 }
921
922 if (enable_x86_64h &&
923 ((info_p->cpuid_features & CPUID_X86_64_H_FEATURE_SUBSET) == CPUID_X86_64_H_FEATURE_SUBSET) &&
924 ((info_p->cpuid_extfeatures & CPUID_X86_64_H_EXTFEATURE_SUBSET) == CPUID_X86_64_H_EXTFEATURE_SUBSET) &&
925 ((info_p->cpuid_leaf7_features & CPUID_X86_64_H_LEAF7_FEATURE_SUBSET) == CPUID_X86_64_H_LEAF7_FEATURE_SUBSET)) {
926 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_64_H;
927 } else {
928 info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
929 }
930 /* cpuid_set_cache_info must be invoked after set_generic_info */
931
932 if (info_p->cpuid_cpufamily == CPUFAMILY_INTEL_PENRYN) {
933 cpuid_set_cache_info(info_p);
934 }
935
936 /*
937 * Find the number of enabled cores and threads
938 * (which determines whether SMT/Hyperthreading is active).
939 */
940 switch (info_p->cpuid_cpufamily) {
941 case CPUFAMILY_INTEL_PENRYN:
942 info_p->core_count = info_p->cpuid_cores_per_package;
943 info_p->thread_count = info_p->cpuid_logical_per_package;
944 break;
945 case CPUFAMILY_INTEL_WESTMERE: {
946 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
947 info_p->core_count = bitfield32((uint32_t)msr, 19, 16);
948 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
949 break;
950 }
951 default: {
952 uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
953 if (msr == 0) {
954 /* Provide a non-zero default for some VMMs */
955 msr = (1 << 16) + 1;
956 }
957 info_p->core_count = bitfield32((uint32_t)msr, 31, 16);
958 info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
959 break;
960 }
961 }
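	/*
	 * For example (hypothetical MSR value): if MSR_CORE_THREAD_COUNT reads
	 * 0x00040008, the default case above reports 4 cores (bits 31:16) and
	 * 8 threads (bits 15:0), i.e. a hyperthreaded 4-core part.
	 */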
962 if (info_p->core_count == 0) {
963 info_p->core_count = info_p->cpuid_cores_per_package;
964 info_p->thread_count = info_p->cpuid_logical_per_package;
965 }
966
967 if (info_p->cpuid_cpufamily != CPUFAMILY_INTEL_PENRYN) {
968 cpuid_set_cache_info(info_p);
969 }
970
971 DBG("cpuid_set_info():\n");
972 DBG(" core_count : %d\n", info_p->core_count);
973 DBG(" thread_count : %d\n", info_p->thread_count);
974 DBG(" cpu_type: 0x%08x\n", info_p->cpuid_cpu_type);
975 DBG(" cpu_subtype: 0x%08x\n", info_p->cpuid_cpu_subtype);
976
977 info_p->cpuid_model_string = ""; /* deprecated */
978
979 do_cwas(info_p, FALSE);
980 }
981
982 static struct table {
983 uint64_t mask;
984 const char *name;
985 } feature_map[] = {
986 {CPUID_FEATURE_FPU, "FPU"},
987 {CPUID_FEATURE_VME, "VME"},
988 {CPUID_FEATURE_DE, "DE"},
989 {CPUID_FEATURE_PSE, "PSE"},
990 {CPUID_FEATURE_TSC, "TSC"},
991 {CPUID_FEATURE_MSR, "MSR"},
992 {CPUID_FEATURE_PAE, "PAE"},
993 {CPUID_FEATURE_MCE, "MCE"},
994 {CPUID_FEATURE_CX8, "CX8"},
995 {CPUID_FEATURE_APIC, "APIC"},
996 {CPUID_FEATURE_SEP, "SEP"},
997 {CPUID_FEATURE_MTRR, "MTRR"},
998 {CPUID_FEATURE_PGE, "PGE"},
999 {CPUID_FEATURE_MCA, "MCA"},
1000 {CPUID_FEATURE_CMOV, "CMOV"},
1001 {CPUID_FEATURE_PAT, "PAT"},
1002 {CPUID_FEATURE_PSE36, "PSE36"},
1003 {CPUID_FEATURE_PSN, "PSN"},
1004 {CPUID_FEATURE_CLFSH, "CLFSH"},
1005 {CPUID_FEATURE_DS, "DS"},
1006 {CPUID_FEATURE_ACPI, "ACPI"},
1007 {CPUID_FEATURE_MMX, "MMX"},
1008 {CPUID_FEATURE_FXSR, "FXSR"},
1009 {CPUID_FEATURE_SSE, "SSE"},
1010 {CPUID_FEATURE_SSE2, "SSE2"},
1011 {CPUID_FEATURE_SS, "SS"},
1012 {CPUID_FEATURE_HTT, "HTT"},
1013 {CPUID_FEATURE_TM, "TM"},
1014 {CPUID_FEATURE_PBE, "PBE"},
1015 {CPUID_FEATURE_SSE3, "SSE3"},
1016 {CPUID_FEATURE_PCLMULQDQ, "PCLMULQDQ"},
1017 {CPUID_FEATURE_DTES64, "DTES64"},
1018 {CPUID_FEATURE_MONITOR, "MON"},
1019 {CPUID_FEATURE_DSCPL, "DSCPL"},
1020 {CPUID_FEATURE_VMX, "VMX"},
1021 {CPUID_FEATURE_SMX, "SMX"},
1022 {CPUID_FEATURE_EST, "EST"},
1023 {CPUID_FEATURE_TM2, "TM2"},
1024 {CPUID_FEATURE_SSSE3, "SSSE3"},
1025 {CPUID_FEATURE_CID, "CID"},
1026 {CPUID_FEATURE_FMA, "FMA"},
1027 {CPUID_FEATURE_CX16, "CX16"},
1028 {CPUID_FEATURE_xTPR, "TPR"},
1029 {CPUID_FEATURE_PDCM, "PDCM"},
1030 {CPUID_FEATURE_SSE4_1, "SSE4.1"},
1031 {CPUID_FEATURE_SSE4_2, "SSE4.2"},
1032 {CPUID_FEATURE_x2APIC, "x2APIC"},
1033 {CPUID_FEATURE_MOVBE, "MOVBE"},
1034 {CPUID_FEATURE_POPCNT, "POPCNT"},
1035 {CPUID_FEATURE_AES, "AES"},
1036 {CPUID_FEATURE_VMM, "VMM"},
1037 {CPUID_FEATURE_PCID, "PCID"},
1038 {CPUID_FEATURE_XSAVE, "XSAVE"},
1039 {CPUID_FEATURE_OSXSAVE, "OSXSAVE"},
1040 {CPUID_FEATURE_SEGLIM64, "SEGLIM64"},
1041 {CPUID_FEATURE_TSCTMR, "TSCTMR"},
1042 {CPUID_FEATURE_AVX1_0, "AVX1.0"},
1043 {CPUID_FEATURE_RDRAND, "RDRAND"},
1044 {CPUID_FEATURE_F16C, "F16C"},
1045 {0, 0}
1046 },
1047 extfeature_map[] = {
1048 {CPUID_EXTFEATURE_SYSCALL, "SYSCALL"},
1049 {CPUID_EXTFEATURE_XD, "XD"},
1050 {CPUID_EXTFEATURE_1GBPAGE, "1GBPAGE"},
1051 {CPUID_EXTFEATURE_EM64T, "EM64T"},
1052 {CPUID_EXTFEATURE_LAHF, "LAHF"},
1053 {CPUID_EXTFEATURE_LZCNT, "LZCNT"},
1054 {CPUID_EXTFEATURE_PREFETCHW, "PREFETCHW"},
1055 {CPUID_EXTFEATURE_RDTSCP, "RDTSCP"},
1056 {CPUID_EXTFEATURE_TSCI, "TSCI"},
1057 {0, 0}
1058 },
1059 leaf7_feature_map[] = {
1060 {CPUID_LEAF7_FEATURE_RDWRFSGS, "RDWRFSGS"},
1061 {CPUID_LEAF7_FEATURE_TSCOFF, "TSC_THREAD_OFFSET"},
1062 {CPUID_LEAF7_FEATURE_SGX, "SGX"},
1063 {CPUID_LEAF7_FEATURE_BMI1, "BMI1"},
1064 {CPUID_LEAF7_FEATURE_HLE, "HLE"},
1065 {CPUID_LEAF7_FEATURE_AVX2, "AVX2"},
1066 {CPUID_LEAF7_FEATURE_FDPEO, "FDPEO"},
1067 {CPUID_LEAF7_FEATURE_SMEP, "SMEP"},
1068 {CPUID_LEAF7_FEATURE_BMI2, "BMI2"},
1069 {CPUID_LEAF7_FEATURE_ERMS, "ERMS"},
1070 {CPUID_LEAF7_FEATURE_INVPCID, "INVPCID"},
1071 {CPUID_LEAF7_FEATURE_RTM, "RTM"},
1072 {CPUID_LEAF7_FEATURE_PQM, "PQM"},
1073 {CPUID_LEAF7_FEATURE_FPU_CSDS, "FPU_CSDS"},
1074 {CPUID_LEAF7_FEATURE_MPX, "MPX"},
1075 {CPUID_LEAF7_FEATURE_PQE, "PQE"},
1076 {CPUID_LEAF7_FEATURE_AVX512F, "AVX512F"},
1077 {CPUID_LEAF7_FEATURE_AVX512DQ, "AVX512DQ"},
1078 {CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"},
1079 {CPUID_LEAF7_FEATURE_ADX, "ADX"},
1080 {CPUID_LEAF7_FEATURE_SMAP, "SMAP"},
1081 {CPUID_LEAF7_FEATURE_AVX512IFMA, "AVX512IFMA"},
1082 {CPUID_LEAF7_FEATURE_CLFSOPT, "CLFSOPT"},
1083 {CPUID_LEAF7_FEATURE_CLWB, "CLWB"},
1084 {CPUID_LEAF7_FEATURE_IPT, "IPT"},
1085 {CPUID_LEAF7_FEATURE_AVX512CD, "AVX512CD"},
1086 {CPUID_LEAF7_FEATURE_SHA, "SHA"},
1087 {CPUID_LEAF7_FEATURE_AVX512BW, "AVX512BW"},
1088 {CPUID_LEAF7_FEATURE_AVX512VL, "AVX512VL"},
1089 {CPUID_LEAF7_FEATURE_PREFETCHWT1, "PREFETCHWT1"},
1090 {CPUID_LEAF7_FEATURE_AVX512VBMI, "AVX512VBMI"},
1091 {CPUID_LEAF7_FEATURE_UMIP, "UMIP"},
1092 {CPUID_LEAF7_FEATURE_PKU, "PKU"},
1093 {CPUID_LEAF7_FEATURE_OSPKE, "OSPKE"},
1094 {CPUID_LEAF7_FEATURE_WAITPKG, "WAITPKG"},
1095 {CPUID_LEAF7_FEATURE_GFNI, "GFNI"},
1096 {CPUID_LEAF7_FEATURE_AVX512VPCDQ, "AVX512VPCDQ"},
1097 {CPUID_LEAF7_FEATURE_RDPID, "RDPID"},
1098 {CPUID_LEAF7_FEATURE_CLDEMOTE, "CLDEMOTE"},
1099 {CPUID_LEAF7_FEATURE_MOVDIRI, "MOVDIRI"},
1100 {CPUID_LEAF7_FEATURE_MOVDIRI64B, "MOVDIRI64B"},
1101 {CPUID_LEAF7_FEATURE_SGXLC, "SGXLC"},
1102 {0, 0}
1103 },
1104 leaf7_extfeature_map[] = {
1105 { CPUID_LEAF7_EXTFEATURE_AVX5124VNNIW, "AVX5124VNNIW" },
1106 { CPUID_LEAF7_EXTFEATURE_AVX5124FMAPS, "AVX5124FMAPS" },
1107 { CPUID_LEAF7_EXTFEATURE_MDCLEAR, "MDCLEAR" },
1108 { CPUID_LEAF7_EXTFEATURE_TSXFA, "TSXFA" },
1109 { CPUID_LEAF7_EXTFEATURE_IBRS, "IBRS" },
1110 { CPUID_LEAF7_EXTFEATURE_STIBP, "STIBP" },
1111 { CPUID_LEAF7_EXTFEATURE_L1DF, "L1DF" },
1112 { CPUID_LEAF7_EXTFEATURE_ACAPMSR, "ACAPMSR" },
1113 { CPUID_LEAF7_EXTFEATURE_CCAPMSR, "CCAPMSR" },
1114 { CPUID_LEAF7_EXTFEATURE_SSBD, "SSBD" },
1115 {0, 0}
1116 };
1117
1118 static char *
1119 cpuid_get_names(struct table *map, uint64_t bits, char *buf, unsigned buf_len)
1120 {
1121 size_t len = 0;
1122 char *p = buf;
1123 int i;
1124
1125 for (i = 0; map[i].mask != 0; i++) {
1126 if ((bits & map[i].mask) == 0) {
1127 continue;
1128 }
1129 if (len && ((size_t) (p - buf) < (buf_len - 1))) {
1130 *p++ = ' ';
1131 }
1132 len = min(strlen(map[i].name), (size_t)((buf_len - 1) - (p - buf)));
1133 if (len == 0) {
1134 break;
1135 }
1136 bcopy(map[i].name, p, len);
1137 p += len;
1138 }
1139 *p = '\0';
1140 return buf;
1141 }
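/*
 * For example, cpuid_get_names(feature_map,
 * CPUID_FEATURE_FPU | CPUID_FEATURE_SSE2, buf, sizeof(buf)) writes "FPU SSE2"
 * into buf: names are emitted in table order, separated by single spaces, and
 * truncated to fit buf_len.
 */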
1142
1143 i386_cpu_info_t *
1144 cpuid_info(void)
1145 {
1146	/* Set up the cpuid_info structure lazily */
1147 if (cpuid_cpu_infop == NULL) {
1148 PE_parse_boot_argn("-cpuid", &cpuid_dbg, sizeof(cpuid_dbg));
1149 cpuid_set_info();
1150 cpuid_cpu_infop = &cpuid_cpu_info;
1151 }
1152 return cpuid_cpu_infop;
1153 }
1154
1155 char *
1156 cpuid_get_feature_names(uint64_t features, char *buf, unsigned buf_len)
1157 {
1158 return cpuid_get_names(feature_map, features, buf, buf_len);
1159 }
1160
1161 char *
1162 cpuid_get_extfeature_names(uint64_t extfeatures, char *buf, unsigned buf_len)
1163 {
1164 return cpuid_get_names(extfeature_map, extfeatures, buf, buf_len);
1165 }
1166
1167 char *
1168 cpuid_get_leaf7_feature_names(uint64_t features, char *buf, unsigned buf_len)
1169 {
1170 return cpuid_get_names(leaf7_feature_map, features, buf, buf_len);
1171 }
1172
1173 char *
1174 cpuid_get_leaf7_extfeature_names(uint64_t features, char *buf, unsigned buf_len)
1175 {
1176 return cpuid_get_names(leaf7_extfeature_map, features, buf, buf_len);
1177 }
1178
1179 void
1180 cpuid_feature_display(
1181 const char *header)
1182 {
1183 char buf[320];
1184
1185 kprintf("%s: %s", header,
1186 cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf)));
1187 if (cpuid_leaf7_features()) {
1188 kprintf(" %s", cpuid_get_leaf7_feature_names(
1189 cpuid_leaf7_features(), buf, sizeof(buf)));
1190 }
1191 if (cpuid_leaf7_extfeatures()) {
1192 kprintf(" %s", cpuid_get_leaf7_extfeature_names(
1193 cpuid_leaf7_extfeatures(), buf, sizeof(buf)));
1194 }
1195 kprintf("\n");
1196 if (cpuid_features() & CPUID_FEATURE_HTT) {
1197 #define s_if_plural(n) ((n > 1) ? "s" : "")
1198 kprintf(" HTT: %d core%s per package;"
1199 " %d logical cpu%s per package\n",
1200 cpuid_cpu_infop->cpuid_cores_per_package,
1201 s_if_plural(cpuid_cpu_infop->cpuid_cores_per_package),
1202 cpuid_cpu_infop->cpuid_logical_per_package,
1203 s_if_plural(cpuid_cpu_infop->cpuid_logical_per_package));
1204 }
1205 }
1206
1207 void
1208 cpuid_extfeature_display(
1209 const char *header)
1210 {
1211 char buf[256];
1212
1213 kprintf("%s: %s\n", header,
1214 cpuid_get_extfeature_names(cpuid_extfeatures(),
1215 buf, sizeof(buf)));
1216 }
1217
1218 void
1219 cpuid_cpu_display(
1220 const char *header)
1221 {
1222 if (cpuid_cpu_infop->cpuid_brand_string[0] != '\0') {
1223 kprintf("%s: %s\n", header, cpuid_cpu_infop->cpuid_brand_string);
1224 }
1225 }
1226
1227 unsigned int
1228 cpuid_family(void)
1229 {
1230 return cpuid_info()->cpuid_family;
1231 }
1232
1233 uint32_t
1234 cpuid_cpufamily(void)
1235 {
1236 return cpuid_info()->cpuid_cpufamily;
1237 }
1238
1239 cpu_type_t
1240 cpuid_cputype(void)
1241 {
1242 return cpuid_info()->cpuid_cpu_type;
1243 }
1244
1245 cpu_subtype_t
1246 cpuid_cpusubtype(void)
1247 {
1248 return cpuid_info()->cpuid_cpu_subtype;
1249 }
1250
1251 uint64_t
1252 cpuid_features(void)
1253 {
1254 static int checked = 0;
1255 char fpu_arg[20] = { 0 };
1256
1257 (void) cpuid_info();
1258 if (!checked) {
1259 /* check for boot-time fpu limitations */
1260 if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof(fpu_arg))) {
1261 printf("limiting fpu features to: %s\n", fpu_arg);
1262 if (!strncmp("387", fpu_arg, sizeof("387")) || !strncmp("mmx", fpu_arg, sizeof("mmx"))) {
1263 printf("no sse or sse2\n");
1264 cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE | CPUID_FEATURE_SSE2 | CPUID_FEATURE_FXSR);
1265 } else if (!strncmp("sse", fpu_arg, sizeof("sse"))) {
1266 printf("no sse2\n");
1267 cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE2);
1268 }
1269 }
1270 checked = 1;
1271 }
1272 return cpuid_cpu_infop->cpuid_features;
1273 }
1274
1275 uint64_t
1276 cpuid_extfeatures(void)
1277 {
1278 return cpuid_info()->cpuid_extfeatures;
1279 }
1280
1281 uint64_t
1282 cpuid_leaf7_features(void)
1283 {
1284 return cpuid_info()->cpuid_leaf7_features;
1285 }
1286
1287 uint64_t
1288 cpuid_leaf7_extfeatures(void)
1289 {
1290 return cpuid_info()->cpuid_leaf7_extfeatures;
1291 }
1292
1293 static i386_vmm_info_t *_cpuid_vmm_infop = NULL;
1294 static i386_vmm_info_t _cpuid_vmm_info;
1295
1296 static void
1297 cpuid_init_vmm_info(i386_vmm_info_t *info_p)
1298 {
1299 uint32_t reg[4];
1300 uint32_t max_vmm_leaf;
1301
1302 bzero(info_p, sizeof(*info_p));
1303
1304 if (!cpuid_vmm_present()) {
1305 return;
1306 }
1307
1308 DBG("cpuid_init_vmm_info(%p)\n", info_p);
1309
1310 /* do cpuid 0x40000000 to get VMM vendor */
1311 cpuid_fn(0x40000000, reg);
1312 max_vmm_leaf = reg[eax];
1313 bcopy((char *)&reg[ebx], &info_p->cpuid_vmm_vendor[0], 4);
1314 bcopy((char *)&reg[ecx], &info_p->cpuid_vmm_vendor[4], 4);
1315 bcopy((char *)&reg[edx], &info_p->cpuid_vmm_vendor[8], 4);
1316 info_p->cpuid_vmm_vendor[12] = '\0';
1317
1318 if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_VMWARE)) {
1319 /* VMware identification string: kb.vmware.com/kb/1009458 */
1320 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_VMWARE;
1321 } else if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_PARALLELS)) {
1322 /* Parallels identification string */
1323 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_PARALLELS;
1324 } else {
1325 info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_UNKNOWN;
1326 }
1327
1328 /* VMM generic leaves: https://lkml.org/lkml/2008/10/1/246 */
1329 if (max_vmm_leaf >= 0x40000010) {
1330 cpuid_fn(0x40000010, reg);
1331
1332 info_p->cpuid_vmm_tsc_frequency = reg[eax];
1333 info_p->cpuid_vmm_bus_frequency = reg[ebx];
1334 }
1335
1336 DBG(" vmm_vendor : %s\n", info_p->cpuid_vmm_vendor);
1337 DBG(" vmm_family : %u\n", info_p->cpuid_vmm_family);
1338 DBG(" vmm_bus_frequency : %u\n", info_p->cpuid_vmm_bus_frequency);
1339 DBG(" vmm_tsc_frequency : %u\n", info_p->cpuid_vmm_tsc_frequency);
1340 }
1341
1342 boolean_t
1343 cpuid_vmm_present(void)
1344 {
1345 return (cpuid_features() & CPUID_FEATURE_VMM) ? TRUE : FALSE;
1346 }
1347
1348 i386_vmm_info_t *
1349 cpuid_vmm_info(void)
1350 {
1351 if (_cpuid_vmm_infop == NULL) {
1352 cpuid_init_vmm_info(&_cpuid_vmm_info);
1353 _cpuid_vmm_infop = &_cpuid_vmm_info;
1354 }
1355 return _cpuid_vmm_infop;
1356 }
1357
1358 uint32_t
1359 cpuid_vmm_family(void)
1360 {
1361 return cpuid_vmm_info()->cpuid_vmm_family;
1362 }
1363
1364 cwa_classifier_e
1365 cpuid_wa_required(cpu_wa_e wa)
1366 {
1367 static uint64_t bootarg_cpu_wa_enables = 0;
1368 static uint64_t bootarg_cpu_wa_disables = 0;
1369 static int bootargs_overrides_processed = 0;
1370 i386_cpu_info_t *info_p = &cpuid_cpu_info;
1371
1372 if (!bootargs_overrides_processed) {
1373 if (!PE_parse_boot_argn("cwae", &bootarg_cpu_wa_enables, sizeof(bootarg_cpu_wa_enables))) {
1374 bootarg_cpu_wa_enables = 0;
1375 }
1376
1377 if (!PE_parse_boot_argn("cwad", &bootarg_cpu_wa_disables, sizeof(bootarg_cpu_wa_disables))) {
1378 bootarg_cpu_wa_disables = 0;
1379 }
1380 bootargs_overrides_processed = 1;
1381 }
1382
1383 if (bootarg_cpu_wa_enables & (1 << wa)) {
1384 return CWA_FORCE_ON;
1385 }
1386
1387 if (bootarg_cpu_wa_disables & (1 << wa)) {
1388 return CWA_FORCE_OFF;
1389 }
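	/*
	 * For example, booting with cwae=0x1 sets bit 0 of bootarg_cpu_wa_enables
	 * and forces CWA_FORCE_ON for the workaround whose cpu_wa_e value is 0;
	 * which workaround that is depends on the enum ordering in the header.
	 */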
1390
1391 switch (wa) {
1392 case CPU_INTEL_SEGCHK:
1393 /* First, check to see if this CPU requires the workaround */
1394 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_ACAPMSR) != 0) {
1395 /* We have ARCHCAP, so check it for either RDCL_NO or MDS_NO */
1396 uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES);
1397 if ((archcap_msr & (MSR_IA32_ARCH_CAPABILITIES_RDCL_NO | MSR_IA32_ARCH_CAPABILITIES_MDS_NO)) != 0) {
1398 /* Workaround not needed */
1399 return CWA_OFF;
1400 }
1401 }
1402
1403 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_MDCLEAR) != 0) {
1404 return CWA_ON;
1405 }
1406
1407 /*
1408	 * If the CPU supports the ARCHCAP MSR but neither the RDCL_NO bit nor the
1409	 * MDS_NO bit is set, or the CPU does not support the ARCHCAP MSR at all, and
1410	 * in either case the CPU does not enumerate the enhanced VERW (MD_CLEAR)
1411	 * support, report that the workaround should not be enabled.
1412 */
1413 break;
1414
1415 case CPU_INTEL_TSXFA:
1416 /*
1417 * If this CPU supports RTM and supports FORCE_ABORT, return that
1418 * the workaround should be enabled.
1419 */
1420 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_TSXFA) != 0 &&
1421 (info_p->cpuid_leaf7_features & CPUID_LEAF7_FEATURE_RTM) != 0) {
1422 return CWA_ON;
1423 }
1424 break;
1425
1426 default:
1427 break;
1428 }
1429
1430 return CWA_OFF;
1431 }