]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/cpuid.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / i386 / cpuid.c
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 #include <vm/vm_page.h>
32 #include <pexpert/pexpert.h>
33
34 #include <i386/cpu_threads.h>
35 #include <i386/cpuid.h>
36 #include <i386/machine_routines.h>
37
/* Set when the SEGCHK/TECS workaround must also be applied at idle (see do_cwas). */
int force_tecs_at_idle;
/* Non-zero when TECS mitigation mode is available on this CPU (see do_cwas). */
int tecs_mode_supported;

/* Gates the DBG() tracing below; enabled only in DEBUG kernels. */
static boolean_t cpuid_dbg
#if DEBUG
    = TRUE;
#else
    = FALSE;
#endif
#define DBG(x...) \
	do { \
		if (cpuid_dbg) \
			kprintf(x); \
	} while (0) \

#define min(a, b) ((a) < (b) ? (a) : (b))
/* Assemble two 32-bit register values into one 64-bit quantity (hi:lo). */
#define quad(hi, lo) (((uint64_t)(hi)) << 32 | (lo))
55
/*
 * Leaf 2 cache descriptor encodings.
 * Each one-byte descriptor returned by CPUID leaf 2 is classified by
 * the type/qualifier pair below and decoded via the table that follows.
 */
typedef enum {
	_NULL_,                 /* NULL (empty) descriptor */
	CACHE,                  /* Cache */
	TLB,                    /* TLB */
	STLB,                   /* Shared second-level unified TLB */
	PREFETCH                /* Prefetch size */
} cpuid_leaf2_desc_type_t;

typedef enum {
	NA,                     /* Not Applicable */
	FULLY,                  /* Fully-associative */
	TRACE,                  /* Trace Cache (P4 only) */
	INST,                   /* Instruction TLB */
	DATA,                   /* Data TLB */
	DATA0,                  /* Data TLB, 1st level */
	DATA1,                  /* Data TLB, 2nd level */
	L1,                     /* L1 (unified) cache */
	L1_INST,                /* L1 Instruction cache */
	L1_DATA,                /* L1 Data cache */
	L2,                     /* L2 (unified) cache */
	L3,                     /* L3 (unified) cache */
	L2_2LINESECTOR,         /* L2 (unified) cache with 2 lines per sector */
	L3_2LINESECTOR,         /* L3 (unified) cache with 2 lines per sector */
	SMALL,                  /* Small page TLB */
	LARGE,                  /* Large page TLB */
	BOTH                    /* Small and Large page TLB */
} cpuid_leaf2_qualifier_t;

/* One decoded leaf-2 descriptor; rows of the Intel table below. */
typedef struct cpuid_cache_descriptor {
	uint8_t         value;          /* descriptor code */
	uint8_t         type;           /* cpuid_leaf2_desc_type_t */
	uint8_t         level;          /* level of cache/TLB hierarchy */
	uint8_t         ways;           /* wayness of cache */
	uint16_t        size;           /* cachesize or TLB pagesize */
	uint16_t        entries;        /* number of TLB entries or linesize */
} cpuid_cache_descriptor_t;
95
/*
 * These multipliers are used to encode 1*K .. 64*M in a 16 bit size field
 * (i.e. the size column is expressed in KiB units).
 */
#define K (1)
#define M (1024)

/*
 * Intel cache descriptor table:
 * Maps each CPUID leaf-2 descriptor byte to its meaning; consumed by
 * cpuid_leaf2_find().  For TLB rows, "size" is the page-size qualifier
 * and "entries" is the TLB entry count; for CACHE rows, "size" is the
 * capacity (in K/M units above) and "entries" is the line size.
 */
static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table[] = {
	//      -------------------------------------------------------
	//      value   type    level           ways    size    entries
	//      -------------------------------------------------------
	{ 0x00, _NULL_, NA, NA, NA, NA },
	{ 0x01, TLB, INST, 4, SMALL, 32 },
	{ 0x02, TLB, INST, FULLY, LARGE, 2 },
	{ 0x03, TLB, DATA, 4, SMALL, 64 },
	{ 0x04, TLB, DATA, 4, LARGE, 8 },
	{ 0x05, TLB, DATA1, 4, LARGE, 32 },
	{ 0x06, CACHE, L1_INST, 4, 8 * K, 32 },
	{ 0x08, CACHE, L1_INST, 4, 16 * K, 32 },
	{ 0x09, CACHE, L1_INST, 4, 32 * K, 64 },
	{ 0x0A, CACHE, L1_DATA, 2, 8 * K, 32 },
	{ 0x0B, TLB, INST, 4, LARGE, 4 },
	{ 0x0C, CACHE, L1_DATA, 4, 16 * K, 32 },
	{ 0x0D, CACHE, L1_DATA, 4, 16 * K, 64 },
	{ 0x0E, CACHE, L1_DATA, 6, 24 * K, 64 },
	{ 0x21, CACHE, L2, 8, 256 * K, 64 },
	{ 0x22, CACHE, L3_2LINESECTOR, 4, 512 * K, 64 },
	{ 0x23, CACHE, L3_2LINESECTOR, 8, 1 * M, 64 },
	{ 0x25, CACHE, L3_2LINESECTOR, 8, 2 * M, 64 },
	{ 0x29, CACHE, L3_2LINESECTOR, 8, 4 * M, 64 },
	{ 0x2C, CACHE, L1_DATA, 8, 32 * K, 64 },
	{ 0x30, CACHE, L1_INST, 8, 32 * K, 64 },
	{ 0x40, CACHE, L2, NA, 0, NA },
	{ 0x41, CACHE, L2, 4, 128 * K, 32 },
	{ 0x42, CACHE, L2, 4, 256 * K, 32 },
	{ 0x43, CACHE, L2, 4, 512 * K, 32 },
	{ 0x44, CACHE, L2, 4, 1 * M, 32 },
	{ 0x45, CACHE, L2, 4, 2 * M, 32 },
	{ 0x46, CACHE, L3, 4, 4 * M, 64 },
	{ 0x47, CACHE, L3, 8, 8 * M, 64 },
	{ 0x48, CACHE, L2, 12, 3 * M, 64 },
	{ 0x49, CACHE, L2, 16, 4 * M, 64 },
	{ 0x4A, CACHE, L3, 12, 6 * M, 64 },
	{ 0x4B, CACHE, L3, 16, 8 * M, 64 },
	{ 0x4C, CACHE, L3, 12, 12 * M, 64 },
	{ 0x4D, CACHE, L3, 16, 16 * M, 64 },
	{ 0x4E, CACHE, L2, 24, 6 * M, 64 },
	{ 0x4F, TLB, INST, NA, SMALL, 32 },
	{ 0x50, TLB, INST, NA, BOTH, 64 },
	{ 0x51, TLB, INST, NA, BOTH, 128 },
	{ 0x52, TLB, INST, NA, BOTH, 256 },
	{ 0x55, TLB, INST, FULLY, BOTH, 7 },
	{ 0x56, TLB, DATA0, 4, LARGE, 16 },
	{ 0x57, TLB, DATA0, 4, SMALL, 16 },
	{ 0x59, TLB, DATA0, FULLY, SMALL, 16 },
	{ 0x5A, TLB, DATA0, 4, LARGE, 32 },
	{ 0x5B, TLB, DATA, NA, BOTH, 64 },
	{ 0x5C, TLB, DATA, NA, BOTH, 128 },
	{ 0x5D, TLB, DATA, NA, BOTH, 256 },
	/*
	 * NOTE(review): for 0x60 the ways/size columns appear transposed
	 * relative to every other CACHE row (reads as 16K-way, 8-"KB");
	 * SDM describes 0x60 as an 8-way 16KB L1D -- confirm before relying
	 * on these two fields for this descriptor.
	 */
	{ 0x60, CACHE, L1, 16 * K, 8, 64 },
	{ 0x61, CACHE, L1, 4, 8 * K, 64 },
	{ 0x62, CACHE, L1, 4, 16 * K, 64 },
	{ 0x63, CACHE, L1, 4, 32 * K, 64 },
	{ 0x70, CACHE, TRACE, 8, 12 * K, NA },
	{ 0x71, CACHE, TRACE, 8, 16 * K, NA },
	{ 0x72, CACHE, TRACE, 8, 32 * K, NA },
	{ 0x76, TLB, INST, NA, BOTH, 8 },
	{ 0x78, CACHE, L2, 4, 1 * M, 64 },
	{ 0x79, CACHE, L2_2LINESECTOR, 8, 128 * K, 64 },
	{ 0x7A, CACHE, L2_2LINESECTOR, 8, 256 * K, 64 },
	{ 0x7B, CACHE, L2_2LINESECTOR, 8, 512 * K, 64 },
	{ 0x7C, CACHE, L2_2LINESECTOR, 8, 1 * M, 64 },
	{ 0x7D, CACHE, L2, 8, 2 * M, 64 },
	{ 0x7F, CACHE, L2, 2, 512 * K, 64 },
	{ 0x80, CACHE, L2, 8, 512 * K, 64 },
	{ 0x82, CACHE, L2, 8, 256 * K, 32 },
	{ 0x83, CACHE, L2, 8, 512 * K, 32 },
	{ 0x84, CACHE, L2, 8, 1 * M, 32 },
	{ 0x85, CACHE, L2, 8, 2 * M, 32 },
	{ 0x86, CACHE, L2, 4, 512 * K, 64 },
	{ 0x87, CACHE, L2, 8, 1 * M, 64 },
	{ 0xB0, TLB, INST, 4, SMALL, 128 },
	{ 0xB1, TLB, INST, 4, LARGE, 8 },
	{ 0xB2, TLB, INST, 4, SMALL, 64 },
	{ 0xB3, TLB, DATA, 4, SMALL, 128 },
	{ 0xB4, TLB, DATA1, 4, SMALL, 256 },
	{ 0xB5, TLB, DATA1, 8, SMALL, 64 },
	{ 0xB6, TLB, DATA1, 8, SMALL, 128 },
	{ 0xBA, TLB, DATA1, 4, BOTH, 64 },
	{ 0xC1, STLB, DATA1, 8, SMALL, 1024},
	{ 0xCA, STLB, DATA1, 4, SMALL, 512 },
	{ 0xD0, CACHE, L3, 4, 512 * K, 64 },
	{ 0xD1, CACHE, L3, 4, 1 * M, 64 },
	{ 0xD2, CACHE, L3, 4, 2 * M, 64 },
	{ 0xD3, CACHE, L3, 4, 4 * M, 64 },
	{ 0xD4, CACHE, L3, 4, 8 * M, 64 },
	{ 0xD6, CACHE, L3, 8, 1 * M, 64 },
	{ 0xD7, CACHE, L3, 8, 2 * M, 64 },
	{ 0xD8, CACHE, L3, 8, 4 * M, 64 },
	{ 0xD9, CACHE, L3, 8, 8 * M, 64 },
	{ 0xDA, CACHE, L3, 8, 12 * M, 64 },
	{ 0xDC, CACHE, L3, 12, 1536 * K, 64 },
	{ 0xDD, CACHE, L3, 12, 3 * M, 64 },
	{ 0xDE, CACHE, L3, 12, 6 * M, 64 },
	{ 0xDF, CACHE, L3, 12, 12 * M, 64 },
	{ 0xE0, CACHE, L3, 12, 18 * M, 64 },
	{ 0xE2, CACHE, L3, 16, 2 * M, 64 },
	{ 0xE3, CACHE, L3, 16, 4 * M, 64 },
	{ 0xE4, CACHE, L3, 16, 8 * M, 64 },
	{ 0xE5, CACHE, L3, 16, 16 * M, 64 },
	{ 0xE6, CACHE, L3, 16, 24 * M, 64 },
	{ 0xF0, PREFETCH, NA, NA, 64, NA },
	{ 0xF1, PREFETCH, NA, NA, 128, NA },
	{ 0xFF, CACHE, NA, NA, 0, NA }
};
/* Number of rows in the table above. */
#define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
	sizeof(cpuid_cache_descriptor_t))
215
/* TSX state published by this file; written in do_cwas()/cpuid_set_generic_info(). */
boolean_t cpuid_tsx_disabled = false; /* true if XNU disabled TSX */
boolean_t cpuid_tsx_supported = false;

/* Apply per-CPU workarounds; on_slave is TRUE for non-boot logical CPUs. */
static void do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave);
/* Workarounds that must run before any cpuid results are consumed. */
static void cpuid_do_precpuid_was(void);

#if DEBUG || DEVELOPMENT
static void cpuid_vmm_detect_pv_interface(i386_vmm_info_t *info_p, const char *signature,
    bool (*)(i386_vmm_info_t*, const uint32_t, const uint32_t));
static bool cpuid_vmm_detect_applepv_features(i386_vmm_info_t *info_p, const uint32_t base, const uint32_t max_leaf);
#endif /* DEBUG || DEVELOPMENT */
227
228 static inline cpuid_cache_descriptor_t *
229 cpuid_leaf2_find(uint8_t value)
230 {
231 unsigned int i;
232
233 for (i = 0; i < INTEL_LEAF2_DESC_NUM; i++) {
234 if (intel_cpuid_leaf2_descriptor_table[i].value == value) {
235 return &intel_cpuid_leaf2_descriptor_table[i];
236 }
237 }
238 return NULL;
239 }
240
/*
 * CPU identification routines.
 */

/* The single canonical copy of CPU info; filled in by cpuid_set_info(). */
static i386_cpu_info_t cpuid_cpu_info;
/* Points at cpuid_cpu_info once published; NULL until then. */
static i386_cpu_info_t *cpuid_cpu_infop = NULL;
247
/*
 * Execute CPUID with the given leaf selector and trace the four result
 * registers when cpuid_dbg is enabled.  result must point to a
 * 4-element array ordered eax, ebx, ecx, edx.
 */
static void
cpuid_fn(uint32_t selector, uint32_t *result)
{
	do_cpuid(selector, result);
	DBG("cpuid_fn(0x%08x) eax:0x%08x ebx:0x%08x ecx:0x%08x edx:0x%08x\n",
	    selector, result[0], result[1], result[2], result[3]);
}
255
/* Printable names for cache_type_t values (indexed by type); DBG output only. */
static const char *cache_type_str[LCACHE_MAX] = {
	"Lnone", "L1I", "L1D", "L2U", "L3U"
};
259
/*
 * Apply CPU workarounds ("was") on the calling logical processor.
 * The MSR writes in the first half must run on every logical CPU;
 * the TECS policy flags at the end are global and are only updated
 * by the boot CPU (on_slave == FALSE).
 */
static void
do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave)
{
	extern int force_thread_policy_tecs;
	cwa_classifier_e wa_reqd;

	/*
	 * Workaround for reclaiming perf counter 3 due to TSX memory ordering erratum.
	 * This workaround does not support being forcibly set (since an MSR must be
	 * enumerated, lest we #GP when forced to access it.)
	 *
	 * Note that if disabling TSX is supported, disablement is preferred over forcing
	 * TSX transactions to abort.
	 */
	if (cpuid_wa_required(CPU_INTEL_TSXDA) == CWA_ON) {
		/* This must be executed on all logical processors */
		wrmsr64(MSR_IA32_TSX_CTRL, MSR_IA32_TSXCTRL_TSX_CPU_CLEAR | MSR_IA32_TSXCTRL_RTM_DISABLE);
	} else if (cpuid_wa_required(CPU_INTEL_TSXFA) == CWA_ON) {
		/* This must be executed on all logical processors */
		wrmsr64(MSR_IA32_TSX_FORCE_ABORT,
		    rdmsr64(MSR_IA32_TSX_FORCE_ABORT) | MSR_IA32_TSXFA_RTM_FORCE_ABORT);
	}

	/*
	 * SRBDS mitigation control: apply when required and either forced on
	 * or the SRBDS_CTRL MSR interface is enumerated in leaf 7 edx.
	 */
	if (((wa_reqd = cpuid_wa_required(CPU_INTEL_SRBDS)) & CWA_ON) != 0 &&
	    ((wa_reqd & CWA_FORCE_ON) == CWA_ON ||
	    (cpuinfo->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_SRBDS_CTRL) != 0)) {
		/* This must be executed on all logical processors */
		uint64_t mcuoptctrl = rdmsr64(MSR_IA32_MCU_OPT_CTRL);
		mcuoptctrl |= MSR_IA32_MCUOPTCTRL_RNGDS_MITG_DIS;
		wrmsr64(MSR_IA32_MCU_OPT_CTRL, mcuoptctrl);
	}

	/* Global TECS policy below is owned by the boot CPU only. */
	if (on_slave) {
		return;
	}

	switch (cpuid_wa_required(CPU_INTEL_SEGCHK)) {
	case CWA_FORCE_ON:
		force_thread_policy_tecs = 1;

		/* If hyperthreaded, enable idle workaround */
		if (cpuinfo->thread_count > cpuinfo->core_count) {
			force_tecs_at_idle = 1;
		}

		OS_FALLTHROUGH;
	case CWA_ON:
		tecs_mode_supported = 1;
		break;

	case CWA_FORCE_OFF:
	case CWA_OFF:
		tecs_mode_supported = 0;
		force_tecs_at_idle = 0;
		force_thread_policy_tecs = 0;
		break;

	default:
		break;
	}
}
321
/*
 * Entry point for secondary (non-boot) CPUs to apply the per-CPU
 * workarounds; passes on_slave=TRUE so global policy state set up by
 * the boot CPU (via do_cwas(..., FALSE) in cpuid_set_info) is untouched.
 */
void
cpuid_do_was(void)
{
	do_cwas(cpuid_info(), TRUE);
}
327
328 /* this function is Intel-specific */
329 static void
330 cpuid_set_cache_info( i386_cpu_info_t * info_p )
331 {
332 uint32_t cpuid_result[4];
333 uint32_t reg[4];
334 uint32_t index;
335 uint32_t linesizes[LCACHE_MAX];
336 unsigned int i;
337 unsigned int j;
338 boolean_t cpuid_deterministic_supported = FALSE;
339
340 DBG("cpuid_set_cache_info(%p)\n", info_p);
341
342 bzero( linesizes, sizeof(linesizes));
343
344 /* Get processor cache descriptor info using leaf 2. We don't use
345 * this internally, but must publish it for KEXTs.
346 */
347 cpuid_fn(2, cpuid_result);
348 for (j = 0; j < 4; j++) {
349 if ((cpuid_result[j] >> 31) == 1) { /* bit31 is validity */
350 continue;
351 }
352 ((uint32_t *)(void *)info_p->cache_info)[j] = cpuid_result[j];
353 }
354 /* first byte gives number of cpuid calls to get all descriptors */
355 for (i = 1; i < info_p->cache_info[0]; i++) {
356 if (i * 16 > sizeof(info_p->cache_info)) {
357 break;
358 }
359 cpuid_fn(2, cpuid_result);
360 for (j = 0; j < 4; j++) {
361 if ((cpuid_result[j] >> 31) == 1) {
362 continue;
363 }
364 ((uint32_t *)(void *)info_p->cache_info)[4 * i + j] =
365 cpuid_result[j];
366 }
367 }
368
369 /*
370 * Get cache info using leaf 4, the "deterministic cache parameters."
371 * Most processors Mac OS X supports implement this flavor of CPUID.
372 * Loop over each cache on the processor.
373 */
374 cpuid_fn(0, cpuid_result);
375 if (cpuid_result[eax] >= 4) {
376 cpuid_deterministic_supported = TRUE;
377 }
378
379 for (index = 0; cpuid_deterministic_supported; index++) {
380 cache_type_t type = Lnone;
381 uint32_t cache_type;
382 uint32_t cache_level;
383 uint32_t cache_sharing;
384 uint32_t cache_linesize;
385 uint32_t cache_sets;
386 uint32_t cache_associativity;
387 uint32_t cache_size;
388 uint32_t cache_partitions;
389 uint32_t colors;
390
391 reg[eax] = 4; /* cpuid request 4 */
392 reg[ecx] = index; /* index starting at 0 */
393 cpuid(reg);
394 DBG("cpuid(4) index=%d eax=0x%x\n", index, reg[eax]);
395 cache_type = bitfield32(reg[eax], 4, 0);
396 if (cache_type == 0) {
397 break; /* no more caches */
398 }
399 cache_level = bitfield32(reg[eax], 7, 5);
400 cache_sharing = bitfield32(reg[eax], 25, 14) + 1;
401 info_p->cpuid_cores_per_package
402 = bitfield32(reg[eax], 31, 26) + 1;
403 cache_linesize = bitfield32(reg[ebx], 11, 0) + 1;
404 cache_partitions = bitfield32(reg[ebx], 21, 12) + 1;
405 cache_associativity = bitfield32(reg[ebx], 31, 22) + 1;
406 cache_sets = bitfield32(reg[ecx], 31, 0) + 1;
407
408 /* Map type/levels returned by CPUID into cache_type_t */
409 switch (cache_level) {
410 case 1:
411 type = cache_type == 1 ? L1D :
412 cache_type == 2 ? L1I :
413 Lnone;
414 break;
415 case 2:
416 type = cache_type == 3 ? L2U :
417 Lnone;
418 break;
419 case 3:
420 type = cache_type == 3 ? L3U :
421 Lnone;
422 break;
423 default:
424 type = Lnone;
425 }
426
427 /* The total size of a cache is:
428 * ( linesize * sets * associativity * partitions )
429 */
430 if (type != Lnone) {
431 cache_size = cache_linesize * cache_sets *
432 cache_associativity * cache_partitions;
433 info_p->cache_size[type] = cache_size;
434 info_p->cache_sharing[type] = cache_sharing;
435 info_p->cache_partitions[type] = cache_partitions;
436 linesizes[type] = cache_linesize;
437
438 DBG(" cache_size[%s] : %d\n",
439 cache_type_str[type], cache_size);
440 DBG(" cache_sharing[%s] : %d\n",
441 cache_type_str[type], cache_sharing);
442 DBG(" cache_partitions[%s]: %d\n",
443 cache_type_str[type], cache_partitions);
444
445 /*
446 * Overwrite associativity determined via
447 * CPUID.0x80000006 -- this leaf is more
448 * accurate
449 */
450 if (type == L2U) {
451 info_p->cpuid_cache_L2_associativity = cache_associativity;
452 }
453 /*
454 * Adjust #sets to account for the N CBos
455 * This is because addresses are hashed across CBos
456 */
457 if (type == L3U && info_p->core_count) {
458 cache_sets = cache_sets / info_p->core_count;
459 }
460
461 /* Compute the number of page colors for this cache,
462 * which is:
463 * ( linesize * sets ) / page_size
464 *
465 * To help visualize this, consider two views of a
466 * physical address. To the cache, it is composed
467 * of a line offset, a set selector, and a tag.
468 * To VM, it is composed of a page offset, a page
469 * color, and other bits in the pageframe number:
470 *
471 * +-----------------+---------+--------+
472 * cache: | tag | set | offset |
473 * +-----------------+---------+--------+
474 *
475 * +-----------------+-------+----------+
476 * VM: | don't care | color | pg offset|
477 * +-----------------+-------+----------+
478 *
479 * The color is those bits in (set+offset) not covered
480 * by the page offset.
481 */
482 colors = (cache_linesize * cache_sets) >> 12;
483
484 if (colors > vm_cache_geometry_colors) {
485 vm_cache_geometry_colors = colors;
486 }
487 }
488 }
489 DBG(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
490
491 /*
492 * If deterministic cache parameters are not available, use
493 * something else
494 */
495 if (info_p->cpuid_cores_per_package == 0) {
496 info_p->cpuid_cores_per_package = 1;
497
498 /* cpuid define in 1024 quantities */
499 info_p->cache_size[L2U] = info_p->cpuid_cache_size * 1024;
500 info_p->cache_sharing[L2U] = 1;
501 info_p->cache_partitions[L2U] = 1;
502
503 linesizes[L2U] = info_p->cpuid_cache_linesize;
504
505 DBG(" cache_size[L2U] : %d\n",
506 info_p->cache_size[L2U]);
507 DBG(" cache_sharing[L2U] : 1\n");
508 DBG(" cache_partitions[L2U]: 1\n");
509 DBG(" linesizes[L2U] : %d\n",
510 info_p->cpuid_cache_linesize);
511 }
512
513 /*
514 * What linesize to publish? We use the L2 linesize if any,
515 * else the L1D.
516 */
517 if (linesizes[L2U]) {
518 info_p->cache_linesize = linesizes[L2U];
519 } else if (linesizes[L1D]) {
520 info_p->cache_linesize = linesizes[L1D];
521 } else {
522 panic("no linesize");
523 }
524 DBG(" cache_linesize : %d\n", info_p->cache_linesize);
525
526 /*
527 * Extract and publish TLB information from Leaf 2 descriptors.
528 */
529 DBG(" %ld leaf2 descriptors:\n", sizeof(info_p->cache_info));
530 for (i = 1; i < sizeof(info_p->cache_info); i++) {
531 cpuid_cache_descriptor_t *descp;
532 int id;
533 int level;
534 int page;
535
536 DBG(" 0x%02x", info_p->cache_info[i]);
537 descp = cpuid_leaf2_find(info_p->cache_info[i]);
538 if (descp == NULL) {
539 continue;
540 }
541
542 switch (descp->type) {
543 case TLB:
544 page = (descp->size == SMALL) ? TLB_SMALL : TLB_LARGE;
545 /* determine I or D: */
546 switch (descp->level) {
547 case INST:
548 id = TLB_INST;
549 break;
550 case DATA:
551 case DATA0:
552 case DATA1:
553 id = TLB_DATA;
554 break;
555 default:
556 continue;
557 }
558 /* determine level: */
559 switch (descp->level) {
560 case DATA1:
561 level = 1;
562 break;
563 default:
564 level = 0;
565 }
566 info_p->cpuid_tlb[id][page][level] = descp->entries;
567 break;
568 case STLB:
569 info_p->cpuid_stlb = descp->entries;
570 }
571 }
572 DBG("\n");
573 }
574
/*
 * Read the generic CPUID leaves into info_p: vendor, brand string,
 * signature (family/model/stepping), feature words, microcode version,
 * and the mwait/thermal/perfmon/xsave/leaf-7/TSC sub-leaves where
 * enumerated.  Must run before cpuid_set_cache_info().
 */
static void
cpuid_set_generic_info(i386_cpu_info_t *info_p)
{
	uint32_t reg[4];
	char str[128], *p;

	DBG("cpuid_set_generic_info(%p)\n", info_p);

	/* do cpuid 0 to get vendor */
	cpuid_fn(0, reg);
	info_p->cpuid_max_basic = reg[eax];
	/* vendor string is returned in ebx:edx:ecx order, hence the shuffle */
	bcopy((char *)&reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
	bcopy((char *)&reg[ecx], &info_p->cpuid_vendor[8], 4);
	bcopy((char *)&reg[edx], &info_p->cpuid_vendor[4], 4);
	info_p->cpuid_vendor[12] = 0;

	/* get extended cpuid results */
	cpuid_fn(0x80000000, reg);
	info_p->cpuid_max_ext = reg[eax];

	/* check to see if we can get brand string */
	if (info_p->cpuid_max_ext >= 0x80000004) {
		/*
		 * The brand string is 48 bytes (max), guaranteed to
		 * be NUL terminated.
		 */
		cpuid_fn(0x80000002, reg);
		bcopy((char *)reg, &str[0], 16);
		cpuid_fn(0x80000003, reg);
		bcopy((char *)reg, &str[16], 16);
		cpuid_fn(0x80000004, reg);
		bcopy((char *)reg, &str[32], 16);
		/* skip any leading spaces the firmware padded with */
		for (p = str; *p != '\0'; p++) {
			if (*p != ' ') {
				break;
			}
		}
		strlcpy(info_p->cpuid_brand_string,
		    p, sizeof(info_p->cpuid_brand_string));

		if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN,
		    min(sizeof(info_p->cpuid_brand_string),
		    strlen(CPUID_STRING_UNKNOWN) + 1))) {
			/*
			 * This string means we have a firmware-programmable brand string,
			 * and the firmware couldn't figure out what sort of CPU we have.
			 */
			info_p->cpuid_brand_string[0] = '\0';
		}
	}

	/* Get cache and addressing info. */
	if (info_p->cpuid_max_ext >= 0x80000006) {
		uint32_t assoc;
		cpuid_fn(0x80000006, reg);
		info_p->cpuid_cache_linesize = bitfield32(reg[ecx], 7, 0);
		assoc = bitfield32(reg[ecx], 15, 12);
		/*
		 * L2 associativity is encoded, though in an insufficiently
		 * descriptive fashion, e.g. 24-way is mapped to 16-way.
		 * Represent a fully associative cache as 0xFFFF.
		 * Overwritten by associativity as determined via CPUID.4
		 * if available.
		 */
		if (assoc == 6) {
			assoc = 8;
		} else if (assoc == 8) {
			assoc = 16;
		} else if (assoc == 0xF) {
			assoc = 0xFFFF;
		}
		info_p->cpuid_cache_L2_associativity = assoc;
		info_p->cpuid_cache_size = bitfield32(reg[ecx], 31, 16);
		/*
		 * NOTE(review): leaf 0x80000008 is queried under the
		 * 0x80000006 check -- presumably every supported CPU that
		 * enumerates 0x80000006 also enumerates 0x80000008; confirm.
		 */
		cpuid_fn(0x80000008, reg);
		info_p->cpuid_address_bits_physical =
		    bitfield32(reg[eax], 7, 0);
		info_p->cpuid_address_bits_virtual =
		    bitfield32(reg[eax], 15, 8);
	}

	/*
	 * Get processor signature and decode
	 * and bracket this with the approved procedure for reading the
	 * microcode version number a.k.a. signature a.k.a. BIOS ID
	 */
	wrmsr64(MSR_IA32_BIOS_SIGN_ID, 0);
	cpuid_fn(1, reg);
	info_p->cpuid_microcode_version =
	    (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
	info_p->cpuid_signature = reg[eax];
	info_p->cpuid_stepping = bitfield32(reg[eax], 3, 0);
	info_p->cpuid_model = bitfield32(reg[eax], 7, 4);
	info_p->cpuid_family = bitfield32(reg[eax], 11, 8);
	info_p->cpuid_type = bitfield32(reg[eax], 13, 12);
	info_p->cpuid_extmodel = bitfield32(reg[eax], 19, 16);
	info_p->cpuid_extfamily = bitfield32(reg[eax], 27, 20);
	info_p->cpuid_brand = bitfield32(reg[ebx], 7, 0);
	info_p->cpuid_features = quad(reg[ecx], reg[edx]);

	/* Get "processor flag"; necessary for microcode update matching */
	info_p->cpuid_processor_flag = (rdmsr64(MSR_IA32_PLATFORM_ID) >> 50) & 0x7;

	/* Fold extensions into family/model */
	if (info_p->cpuid_family == 0x0f) {
		info_p->cpuid_family += info_p->cpuid_extfamily;
	}
	if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06) {
		info_p->cpuid_model += (info_p->cpuid_extmodel << 4);
	}

	if (info_p->cpuid_features & CPUID_FEATURE_HTT) {
		info_p->cpuid_logical_per_package =
		    bitfield32(reg[ebx], 23, 16);
	} else {
		info_p->cpuid_logical_per_package = 1;
	}

	if (info_p->cpuid_max_ext >= 0x80000001) {
		cpuid_fn(0x80000001, reg);
		info_p->cpuid_extfeatures =
		    quad(reg[ecx], reg[edx]);
	}

	DBG(" max_basic : %d\n", info_p->cpuid_max_basic);
	DBG(" max_ext : 0x%08x\n", info_p->cpuid_max_ext);
	DBG(" vendor : %s\n", info_p->cpuid_vendor);
	DBG(" brand_string : %s\n", info_p->cpuid_brand_string);
	DBG(" signature : 0x%08x\n", info_p->cpuid_signature);
	DBG(" stepping : %d\n", info_p->cpuid_stepping);
	DBG(" model : %d\n", info_p->cpuid_model);
	DBG(" family : %d\n", info_p->cpuid_family);
	DBG(" type : %d\n", info_p->cpuid_type);
	DBG(" extmodel : %d\n", info_p->cpuid_extmodel);
	DBG(" extfamily : %d\n", info_p->cpuid_extfamily);
	DBG(" brand : %d\n", info_p->cpuid_brand);
	DBG(" features : 0x%016llx\n", info_p->cpuid_features);
	DBG(" extfeatures : 0x%016llx\n", info_p->cpuid_extfeatures);
	DBG(" logical_per_package : %d\n", info_p->cpuid_logical_per_package);
	DBG(" microcode_version : 0x%08x\n", info_p->cpuid_microcode_version);

	/* Fold in the Invariant TSC feature bit, if present */
	if (info_p->cpuid_max_ext >= 0x80000007) {
		cpuid_fn(0x80000007, reg);
		info_p->cpuid_extfeatures |=
		    reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
		DBG(" extfeatures : 0x%016llx\n",
		    info_p->cpuid_extfeatures);
	}

	if (info_p->cpuid_max_basic >= 0x5) {
		cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf;

		/*
		 * Extract the Monitor/Mwait Leaf info:
		 */
		cpuid_fn(5, reg);
		cmp->linesize_min = reg[eax];
		cmp->linesize_max = reg[ebx];
		cmp->extensions = reg[ecx];
		cmp->sub_Cstates = reg[edx];
		info_p->cpuid_mwait_leafp = cmp;

		DBG(" Monitor/Mwait Leaf:\n");
		DBG(" linesize_min : %d\n", cmp->linesize_min);
		DBG(" linesize_max : %d\n", cmp->linesize_max);
		DBG(" extensions : %d\n", cmp->extensions);
		DBG(" sub_Cstates : 0x%08x\n", cmp->sub_Cstates);
	}

	if (info_p->cpuid_max_basic >= 0x6) {
		cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf;

		/*
		 * The thermal and Power Leaf:
		 */
		cpuid_fn(6, reg);
		ctp->sensor = bitfield32(reg[eax], 0, 0);
		ctp->dynamic_acceleration = bitfield32(reg[eax], 1, 1);
		ctp->invariant_APIC_timer = bitfield32(reg[eax], 2, 2);
		ctp->core_power_limits = bitfield32(reg[eax], 4, 4);
		ctp->fine_grain_clock_mod = bitfield32(reg[eax], 5, 5);
		ctp->package_thermal_intr = bitfield32(reg[eax], 6, 6);
		ctp->thresholds = bitfield32(reg[ebx], 3, 0);
		ctp->ACNT_MCNT = bitfield32(reg[ecx], 0, 0);
		ctp->hardware_feedback = bitfield32(reg[ecx], 1, 1);
		ctp->energy_policy = bitfield32(reg[ecx], 3, 3);
		info_p->cpuid_thermal_leafp = ctp;

		DBG(" Thermal/Power Leaf:\n");
		DBG(" sensor : %d\n", ctp->sensor);
		DBG(" dynamic_acceleration : %d\n", ctp->dynamic_acceleration);
		DBG(" invariant_APIC_timer : %d\n", ctp->invariant_APIC_timer);
		DBG(" core_power_limits : %d\n", ctp->core_power_limits);
		DBG(" fine_grain_clock_mod : %d\n", ctp->fine_grain_clock_mod);
		DBG(" package_thermal_intr : %d\n", ctp->package_thermal_intr);
		DBG(" thresholds : %d\n", ctp->thresholds);
		DBG(" ACNT_MCNT : %d\n", ctp->ACNT_MCNT);
		DBG(" ACNT2 : %d\n", ctp->hardware_feedback);
		DBG(" energy_policy : %d\n", ctp->energy_policy);
	}

	if (info_p->cpuid_max_basic >= 0xa) {
		cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf;

		/*
		 * Architectural Performance Monitoring Leaf:
		 */
		cpuid_fn(0xa, reg);
		capp->version = bitfield32(reg[eax], 7, 0);
		capp->number = bitfield32(reg[eax], 15, 8);
		capp->width = bitfield32(reg[eax], 23, 16);
		capp->events_number = bitfield32(reg[eax], 31, 24);
		capp->events = reg[ebx];
		capp->fixed_number = bitfield32(reg[edx], 4, 0);
		capp->fixed_width = bitfield32(reg[edx], 12, 5);
		info_p->cpuid_arch_perf_leafp = capp;

		DBG(" Architectural Performance Monitoring Leaf:\n");
		DBG(" version : %d\n", capp->version);
		DBG(" number : %d\n", capp->number);
		DBG(" width : %d\n", capp->width);
		DBG(" events_number : %d\n", capp->events_number);
		DBG(" events : %d\n", capp->events);
		DBG(" fixed_number : %d\n", capp->fixed_number);
		DBG(" fixed_width : %d\n", capp->fixed_width);
	}

	if (info_p->cpuid_max_basic >= 0xd) {
		cpuid_xsave_leaf_t *xsp;
		/*
		 * XSAVE Features:
		 */
		xsp = &info_p->cpuid_xsave_leaf[0];
		info_p->cpuid_xsave_leafp = xsp;
		/* main leaf: CPUID.(EAX=0xD, ECX=0) */
		xsp->extended_state[eax] = 0xd;
		xsp->extended_state[ecx] = 0;
		cpuid(xsp->extended_state);
		DBG(" XSAVE Main leaf:\n");
		DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
		DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
		DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
		DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);

		/* sub-leaf 1: CPUID.(EAX=0xD, ECX=1) */
		xsp = &info_p->cpuid_xsave_leaf[1];
		xsp->extended_state[eax] = 0xd;
		xsp->extended_state[ecx] = 1;
		cpuid(xsp->extended_state);
		DBG(" XSAVE Sub-leaf1:\n");
		DBG(" EAX : 0x%x\n", xsp->extended_state[eax]);
		DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]);
		DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]);
		DBG(" EDX : 0x%x\n", xsp->extended_state[edx]);
	}

	if (info_p->cpuid_model >= CPUID_MODEL_IVYBRIDGE) {
		/*
		 * Leaf7 Features:
		 */
		cpuid_fn(0x7, reg);
		info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]);
		info_p->cpuid_leaf7_extfeatures = reg[edx];

		/* TSX is considered supported if either HLE or RTM is enumerated */
		cpuid_tsx_supported = (reg[ebx] & (CPUID_LEAF7_FEATURE_HLE | CPUID_LEAF7_FEATURE_RTM)) != 0;

		DBG(" Feature Leaf7:\n");
		DBG(" EBX : 0x%x\n", reg[ebx]);
		DBG(" ECX : 0x%x\n", reg[ecx]);
		DBG(" EDX : 0x%x\n", reg[edx]);
	}

	if (info_p->cpuid_max_basic >= 0x15) {
		/*
		 * TCS/CCC frequency leaf:
		 */
		cpuid_fn(0x15, reg);
		info_p->cpuid_tsc_leaf.denominator = reg[eax];
		info_p->cpuid_tsc_leaf.numerator = reg[ebx];

		DBG(" TSC/CCC Information Leaf:\n");
		DBG(" numerator : 0x%x\n", reg[ebx]);
		DBG(" denominator : 0x%x\n", reg[eax]);
	}

	return;
}
860
/*
 * Map the decoded CPUID family/model to a CPUFAMILY_* constant.
 * The result is stored in info_p->cpuid_cpufamily and returned;
 * CPUFAMILY_UNKNOWN for unrecognized parts (callers treat that as
 * an unsupported CPU).
 */
static uint32_t
cpuid_set_cpufamily(i386_cpu_info_t *info_p)
{
	uint32_t cpufamily = CPUFAMILY_UNKNOWN;

	switch (info_p->cpuid_family) {
	case 6:
		/*
		 * NOTE(review): magic number 23 (0x17) where the sibling
		 * cases use CPUID_MODEL_* constants -- presumably this is
		 * CPUID_MODEL_PENRYN; confirm against cpuid.h.
		 */
		case 23:
			cpufamily = CPUFAMILY_INTEL_PENRYN;
			break;
		case CPUID_MODEL_NEHALEM:
		case CPUID_MODEL_FIELDS:
		case CPUID_MODEL_DALES:
		case CPUID_MODEL_NEHALEM_EX:
			cpufamily = CPUFAMILY_INTEL_NEHALEM;
			break;
		case CPUID_MODEL_DALES_32NM:
		case CPUID_MODEL_WESTMERE:
		case CPUID_MODEL_WESTMERE_EX:
			cpufamily = CPUFAMILY_INTEL_WESTMERE;
			break;
		case CPUID_MODEL_SANDYBRIDGE:
		case CPUID_MODEL_JAKETOWN:
			cpufamily = CPUFAMILY_INTEL_SANDYBRIDGE;
			break;
		case CPUID_MODEL_IVYBRIDGE:
		case CPUID_MODEL_IVYBRIDGE_EP:
			cpufamily = CPUFAMILY_INTEL_IVYBRIDGE;
			break;
		case CPUID_MODEL_HASWELL:
		case CPUID_MODEL_HASWELL_EP:
		case CPUID_MODEL_HASWELL_ULT:
		case CPUID_MODEL_CRYSTALWELL:
			cpufamily = CPUFAMILY_INTEL_HASWELL;
			break;
		case CPUID_MODEL_BROADWELL:
		case CPUID_MODEL_BRYSTALWELL:
			cpufamily = CPUFAMILY_INTEL_BROADWELL;
			break;
		case CPUID_MODEL_SKYLAKE:
		case CPUID_MODEL_SKYLAKE_DT:
		case CPUID_MODEL_SKYLAKE_W:
			cpufamily = CPUFAMILY_INTEL_SKYLAKE;
			break;
		case CPUID_MODEL_KABYLAKE:
		case CPUID_MODEL_KABYLAKE_DT:
			cpufamily = CPUFAMILY_INTEL_KABYLAKE;
			break;
		case CPUID_MODEL_ICELAKE:
		case CPUID_MODEL_ICELAKE_H:
		case CPUID_MODEL_ICELAKE_DT:
			cpufamily = CPUFAMILY_INTEL_ICELAKE;
			break;
		}
		break;
	}

	info_p->cpuid_cpufamily = cpufamily;
	DBG("cpuid_set_cpufamily(%p) returning 0x%x\n", info_p, cpufamily);
	return cpufamily;
}
923 /*
924 * Must be invoked either when executing single threaded, or with
925 * independent synchronization.
926 */
/*
 * Top-level CPU identification: applies pre-cpuid workarounds, reads all
 * the CPUID leaves into cpuid_cpu_info, validates the CPU is a supported
 * Intel part, derives cpu type/subtype and core/thread counts, and runs
 * the boot-CPU pass of the workaround logic.
 */
void
cpuid_set_info(void)
{
	i386_cpu_info_t *info_p = &cpuid_cpu_info;
	boolean_t enable_x86_64h = TRUE;

	/* Perform pre-cpuid workarounds (since their effects impact values returned via cpuid) */
	cpuid_do_precpuid_was();

	cpuid_set_generic_info(info_p);

	/* verify we are running on a supported CPU */
	if ((strncmp(CPUID_VID_INTEL, info_p->cpuid_vendor,
	    min(strlen(CPUID_STRING_UNKNOWN) + 1,
	    sizeof(info_p->cpuid_vendor)))) ||
	    (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN)) {
		panic("Unsupported CPU");
	}

	info_p->cpuid_cpu_type = CPU_TYPE_X86;

	/* x86_64h subtype may be forced on or off via boot-args */
	if (!PE_parse_boot_argn("-enable_x86_64h", &enable_x86_64h, sizeof(enable_x86_64h))) {
		boolean_t disable_x86_64h = FALSE;

		if (PE_parse_boot_argn("-disable_x86_64h", &disable_x86_64h, sizeof(disable_x86_64h))) {
			enable_x86_64h = FALSE;
		}
	}

	/* Report x86_64h only when every required feature subset is present */
	if (enable_x86_64h &&
	    ((info_p->cpuid_features & CPUID_X86_64_H_FEATURE_SUBSET) == CPUID_X86_64_H_FEATURE_SUBSET) &&
	    ((info_p->cpuid_extfeatures & CPUID_X86_64_H_EXTFEATURE_SUBSET) == CPUID_X86_64_H_EXTFEATURE_SUBSET) &&
	    ((info_p->cpuid_leaf7_features & CPUID_X86_64_H_LEAF7_FEATURE_SUBSET) == CPUID_X86_64_H_LEAF7_FEATURE_SUBSET)) {
		info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_64_H;
	} else {
		info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
	}
	/* cpuid_set_cache_info must be invoked after set_generic_info */

	/*
	 * Find the number of enabled cores and threads
	 * (which determines whether SMT/Hyperthreading is active).
	 */

	/*
	 * Not all VMMs emulate MSR_CORE_THREAD_COUNT (0x35).
	 */
	if (0 != (info_p->cpuid_features & CPUID_FEATURE_VMM) &&
	    PE_parse_boot_argn("-nomsr35h", NULL, 0)) {
		info_p->core_count = 1;
		info_p->thread_count = 1;
		cpuid_set_cache_info(info_p);
	} else {
		switch (info_p->cpuid_cpufamily) {
		case CPUFAMILY_INTEL_PENRYN:
			/* Penryn predates MSR_CORE_THREAD_COUNT; use cpuid-derived counts */
			cpuid_set_cache_info(info_p);
			info_p->core_count = info_p->cpuid_cores_per_package;
			info_p->thread_count = info_p->cpuid_logical_per_package;
			break;
		case CPUFAMILY_INTEL_WESTMERE: {
			/*
			 * This should be the same as Nehalem but an A0 silicon bug returns
			 * invalid data in the top 12 bits. Hence, we use only bits [19..16]
			 * rather than [31..16] for core count - which actually can't exceed 8.
			 */
			uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
			if (0 == msr) {
				/* Provide a non-zero default for some VMMs */
				msr = (1 << 16) | 1;
			}
			info_p->core_count = bitfield32((uint32_t)msr, 19, 16);
			info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
			cpuid_set_cache_info(info_p);
			break;
		}
		default: {
			uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
			if (0 == msr) {
				/* Provide a non-zero default for some VMMs */
				msr = (1 << 16) | 1;
			}
			info_p->core_count = bitfield32((uint32_t)msr, 31, 16);
			info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
			cpuid_set_cache_info(info_p);
			break;
		}
		}
	}

	DBG("cpuid_set_info():\n");
	DBG(" core_count : %d\n", info_p->core_count);
	DBG(" thread_count : %d\n", info_p->thread_count);
	DBG(" cpu_type: 0x%08x\n", info_p->cpuid_cpu_type);
	DBG(" cpu_subtype: 0x%08x\n", info_p->cpuid_cpu_subtype);

	info_p->cpuid_model_string = ""; /* deprecated */

	/* Init CPU LBRs */
	i386_lbr_init(info_p, true);

	/* Boot-CPU pass of workarounds; APs use cpuid_do_was() */
	do_cwas(info_p, FALSE);
}
1029
/*
 * Tables mapping feature-flag masks to their display names.  Each table
 * is terminated by a {0, 0} sentinel; cpuid_get_names() walks a table
 * and emits the names of the bits that are set.
 */
static struct table {
	uint64_t mask;
	const char *name;
} feature_map[] = {
	/* CPUID leaf 0x1 feature flags (EDX/ECX, folded into one 64-bit word). */
	{CPUID_FEATURE_FPU, "FPU"},
	{CPUID_FEATURE_VME, "VME"},
	{CPUID_FEATURE_DE, "DE"},
	{CPUID_FEATURE_PSE, "PSE"},
	{CPUID_FEATURE_TSC, "TSC"},
	{CPUID_FEATURE_MSR, "MSR"},
	{CPUID_FEATURE_PAE, "PAE"},
	{CPUID_FEATURE_MCE, "MCE"},
	{CPUID_FEATURE_CX8, "CX8"},
	{CPUID_FEATURE_APIC, "APIC"},
	{CPUID_FEATURE_SEP, "SEP"},
	{CPUID_FEATURE_MTRR, "MTRR"},
	{CPUID_FEATURE_PGE, "PGE"},
	{CPUID_FEATURE_MCA, "MCA"},
	{CPUID_FEATURE_CMOV, "CMOV"},
	{CPUID_FEATURE_PAT, "PAT"},
	{CPUID_FEATURE_PSE36, "PSE36"},
	{CPUID_FEATURE_PSN, "PSN"},
	{CPUID_FEATURE_CLFSH, "CLFSH"},
	{CPUID_FEATURE_DS, "DS"},
	{CPUID_FEATURE_ACPI, "ACPI"},
	{CPUID_FEATURE_MMX, "MMX"},
	{CPUID_FEATURE_FXSR, "FXSR"},
	{CPUID_FEATURE_SSE, "SSE"},
	{CPUID_FEATURE_SSE2, "SSE2"},
	{CPUID_FEATURE_SS, "SS"},
	{CPUID_FEATURE_HTT, "HTT"},
	{CPUID_FEATURE_TM, "TM"},
	{CPUID_FEATURE_PBE, "PBE"},
	{CPUID_FEATURE_SSE3, "SSE3"},
	{CPUID_FEATURE_PCLMULQDQ, "PCLMULQDQ"},
	{CPUID_FEATURE_DTES64, "DTES64"},
	{CPUID_FEATURE_MONITOR, "MON"},
	{CPUID_FEATURE_DSCPL, "DSCPL"},
	{CPUID_FEATURE_VMX, "VMX"},
	{CPUID_FEATURE_SMX, "SMX"},
	{CPUID_FEATURE_EST, "EST"},
	{CPUID_FEATURE_TM2, "TM2"},
	{CPUID_FEATURE_SSSE3, "SSSE3"},
	{CPUID_FEATURE_CID, "CID"},
	{CPUID_FEATURE_FMA, "FMA"},
	{CPUID_FEATURE_CX16, "CX16"},
	{CPUID_FEATURE_xTPR, "TPR"},
	{CPUID_FEATURE_PDCM, "PDCM"},
	{CPUID_FEATURE_SSE4_1, "SSE4.1"},
	{CPUID_FEATURE_SSE4_2, "SSE4.2"},
	{CPUID_FEATURE_x2APIC, "x2APIC"},
	{CPUID_FEATURE_MOVBE, "MOVBE"},
	{CPUID_FEATURE_POPCNT, "POPCNT"},
	{CPUID_FEATURE_AES, "AES"},
	{CPUID_FEATURE_VMM, "VMM"},
	{CPUID_FEATURE_PCID, "PCID"},
	{CPUID_FEATURE_XSAVE, "XSAVE"},
	{CPUID_FEATURE_OSXSAVE, "OSXSAVE"},
	{CPUID_FEATURE_SEGLIM64, "SEGLIM64"},
	{CPUID_FEATURE_TSCTMR, "TSCTMR"},
	{CPUID_FEATURE_AVX1_0, "AVX1.0"},
	{CPUID_FEATURE_RDRAND, "RDRAND"},
	{CPUID_FEATURE_F16C, "F16C"},
	{0, 0}
},
/* Extended (leaf 0x80000001) feature flags. */
    extfeature_map[] = {
	{CPUID_EXTFEATURE_SYSCALL, "SYSCALL"},
	{CPUID_EXTFEATURE_XD, "XD"},
	{CPUID_EXTFEATURE_1GBPAGE, "1GBPAGE"},
	{CPUID_EXTFEATURE_EM64T, "EM64T"},
	{CPUID_EXTFEATURE_LAHF, "LAHF"},
	{CPUID_EXTFEATURE_LZCNT, "LZCNT"},
	{CPUID_EXTFEATURE_PREFETCHW, "PREFETCHW"},
	{CPUID_EXTFEATURE_RDTSCP, "RDTSCP"},
	{CPUID_EXTFEATURE_TSCI, "TSCI"},
	{0, 0}
},
/* Structured extended (leaf 0x7) feature flags. */
    leaf7_feature_map[] = {
	{CPUID_LEAF7_FEATURE_RDWRFSGS, "RDWRFSGS"},
	{CPUID_LEAF7_FEATURE_TSCOFF, "TSC_THREAD_OFFSET"},
	{CPUID_LEAF7_FEATURE_SGX, "SGX"},
	{CPUID_LEAF7_FEATURE_BMI1, "BMI1"},
	{CPUID_LEAF7_FEATURE_HLE, "HLE"},
	{CPUID_LEAF7_FEATURE_AVX2, "AVX2"},
	{CPUID_LEAF7_FEATURE_FDPEO, "FDPEO"},
	{CPUID_LEAF7_FEATURE_SMEP, "SMEP"},
	{CPUID_LEAF7_FEATURE_BMI2, "BMI2"},
	{CPUID_LEAF7_FEATURE_ERMS, "ERMS"},
	{CPUID_LEAF7_FEATURE_INVPCID, "INVPCID"},
	{CPUID_LEAF7_FEATURE_RTM, "RTM"},
	{CPUID_LEAF7_FEATURE_PQM, "PQM"},
	{CPUID_LEAF7_FEATURE_FPU_CSDS, "FPU_CSDS"},
	{CPUID_LEAF7_FEATURE_MPX, "MPX"},
	{CPUID_LEAF7_FEATURE_PQE, "PQE"},
	{CPUID_LEAF7_FEATURE_AVX512F, "AVX512F"},
	{CPUID_LEAF7_FEATURE_AVX512DQ, "AVX512DQ"},
	{CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"},
	{CPUID_LEAF7_FEATURE_ADX, "ADX"},
	{CPUID_LEAF7_FEATURE_SMAP, "SMAP"},
	{CPUID_LEAF7_FEATURE_AVX512IFMA, "AVX512IFMA"},
	{CPUID_LEAF7_FEATURE_CLFSOPT, "CLFSOPT"},
	{CPUID_LEAF7_FEATURE_CLWB, "CLWB"},
	{CPUID_LEAF7_FEATURE_IPT, "IPT"},
	{CPUID_LEAF7_FEATURE_AVX512CD, "AVX512CD"},
	{CPUID_LEAF7_FEATURE_SHA, "SHA"},
	{CPUID_LEAF7_FEATURE_AVX512BW, "AVX512BW"},
	{CPUID_LEAF7_FEATURE_AVX512VL, "AVX512VL"},
	{CPUID_LEAF7_FEATURE_PREFETCHWT1, "PREFETCHWT1"},
	{CPUID_LEAF7_FEATURE_AVX512VBMI, "AVX512VBMI"},
	{CPUID_LEAF7_FEATURE_UMIP, "UMIP"},
	{CPUID_LEAF7_FEATURE_PKU, "PKU"},
	{CPUID_LEAF7_FEATURE_OSPKE, "OSPKE"},
	{CPUID_LEAF7_FEATURE_WAITPKG, "WAITPKG"},
	{CPUID_LEAF7_FEATURE_GFNI, "GFNI"},
	{CPUID_LEAF7_FEATURE_VAES, "VAES"},
	{CPUID_LEAF7_FEATURE_VPCLMULQDQ, "VPCLMULQDQ"},
	{CPUID_LEAF7_FEATURE_AVX512VNNI, "AVX512VNNI"},
	{CPUID_LEAF7_FEATURE_AVX512BITALG, "AVX512BITALG"},
	{CPUID_LEAF7_FEATURE_AVX512VPCDQ, "AVX512VPOPCNTDQ"},
	{CPUID_LEAF7_FEATURE_RDPID, "RDPID"},
	{CPUID_LEAF7_FEATURE_CLDEMOTE, "CLDEMOTE"},
	{CPUID_LEAF7_FEATURE_MOVDIRI, "MOVDIRI"},
	{CPUID_LEAF7_FEATURE_MOVDIRI64B, "MOVDIRI64B"},
	{CPUID_LEAF7_FEATURE_SGXLC, "SGXLC"},
	{0, 0}
},
/* Leaf 0x7 extended feature flags (mostly speculative-execution mitigations). */
    leaf7_extfeature_map[] = {
	{ CPUID_LEAF7_EXTFEATURE_AVX5124VNNIW, "AVX5124VNNIW" },
	{ CPUID_LEAF7_EXTFEATURE_AVX5124FMAPS, "AVX5124FMAPS" },
	{ CPUID_LEAF7_EXTFEATURE_FSREPMOV, "FSREPMOV" },
	{ CPUID_LEAF7_EXTFEATURE_MDCLEAR, "MDCLEAR" },
	{ CPUID_LEAF7_EXTFEATURE_TSXFA, "TSXFA" },
	{ CPUID_LEAF7_EXTFEATURE_IBRS, "IBRS" },
	{ CPUID_LEAF7_EXTFEATURE_STIBP, "STIBP" },
	{ CPUID_LEAF7_EXTFEATURE_L1DF, "L1DF" },
	{ CPUID_LEAF7_EXTFEATURE_ACAPMSR, "ACAPMSR" },
	{ CPUID_LEAF7_EXTFEATURE_CCAPMSR, "CCAPMSR" },
	{ CPUID_LEAF7_EXTFEATURE_SSBD, "SSBD" },
	{0, 0}
};
1170
1171 static char *
1172 cpuid_get_names(struct table *map, uint64_t bits, char *buf, unsigned buf_len)
1173 {
1174 size_t len = 0;
1175 char *p = buf;
1176 int i;
1177
1178 for (i = 0; map[i].mask != 0; i++) {
1179 if ((bits & map[i].mask) == 0) {
1180 continue;
1181 }
1182 if (len && ((size_t) (p - buf) < (buf_len - 1))) {
1183 *p++ = ' ';
1184 }
1185 len = min(strlen(map[i].name), (size_t)((buf_len - 1) - (p - buf)));
1186 if (len == 0) {
1187 break;
1188 }
1189 bcopy(map[i].name, p, len);
1190 p += len;
1191 }
1192 *p = '\0';
1193 return buf;
1194 }
1195
1196 i386_cpu_info_t *
1197 cpuid_info(void)
1198 {
1199 /* Set-up the cpuid_info stucture lazily */
1200 if (cpuid_cpu_infop == NULL) {
1201 PE_parse_boot_argn("-cpuid", &cpuid_dbg, sizeof(cpuid_dbg));
1202 cpuid_set_info();
1203 cpuid_cpu_infop = &cpuid_cpu_info;
1204 }
1205 return cpuid_cpu_infop;
1206 }
1207
1208 char *
1209 cpuid_get_feature_names(uint64_t features, char *buf, unsigned buf_len)
1210 {
1211 return cpuid_get_names(feature_map, features, buf, buf_len);
1212 }
1213
1214 char *
1215 cpuid_get_extfeature_names(uint64_t extfeatures, char *buf, unsigned buf_len)
1216 {
1217 return cpuid_get_names(extfeature_map, extfeatures, buf, buf_len);
1218 }
1219
1220 char *
1221 cpuid_get_leaf7_feature_names(uint64_t features, char *buf, unsigned buf_len)
1222 {
1223 return cpuid_get_names(leaf7_feature_map, features, buf, buf_len);
1224 }
1225
1226 char *
1227 cpuid_get_leaf7_extfeature_names(uint64_t features, char *buf, unsigned buf_len)
1228 {
1229 return cpuid_get_names(leaf7_extfeature_map, features, buf, buf_len);
1230 }
1231
/*
 * kprintf all detected feature names (leaf 1, leaf 7, leaf 7 extended)
 * on one line prefixed by `header`, plus an HTT topology line when
 * hyperthreading is advertised.
 */
void
cpuid_feature_display(
	const char *header)
{
	/* Reused for each name list; sized for the longest (leaf-7) list. */
	char buf[320];

	kprintf("%s: %s", header,
	    cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf)));
	if (cpuid_leaf7_features()) {
		kprintf(" %s", cpuid_get_leaf7_feature_names(
			    cpuid_leaf7_features(), buf, sizeof(buf)));
	}
	if (cpuid_leaf7_extfeatures()) {
		kprintf(" %s", cpuid_get_leaf7_extfeature_names(
			    cpuid_leaf7_extfeatures(), buf, sizeof(buf)));
	}
	kprintf("\n");
	if (cpuid_features() & CPUID_FEATURE_HTT) {
/* Pluralize a count in the message below. */
#define s_if_plural(n)  ((n > 1) ? "s" : "")
		kprintf(" HTT: %d core%s per package;"
		    " %d logical cpu%s per package\n",
		    cpuid_cpu_infop->cpuid_cores_per_package,
		    s_if_plural(cpuid_cpu_infop->cpuid_cores_per_package),
		    cpuid_cpu_infop->cpuid_logical_per_package,
		    s_if_plural(cpuid_cpu_infop->cpuid_logical_per_package));
	}
}
1259
/* kprintf the extended feature names, prefixed by `header`. */
void
cpuid_extfeature_display(
	const char *header)
{
	char buf[256];

	cpuid_get_extfeature_names(cpuid_extfeatures(), buf, sizeof(buf));
	kprintf("%s: %s\n", header, buf);
}
1270
1271 void
1272 cpuid_cpu_display(
1273 const char *header)
1274 {
1275 if (cpuid_cpu_infop->cpuid_brand_string[0] != '\0') {
1276 kprintf("%s: %s\n", header, cpuid_cpu_infop->cpuid_brand_string);
1277 }
1278 }
1279
1280 unsigned int
1281 cpuid_family(void)
1282 {
1283 return cpuid_info()->cpuid_family;
1284 }
1285
1286 uint32_t
1287 cpuid_cpufamily(void)
1288 {
1289 return cpuid_info()->cpuid_cpufamily;
1290 }
1291
1292 cpu_type_t
1293 cpuid_cputype(void)
1294 {
1295 return cpuid_info()->cpuid_cpu_type;
1296 }
1297
1298 cpu_subtype_t
1299 cpuid_cpusubtype(void)
1300 {
1301 return cpuid_info()->cpuid_cpu_subtype;
1302 }
1303
/*
 * Return the (possibly boot-arg-limited) leaf-1 feature bits.  On first
 * call, the "_fpu" boot-arg may strip SSE/SSE2/FXSR from the cached set.
 */
uint64_t
cpuid_features(void)
{
	/*
	 * NOTE(review): `checked` is not synchronized; presumably the first
	 * call happens single-threaded during early boot -- confirm.
	 */
	static int checked = 0;
	char fpu_arg[20] = { 0 };

	(void) cpuid_info();
	if (!checked) {
		/* check for boot-time fpu limitations */
		if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof(fpu_arg))) {
			printf("limiting fpu features to: %s\n", fpu_arg);
			/* "_fpu=387" or "_fpu=mmx": drop SSE, SSE2 and FXSR. */
			if (!strncmp("387", fpu_arg, sizeof("387")) || !strncmp("mmx", fpu_arg, sizeof("mmx"))) {
				printf("no sse or sse2\n");
				cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE | CPUID_FEATURE_SSE2 | CPUID_FEATURE_FXSR);
			} else if (!strncmp("sse", fpu_arg, sizeof("sse"))) {
				/* "_fpu=sse": keep SSE but drop SSE2. */
				printf("no sse2\n");
				cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE2);
			}
		}
		checked = 1;
	}
	return cpuid_cpu_infop->cpuid_features;
}
1327
1328 uint64_t
1329 cpuid_extfeatures(void)
1330 {
1331 return cpuid_info()->cpuid_extfeatures;
1332 }
1333
1334 uint64_t
1335 cpuid_leaf7_features(void)
1336 {
1337 return cpuid_info()->cpuid_leaf7_features;
1338 }
1339
1340 uint64_t
1341 cpuid_leaf7_extfeatures(void)
1342 {
1343 return cpuid_info()->cpuid_leaf7_extfeatures;
1344 }
1345
1346 const char *
1347 cpuid_vmm_family_string(void)
1348 {
1349 switch (cpuid_vmm_info()->cpuid_vmm_family) {
1350 case CPUID_VMM_FAMILY_NONE:
1351 return "None";
1352
1353 case CPUID_VMM_FAMILY_VMWARE:
1354 return "VMWare";
1355
1356 case CPUID_VMM_FAMILY_PARALLELS:
1357 return "Parallels";
1358
1359 case CPUID_VMM_FAMILY_HYVE:
1360 return "xHyve";
1361
1362 case CPUID_VMM_FAMILY_HVF:
1363 return "HVF";
1364
1365 case CPUID_VMM_FAMILY_KVM:
1366 return "KVM";
1367
1368 case CPUID_VMM_FAMILY_UNKNOWN:
1369 /*FALLTHROUGH*/
1370 default:
1371 return "Unknown VMM";
1372 }
1373 }
1374
/* Lazily-initialized VMM details; the pointer stays NULL until first use. */
static i386_vmm_info_t *_cpuid_vmm_infop = NULL;
static i386_vmm_info_t _cpuid_vmm_info;
1377
/*
 * Populate info_p with details of the hosting VMM, if any: the 12-byte
 * vendor signature from leaf 0x40000000, a CPUID_VMM_FAMILY_* value
 * derived from it, and (when leaf 0x40000010 exists) the TSC and bus
 * frequencies.  info_p is zeroed first.
 */
static void
cpuid_init_vmm_info(i386_vmm_info_t *info_p)
{
	uint32_t reg[4], maxbasic_regs[4];
	uint32_t max_vmm_leaf;

	bzero(info_p, sizeof(*info_p));

	if (!cpuid_vmm_present()) {
		return;
	}

	DBG("cpuid_init_vmm_info(%p)\n", info_p);

	/*
	 * Get the highest basic leaf value, then save the cpuid details for that leaf
	 * for comparison with the [ostensible] VMM leaf.
	 */
	cpuid_fn(0, reg);
	cpuid_fn(reg[eax], maxbasic_regs);

	/* do cpuid 0x40000000 to get VMM vendor */
	cpuid_fn(0x40000000, reg);

	/*
	 * If leaf 0x40000000 is non-existent, cpuid will return the values as
	 * if the highest basic leaf was requested, so compare to those values
	 * we just retrieved to see if no vmm is present.
	 */
	if (bcmp(reg, maxbasic_regs, sizeof(reg)) == 0) {
		info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_NONE;
		DBG(" vmm_vendor : NONE\n");
		return;
	}

	/* The 12-byte vendor signature is returned in EBX:ECX:EDX. */
	max_vmm_leaf = reg[eax];
	bcopy((char *)&reg[ebx], &info_p->cpuid_vmm_vendor[0], 4);
	bcopy((char *)&reg[ecx], &info_p->cpuid_vmm_vendor[4], 4);
	bcopy((char *)&reg[edx], &info_p->cpuid_vmm_vendor[8], 4);
	info_p->cpuid_vmm_vendor[12] = '\0';

	/* Classify the VMM family from the vendor signature. */
	if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_VMWARE)) {
		/* VMware identification string: kb.vmware.com/kb/1009458 */
		info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_VMWARE;
	} else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_PARALLELS, 12)) {
		/* Parallels identification string */
		info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_PARALLELS;
	} else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_HYVE, 12)) {
		/* bhyve/xhyve identification string */
		info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_HYVE;
	} else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_HVF, 12)) {
		/* HVF identification string */
		info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_HVF;
	} else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_KVM, 12)) {
		/* KVM identification string */
		info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_KVM;
	} else {
		info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_UNKNOWN;
	}

	/* VMM generic leaves: https://lkml.org/lkml/2008/10/1/246 */
	if (max_vmm_leaf >= 0x40000010) {
		cpuid_fn(0x40000010, reg);

		info_p->cpuid_vmm_tsc_frequency = reg[eax];
		info_p->cpuid_vmm_bus_frequency = reg[ebx];
	}

#if DEBUG || DEVELOPMENT
	/* Probe for Apple paravirtualization leaves (debug/development only). */
	cpuid_vmm_detect_pv_interface(info_p, APPLEPV_SIGNATURE, &cpuid_vmm_detect_applepv_features);
#endif

	DBG(" vmm_vendor : %s\n", info_p->cpuid_vmm_vendor);
	DBG(" vmm_family : %u\n", info_p->cpuid_vmm_family);
	DBG(" vmm_bus_frequency : %u\n", info_p->cpuid_vmm_bus_frequency);
	DBG(" vmm_tsc_frequency : %u\n", info_p->cpuid_vmm_tsc_frequency);
}
1455
1456 boolean_t
1457 cpuid_vmm_present(void)
1458 {
1459 return (cpuid_features() & CPUID_FEATURE_VMM) ? TRUE : FALSE;
1460 }
1461
1462 i386_vmm_info_t *
1463 cpuid_vmm_info(void)
1464 {
1465 if (_cpuid_vmm_infop == NULL) {
1466 cpuid_init_vmm_info(&_cpuid_vmm_info);
1467 _cpuid_vmm_infop = &_cpuid_vmm_info;
1468 }
1469 return _cpuid_vmm_infop;
1470 }
1471
1472 uint32_t
1473 cpuid_vmm_family(void)
1474 {
1475 return cpuid_vmm_info()->cpuid_vmm_family;
1476 }
1477
#if DEBUG || DEVELOPMENT
/* Accessor: Apple-PV feature bits discovered during VMM-info init. */
uint64_t
cpuid_vmm_get_applepv_features(void)
{
	i386_vmm_info_t *vmm = cpuid_vmm_info();

	return vmm->cpuid_vmm_applepv_features;
}
#endif /* DEBUG || DEVELOPMENT */
1485
1486 cwa_classifier_e
1487 cpuid_wa_required(cpu_wa_e wa)
1488 {
1489 i386_cpu_info_t *info_p = &cpuid_cpu_info;
1490 static uint64_t bootarg_cpu_wa_enables = 0;
1491 static uint64_t bootarg_cpu_wa_disables = 0;
1492 static int bootargs_overrides_processed = 0;
1493 uint32_t reg[4];
1494
1495 if (!bootargs_overrides_processed) {
1496 if (!PE_parse_boot_argn("cwae", &bootarg_cpu_wa_enables, sizeof(bootarg_cpu_wa_enables))) {
1497 bootarg_cpu_wa_enables = 0;
1498 }
1499
1500 if (!PE_parse_boot_argn("cwad", &bootarg_cpu_wa_disables, sizeof(bootarg_cpu_wa_disables))) {
1501 bootarg_cpu_wa_disables = 0;
1502 }
1503 bootargs_overrides_processed = 1;
1504 }
1505
1506 if (bootarg_cpu_wa_enables & (1 << wa)) {
1507 return CWA_FORCE_ON;
1508 }
1509
1510 if (bootarg_cpu_wa_disables & (1 << wa)) {
1511 return CWA_FORCE_OFF;
1512 }
1513
1514 switch (wa) {
1515 case CPU_INTEL_SEGCHK:
1516 /* First, check to see if this CPU requires the workaround */
1517 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_ACAPMSR) != 0) {
1518 /* We have ARCHCAP, so check it for either RDCL_NO or MDS_NO */
1519 uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES);
1520 if ((archcap_msr & (MSR_IA32_ARCH_CAPABILITIES_RDCL_NO | MSR_IA32_ARCH_CAPABILITIES_MDS_NO)) != 0) {
1521 /* Workaround not needed */
1522 return CWA_OFF;
1523 }
1524 }
1525
1526 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_MDCLEAR) != 0) {
1527 return CWA_ON;
1528 }
1529
1530 /*
1531 * If the CPU supports the ARCHCAP MSR and neither the RDCL_NO bit nor the MDS_NO
1532 * bit are set, OR the CPU does not support the ARCHCAP MSR and the CPU does
1533 * not enumerate the presence of the enhanced VERW instruction, report
1534 * that the workaround should not be enabled.
1535 */
1536 break;
1537
1538 case CPU_INTEL_TSXFA:
1539 /*
1540 * Note that if TSX was disabled in cpuid_do_precpuid_was(), the cached cpuid
1541 * info will indicate that RTM is *not* supported and this workaround will not
1542 * be enabled.
1543 */
1544 /*
1545 * Otherwise, if the CPU supports both TSX(HLE) and FORCE_ABORT, return that
1546 * the workaround should be enabled.
1547 */
1548 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_TSXFA) != 0 &&
1549 (info_p->cpuid_leaf7_features & CPUID_LEAF7_FEATURE_RTM) != 0) {
1550 return CWA_ON;
1551 }
1552 break;
1553
1554 case CPU_INTEL_TSXDA:
1555 /*
1556 * Since this workaround might be requested before cpuid_set_info() is complete,
1557 * we need to invoke cpuid directly when looking for the required bits.
1558 */
1559 cpuid_fn(0x7, reg);
1560 if (reg[edx] & CPUID_LEAF7_EXTFEATURE_ACAPMSR) {
1561 uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES);
1562 /*
1563 * If this CPU supports TSX (HLE being the proxy for TSX detection) AND it does
1564 * not include a hardware fix for TAA and it supports the TSX_CTRL MSR, disable TSX entirely.
1565 * (Note this can be overridden (above) if the cwad boot-arg's value has bit 2 set.)
1566 */
1567 if ((reg[ebx] & CPUID_LEAF7_FEATURE_HLE) != 0 &&
1568 (archcap_msr & (MSR_IA32_ARCH_CAPABILITIES_TAA_NO | MSR_IA32_ARCH_CAPABILITIES_TSX_CTRL))
1569 == MSR_IA32_ARCH_CAPABILITIES_TSX_CTRL) {
1570 return CWA_ON;
1571 }
1572 }
1573 break;
1574
1575 case CPU_INTEL_SRBDS:
1576 /*
1577 * SRBDS mitigations are enabled by default. CWA_ON returned here indicates
1578 * the caller should disable the mitigation. Mitigations should be disabled
1579 * at least for CPUs that advertise MDS_NO *and* (either TAA_NO is set OR TSX
1580 * has been disabled).
1581 */
1582 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_SRBDS_CTRL) != 0) {
1583 if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_ACAPMSR) != 0) {
1584 uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES);
1585 if ((archcap_msr & MSR_IA32_ARCH_CAPABILITIES_MDS_NO) != 0 &&
1586 ((archcap_msr & MSR_IA32_ARCH_CAPABILITIES_TAA_NO) != 0 ||
1587 cpuid_tsx_disabled)) {
1588 return CWA_ON;
1589 }
1590 }
1591 }
1592 break;
1593
1594 default:
1595 break;
1596 }
1597
1598 return CWA_OFF;
1599 }
1600
/*
 * Apply workarounds whose effects change the values cpuid subsequently
 * reports; must therefore run before the cpuid info cache is populated.
 */
static void
cpuid_do_precpuid_was(void)
{
	/*
	 * Note that care must be taken not to use any data from the cached cpuid data since it is
	 * likely uninitialized at this point. That includes calling functions that make use of
	 * that data as well.
	 */

	/* Note the TSX disablement, we do not support force-on since it depends on MSRs being present */
	if (cpuid_wa_required(CPU_INTEL_TSXDA) == CWA_ON) {
		/* This must be executed on all logical processors */
		wrmsr64(MSR_IA32_TSX_CTRL, MSR_IA32_TSXCTRL_TSX_CPU_CLEAR | MSR_IA32_TSXCTRL_RTM_DISABLE);
		/* Remembered so CPU_INTEL_SRBDS can treat TSX as off. */
		cpuid_tsx_disabled = true;
	}
}
1617
1618
1619 #if DEBUG || DEVELOPMENT
1620
1621 /*
1622 * Hunt for Apple Paravirtualization support in the hypervisor class leaves [0x4000_0000-0x4001_0000].
1623 * Hypervisor interfaces are expected to be found at 0x100 boundaries for compatibility.
1624 */
1625
1626 static bool
1627 cpuid_vmm_detect_applepv_features(i386_vmm_info_t *info_p, const uint32_t base, const uint32_t max_leaf)
1628 {
1629 if ((max_leaf - base) < APPLEPV_LEAF_INDEX_MAX) {
1630 return false;
1631 }
1632
1633 /*
1634 * Issue cpuid to make sure the interface supports "AH#1" features.
1635 * This avoids a possible collision with "Hv#1" used by Hyper-V.
1636 */
1637 uint32_t reg[4];
1638 char interface[5];
1639 cpuid_fn(base + APPLEPV_INTERFACE_LEAF_INDEX, reg);
1640 memcpy(&interface[0], &reg[eax], 4);
1641 interface[4] = '\0';
1642 if (0 == strcmp(interface, APPLEPV_INTERFACE)) {
1643 cpuid_fn(base + APPLEPV_FEATURES_LEAF_INDEX, reg);
1644 info_p->cpuid_vmm_applepv_features = quad(reg[ecx], reg[edx]);
1645 return true;
1646 }
1647 return false;
1648 }
1649
1650 static void
1651 cpuid_vmm_detect_pv_interface(i386_vmm_info_t *info_p, const char *signature,
1652 bool (*searcher)(i386_vmm_info_t*, const uint32_t, const uint32_t))
1653 {
1654 int hcalls;
1655 if (PE_parse_boot_argn("hcalls", &hcalls, sizeof(hcalls)) &&
1656 hcalls == 0) {
1657 return;
1658 }
1659
1660 assert(info_p);
1661 /*
1662 * Look for PV interface matching signature
1663 */
1664 for (uint32_t base = 0x40000100; base < 0x40010000; base += 0x100) {
1665 uint32_t reg[4];
1666 char vendor[13];
1667
1668 cpuid_fn(base, reg);
1669 memcpy(&vendor[0], &reg[ebx], 4);
1670 memcpy(&vendor[4], &reg[ecx], 4);
1671 memcpy(&vendor[8], &reg[edx], 4);
1672 vendor[12] = '\0';
1673 if ((0 == strcmp(vendor, signature)) &&
1674 (reg[eax] - base) < 0x100 &&
1675 (*searcher)(info_p, base, reg[eax])) {
1676 break;
1677 }
1678 }
1679 }
1680
1681 #endif /* DEBUG || DEVELOPMENT */