/*
- * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
+#include <i386/machine_routines.h>
int force_tecs_at_idle;
int tecs_mode_supported;
#define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \
sizeof(cpuid_cache_descriptor_t))
+boolean_t cpuid_tsx_disabled = false; /* true if XNU disabled TSX */
+boolean_t cpuid_tsx_supported = false; /* true if leaf 7 advertises HLE or RTM */
+
static void do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave);
+static void cpuid_do_precpuid_was(void);
+
+#if DEBUG || DEVELOPMENT
+static void cpuid_vmm_detect_pv_interface(i386_vmm_info_t *info_p, const char *signature,
+ bool (*)(i386_vmm_info_t*, const uint32_t, const uint32_t));
+static bool cpuid_vmm_detect_applepv_features(i386_vmm_info_t *info_p, const uint32_t base, const uint32_t max_leaf);
+#endif /* DEBUG || DEVELOPMENT */
static inline cpuid_cache_descriptor_t *
cpuid_leaf2_find(uint8_t value)
do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave)
{
extern int force_thread_policy_tecs;
+ cwa_classifier_e wa_reqd;
/*
* Workaround for reclaiming perf counter 3 due to TSX memory ordering erratum.
* This workaround does not support being forcibly set (since an MSR must be
* enumerated, lest we #GP when forced to access it).
+ *
+ * Note that if disabling TSX is supported, disablement is preferred over forcing
+ * TSX transactions to abort.
*/
- if (cpuid_wa_required(CPU_INTEL_TSXFA) == CWA_ON) {
+ if (cpuid_wa_required(CPU_INTEL_TSXDA) == CWA_ON) {
+ /* This must be executed on all logical processors */
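+ /* RTM_DISABLE forces RTM transactions to abort; TSX_CPU_CLEAR clears the HLE/RTM cpuid bits */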
+ wrmsr64(MSR_IA32_TSX_CTRL, MSR_IA32_TSXCTRL_TSX_CPU_CLEAR | MSR_IA32_TSXCTRL_RTM_DISABLE);
+ } else if (cpuid_wa_required(CPU_INTEL_TSXFA) == CWA_ON) {
+ /* This must be executed on all logical processors */
wrmsr64(MSR_IA32_TSX_FORCE_ABORT,
rdmsr64(MSR_IA32_TSX_FORCE_ABORT) | MSR_IA32_TSXFA_RTM_FORCE_ABORT);
}
+ if (((wa_reqd = cpuid_wa_required(CPU_INTEL_SRBDS)) & CWA_ON) != 0 &&
+ ((wa_reqd & CWA_FORCE_ON) == CWA_ON ||
+ (cpuinfo->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_SRBDS_CTRL) != 0)) {
+ /* This must be executed on all logical processors */
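+ /* Setting RNGDS_MITG_DIS in IA32_MCU_OPT_CTRL disables the SRBDS mitigation for RDRAND/RDSEED */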
+ uint64_t mcuoptctrl = rdmsr64(MSR_IA32_MCU_OPT_CTRL);
+ mcuoptctrl |= MSR_IA32_MCUOPTCTRL_RNGDS_MITG_DIS;
+ wrmsr64(MSR_IA32_MCU_OPT_CTRL, mcuoptctrl);
+ }
+
if (on_slave) {
return;
}
force_tecs_at_idle = 1;
}
- /*FALLTHROUGH*/
+ OS_FALLTHROUGH;
case CWA_ON:
tecs_mode_supported = 1;
break;
if ((cpuid_result[j] >> 31) == 1) { /* bit31 is validity */
continue;
}
- ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j];
+ ((uint32_t *)(void *)info_p->cache_info)[j] = cpuid_result[j];
}
/* first byte gives number of cpuid calls to get all descriptors */
for (i = 1; i < info_p->cache_info[0]; i++) {
if ((cpuid_result[j] >> 31) == 1) {
continue;
}
- ((uint32_t *) info_p->cache_info)[4 * i + j] =
+ ((uint32_t *)(void *)info_p->cache_info)[4 * i + j] =
cpuid_result[j];
}
}
info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]);
info_p->cpuid_leaf7_extfeatures = reg[edx];
+ cpuid_tsx_supported = (reg[ebx] & (CPUID_LEAF7_FEATURE_HLE | CPUID_LEAF7_FEATURE_RTM)) != 0;
+
DBG(" Feature Leaf7:\n");
DBG(" EBX : 0x%x\n", reg[ebx]);
DBG(" ECX : 0x%x\n", reg[ecx]);
break;
case CPUID_MODEL_SKYLAKE:
case CPUID_MODEL_SKYLAKE_DT:
-#if !defined(RC_HIDE_XNU_J137)
case CPUID_MODEL_SKYLAKE_W:
-#endif
cpufamily = CPUFAMILY_INTEL_SKYLAKE;
break;
case CPUID_MODEL_KABYLAKE:
case CPUID_MODEL_KABYLAKE_DT:
cpufamily = CPUFAMILY_INTEL_KABYLAKE;
break;
+ case CPUID_MODEL_ICELAKE:
+ case CPUID_MODEL_ICELAKE_H:
+ case CPUID_MODEL_ICELAKE_DT:
+ cpufamily = CPUFAMILY_INTEL_ICELAKE;
+ break;
}
break;
}
i386_cpu_info_t *info_p = &cpuid_cpu_info;
boolean_t enable_x86_64h = TRUE;
+ /* Perform pre-cpuid workarounds (since their effects impact values returned via cpuid) */
+ cpuid_do_precpuid_was();
+
cpuid_set_generic_info(info_p);
/* verify we are running on a supported CPU */
* (which determines whether SMT/Hyperthreading is active).
*/
+ /*
+ * Not all VMMs emulate MSR_CORE_THREAD_COUNT (0x35).
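+ * The -nomsr35h boot-arg sidesteps it under a VMM by assuming a single
+ * core and taking the thread count from cpuid_logical_per_package.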
+ */
if (0 != (info_p->cpuid_features & CPUID_FEATURE_VMM) &&
PE_parse_boot_argn("-nomsr35h", NULL, 0)) {
info_p->core_count = 1;
info_p->thread_count = info_p->cpuid_logical_per_package;
break;
case CPUFAMILY_INTEL_WESTMERE: {
+ /*
+ * This should be the same as Nehalem but an A0 silicon bug returns
+ * invalid data in the top 12 bits. Hence, we use only bits [19..16]
+ * rather than [31..16] for core count - which actually can't exceed 8.
+ */
uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
if (0 == msr) {
/* Provide a non-zero default for some VMMs */
info_p->cpuid_model_string = ""; /* deprecated */
+ /* Init CPU LBRs */
+ i386_lbr_init(info_p, true);
+
do_cwas(info_p, FALSE);
}
return cpuid_info()->cpuid_leaf7_extfeatures;
}
+const char *
+cpuid_vmm_family_string(void)
+{
+ switch (cpuid_vmm_info()->cpuid_vmm_family) {
+ case CPUID_VMM_FAMILY_NONE:
+ return "None";
+
+ case CPUID_VMM_FAMILY_VMWARE:
+ return "VMWare";
+
+ case CPUID_VMM_FAMILY_PARALLELS:
+ return "Parallels";
+
+ case CPUID_VMM_FAMILY_HYVE:
+ return "xHyve";
+
+ case CPUID_VMM_FAMILY_HVF:
+ return "HVF";
+
+ case CPUID_VMM_FAMILY_KVM:
+ return "KVM";
+
+ case CPUID_VMM_FAMILY_UNKNOWN:
+ /*FALLTHROUGH*/
+ default:
+ return "Unknown VMM";
+ }
+}
+
static i386_vmm_info_t *_cpuid_vmm_infop = NULL;
static i386_vmm_info_t _cpuid_vmm_info;
static void
cpuid_init_vmm_info(i386_vmm_info_t *info_p)
{
- uint32_t reg[4];
+ uint32_t reg[4], maxbasic_regs[4];
uint32_t max_vmm_leaf;
bzero(info_p, sizeof(*info_p));
DBG("cpuid_init_vmm_info(%p)\n", info_p);
+ /*
+ * Get the highest basic leaf value, then save the cpuid details for that leaf
+ * for comparison with the [ostensible] VMM leaf.
+ */
+ cpuid_fn(0, reg);
+ cpuid_fn(reg[eax], maxbasic_regs);
+
/* do cpuid 0x40000000 to get VMM vendor */
cpuid_fn(0x40000000, reg);
+
+ /*
+ * If leaf 0x40000000 does not exist, cpuid returns the values of the
+ * highest basic leaf instead, so compare against the values we just
+ * retrieved to determine whether a VMM is present.
+ */
+ if (bcmp(reg, maxbasic_regs, sizeof(reg)) == 0) {
+ info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_NONE;
+ DBG(" vmm_vendor : NONE\n");
+ return;
+ }
+
max_vmm_leaf = reg[eax];
bcopy((char *)&reg[ebx], &info_p->cpuid_vmm_vendor[0], 4);
bcopy((char *)&reg[ecx], &info_p->cpuid_vmm_vendor[4], 4);
bcopy((char *)&reg[edx], &info_p->cpuid_vmm_vendor[8], 4);
info_p->cpuid_vmm_vendor[12] = '\0';
if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_VMWARE)) {
/* VMware identification string: kb.vmware.com/kb/1009458 */
info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_VMWARE;
- } else if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_PARALLELS)) {
+ } else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_PARALLELS, 12)) {
/* Parallels identification string */
info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_PARALLELS;
+ } else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_HYVE, 12)) {
+ /* bhyve/xhyve identification string */
+ info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_HYVE;
+ } else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_HVF, 12)) {
+ /* HVF identification string */
+ info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_HVF;
+ } else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_KVM, 12)) {
+ /* KVM identification string */
+ info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_KVM;
} else {
info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_UNKNOWN;
}
info_p->cpuid_vmm_bus_frequency = reg[ebx];
}
+#if DEBUG || DEVELOPMENT
+ cpuid_vmm_detect_pv_interface(info_p, APPLEPV_SIGNATURE, &cpuid_vmm_detect_applepv_features);
+#endif
+
DBG(" vmm_vendor : %s\n", info_p->cpuid_vmm_vendor);
DBG(" vmm_family : %u\n", info_p->cpuid_vmm_family);
DBG(" vmm_bus_frequency : %u\n", info_p->cpuid_vmm_bus_frequency);
return cpuid_vmm_info()->cpuid_vmm_family;
}
+#if DEBUG || DEVELOPMENT
+uint64_t
+cpuid_vmm_get_applepv_features(void)
+{
+ return cpuid_vmm_info()->cpuid_vmm_applepv_features;
+}
+#endif /* DEBUG || DEVELOPMENT */
+
cwa_classifier_e
cpuid_wa_required(cpu_wa_e wa)
{
+ i386_cpu_info_t *info_p = &cpuid_cpu_info;
static uint64_t bootarg_cpu_wa_enables = 0;
static uint64_t bootarg_cpu_wa_disables = 0;
static int bootargs_overrides_processed = 0;
- i386_cpu_info_t *info_p = &cpuid_cpu_info;
+ uint32_t reg[4];
if (!bootargs_overrides_processed) {
if (!PE_parse_boot_argn("cwae", &bootarg_cpu_wa_enables, sizeof(bootarg_cpu_wa_enables))) {
case CPU_INTEL_TSXFA:
/*
- * If this CPU supports RTM and supports FORCE_ABORT, return that
+ * Note that if TSX was disabled in cpuid_do_precpuid_was(), the cached cpuid
+ * info will indicate that RTM is *not* supported and this workaround will not
+ * be enabled.
+ */
+ /*
+ * Otherwise, if the CPU supports both TSX (HLE) and FORCE_ABORT, return that
* the workaround should be enabled.
*/
if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_TSXFA) != 0 &&
}
break;
+ case CPU_INTEL_TSXDA:
+ /*
+ * Since this workaround might be requested before cpuid_set_info() is complete,
+ * we need to invoke cpuid directly when looking for the required bits.
+ */
+ cpuid_fn(0x7, reg);
+ if (reg[edx] & CPUID_LEAF7_EXTFEATURE_ACAPMSR) {
+ uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES);
+ /*
+ * If this CPU supports TSX (HLE being the proxy for TSX detection) AND it does
+ * not include a hardware fix for TAA and it supports the TSX_CTRL MSR, disable TSX entirely.
+ * (Note this can be overridden (above) if the cwad boot-arg's value has bit 2 set.)
+ */
+ if ((reg[ebx] & CPUID_LEAF7_FEATURE_HLE) != 0 &&
+ (archcap_msr & (MSR_IA32_ARCH_CAPABILITIES_TAA_NO | MSR_IA32_ARCH_CAPABILITIES_TSX_CTRL))
+ == MSR_IA32_ARCH_CAPABILITIES_TSX_CTRL) {
+ return CWA_ON;
+ }
+ }
+ break;
+
+ case CPU_INTEL_SRBDS:
+ /*
+ * SRBDS mitigations are enabled by default. CWA_ON returned here indicates
+ * the caller should disable the mitigation. Mitigations should be disabled
+ * at least for CPUs that advertise MDS_NO *and* (either TAA_NO is set OR TSX
+ * has been disabled).
+ */
+ if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_SRBDS_CTRL) != 0) {
+ if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_ACAPMSR) != 0) {
+ uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES);
+ if ((archcap_msr & MSR_IA32_ARCH_CAPABILITIES_MDS_NO) != 0 &&
+ ((archcap_msr & MSR_IA32_ARCH_CAPABILITIES_TAA_NO) != 0 ||
+ cpuid_tsx_disabled)) {
+ return CWA_ON;
+ }
+ }
+ }
+ break;
+
default:
break;
}
return CWA_OFF;
}
+
+static void
+cpuid_do_precpuid_was(void)
+{
+ /*
+ * Note that care must be taken not to use any data from the cached cpuid data since it is
+ * likely uninitialized at this point. That includes calling functions that make use of
+ * that data as well.
+ */
+
+ /* Note the TSX disablement; we do not support force-on since it depends on MSRs being present */
+ if (cpuid_wa_required(CPU_INTEL_TSXDA) == CWA_ON) {
+ /* This must be executed on all logical processors */
+ wrmsr64(MSR_IA32_TSX_CTRL, MSR_IA32_TSXCTRL_TSX_CPU_CLEAR | MSR_IA32_TSXCTRL_RTM_DISABLE);
+ cpuid_tsx_disabled = true;
+ }
+}
+
+
+#if DEBUG || DEVELOPMENT
+
+/*
+ * Hunt for Apple Paravirtualization support in the hypervisor class leaves [0x4000_0000-0x4001_0000].
+ * Hypervisor interfaces are expected to be found at 0x100 boundaries for compatibility.
+ */
+
+static bool
+cpuid_vmm_detect_applepv_features(i386_vmm_info_t *info_p, const uint32_t base, const uint32_t max_leaf)
+{
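+ /* All AH#1 feature leaves must fit between the base leaf and the interface's max leaf */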
+ if ((max_leaf - base) < APPLEPV_LEAF_INDEX_MAX) {
+ return false;
+ }
+
+ /*
+ * Issue cpuid to make sure the interface supports "AH#1" features.
+ * This avoids a possible collision with "Hv#1" used by Hyper-V.
+ */
+ uint32_t reg[4];
+ char interface[5];
+ cpuid_fn(base + APPLEPV_INTERFACE_LEAF_INDEX, reg);
+ memcpy(&interface[0], &reg[eax], 4);
+ interface[4] = '\0';
+ if (0 == strcmp(interface, APPLEPV_INTERFACE)) {
+ cpuid_fn(base + APPLEPV_FEATURES_LEAF_INDEX, reg);
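+ /* Feature mask is packed via quad(): ecx supplies the upper 32 bits, edx the lower */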
+ info_p->cpuid_vmm_applepv_features = quad(reg[ecx], reg[edx]);
+ return true;
+ }
+ return false;
+}
+
+static void
+cpuid_vmm_detect_pv_interface(i386_vmm_info_t *info_p, const char *signature,
+ bool (*searcher)(i386_vmm_info_t*, const uint32_t, const uint32_t))
+{
+ int hcalls;
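+ /* Honor hcalls=0 boot-arg: skip paravirtualization interface detection entirely */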
+ if (PE_parse_boot_argn("hcalls", &hcalls, sizeof(hcalls)) &&
+ hcalls == 0) {
+ return;
+ }
+
+ assert(info_p);
+ /*
+ * Look for PV interface matching signature
+ */
+ for (uint32_t base = 0x40000100; base < 0x40010000; base += 0x100) {
+ uint32_t reg[4];
+ char vendor[13];
+
+ cpuid_fn(base, reg);
+ memcpy(&vendor[0], &reg[ebx], 4);
+ memcpy(&vendor[4], &reg[ecx], 4);
+ memcpy(&vendor[8], &reg[edx], 4);
+ vendor[12] = '\0';
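+ /* Require a signature match and a reported max leaf within this 0x100 window */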
+ if ((0 == strcmp(vendor, signature)) &&
+ (reg[eax] - base) < 0x100 &&
+ (*searcher)(info_p, base, reg[eax])) {
+ break;
+ }
+ }
+}
+
+#endif /* DEBUG || DEVELOPMENT */