@@ -167,9 +167,11 @@ static inline void kvm_init_pmu_capability(void)
 	 * For Intel, only support guest architectural pmu
 	 * on a host with architectural pmu.
 	 */
-	if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp) {
-		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
+	if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
 		enable_pmu = false;
+
+	if (!enable_pmu) {
+		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
 		return;
 	}
 
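Net effect of the hunk above, shown as a sketch of the resulting check in kvm_init_pmu_capability() (assembled only from the hunk's context and added lines; the rest of the function is omitted): the vendor check now merely clears enable_pmu, and the memset-and-return path also fires when enable_pmu was already false for some other reason.

	/*
	 * For Intel, only support guest architectural pmu
	 * on a host with architectural pmu.
	 */
	if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
		enable_pmu = false;

	/* Wipe the cached host PMU capabilities whenever the vPMU is off. */
	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}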
@@ -7,6 +7,7 @@
 #include "lapic.h"
 #include "x86.h"
 #include "pmu.h"
+#include "cpuid.h"
 
 extern bool __read_mostly enable_vpid;
 extern bool __read_mostly flexpriority_enabled;
@@ -415,6 +416,9 @@ static inline u64 vmx_get_perf_capabilities(void)
 	u64 perf_cap = PMU_CAP_FW_WRITES;
 	u64 host_perf_cap = 0;
 
+	if (!enable_pmu)
+		return 0;
+
 	if (boot_cpu_has(X86_FEATURE_PDCM))
 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
 
@@ -426,7 +430,7 @@ static inline u64 vmx_get_perf_capabilities(void)
 		perf_cap &= ~PERF_CAP_PEBS_BASELINE;
 	}
 
-	if (boot_cpu_has(X86_FEATURE_ARCH_LBR) && !cpu_has_vmx_arch_lbr())
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) && !cpu_has_vmx_arch_lbr())
 		perf_cap &= ~PMU_CAP_LBR_FMT;
 
 	return perf_cap;
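For orientation, the two hunks above leave vmx_get_perf_capabilities() reporting nothing when the vPMU is disabled and keying the LBR-format check on KVM's own capability view (hence the new "cpuid.h" include, which provides kvm_cpu_cap_has()). A sketch reconstructed from the hunks' context lines, with the PEBS handling in between elided:

	static inline u64 vmx_get_perf_capabilities(void)
	{
		u64 perf_cap = PMU_CAP_FW_WRITES;
		u64 host_perf_cap = 0;

		/* Advertise no PMU capabilities at all when the vPMU is disabled. */
		if (!enable_pmu)
			return 0;

		if (boot_cpu_has(X86_FEATURE_PDCM))
			rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);

		/* ... PEBS capability handling elided ... */

		/* Key the LBR-format bit on KVM's capability view, not raw host CPUID. */
		if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) && !cpu_has_vmx_arch_lbr())
			perf_cap &= ~PMU_CAP_LBR_FMT;

		return perf_cap;
	}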
@@ -7575,11 +7575,14 @@ static __init void vmx_set_cpu_caps(void)
 		kvm_cpu_cap_check_and_set(X86_FEATURE_DS);
 		kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64);
 	}
-	if (!cpu_has_vmx_arch_lbr()) {
+	if (!enable_pmu || !cpu_has_vmx_arch_lbr()) {
 		kvm_cpu_cap_clear(X86_FEATURE_ARCH_LBR);
 		supported_xss &= ~XFEATURE_MASK_LBR;
 	}
 
+	if (!enable_pmu)
+		kvm_cpu_cap_clear(X86_FEATURE_PDCM);
+
 	if (!enable_sgx) {
 		kvm_cpu_cap_clear(X86_FEATURE_SGX);
 		kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
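Condensed, the vmx_set_cpu_caps() hunk above amounts to the following gating (a sketch of just the touched lines; the rest of the function is unchanged): arch LBR and its XSAVES state component are hidden unless both the vPMU and VMX arch-LBR support are available, and PDCM is hidden whenever the vPMU is off.

	if (!enable_pmu || !cpu_has_vmx_arch_lbr()) {
		kvm_cpu_cap_clear(X86_FEATURE_ARCH_LBR);
		supported_xss &= ~XFEATURE_MASK_LBR;
	}

	/* Without a vPMU there are no PERF_CAPABILITIES to enumerate via PDCM. */
	if (!enable_pmu)
		kvm_cpu_cap_clear(X86_FEATURE_PDCM);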
@@ -8269,7 +8272,7 @@ static __init int hardware_setup(void)
 	if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
 		return -EINVAL;
 
-	if (!enable_ept || !cpu_has_vmx_intel_pt())
+	if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
 		pt_mode = PT_MODE_SYSTEM;
 	if (pt_mode == PT_MODE_HOST_GUEST)
 		vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
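Finally, the hardware_setup() hunk folds the PMU switch into Intel PT mode selection, so a disabled vPMU forces PT back to system mode and the PT interrupt callback is only wired up in host/guest mode. A sketch of the touched lines, with the surrounding setup omitted:

	/* Host/guest PT mode requires EPT, an enabled vPMU and VMX PT support. */
	if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
		pt_mode = PT_MODE_SYSTEM;
	if (pt_mode == PT_MODE_HOST_GUEST)
		vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;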