@@ -7908,14 +7908,10 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 static __init u64 vmx_get_perf_capabilities(void)
 {
 	u64 perf_cap = PMU_CAP_FW_WRITES;
-	u64 host_perf_cap = 0;
 
 	if (!enable_pmu)
 		return 0;
 
-	if (boot_cpu_has(X86_FEATURE_PDCM))
-		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
-
 	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR) &&
 	    !enable_mediated_pmu) {
 		x86_perf_get_lbr(&vmx_lbr_caps);
@@ -7928,11 +7924,11 @@ static __init u64 vmx_get_perf_capabilities(void)
 		if (!vmx_lbr_caps.has_callstack)
 			memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
 		else if (vmx_lbr_caps.nr)
-			perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
+			perf_cap |= kvm_host.perf_capabilities & PMU_CAP_LBR_FMT;
 	}
 
 	if (vmx_pebs_supported()) {
-		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
+		perf_cap |= kvm_host.perf_capabilities & PERF_CAP_PEBS_MASK;
 
 		/*
 		 * Disallow adaptive PEBS as it is functionally broken, can be
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9786,6 +9786,9 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
 
+	if (boot_cpu_has(X86_FEATURE_PDCM))
+		rdmsrl(MSR_IA32_PERF_CAPABILITIES, kvm_host.perf_capabilities);
+
 	r = ops->hardware_setup();
 	if (r != 0)
 		goto out_mmu_exit;
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -46,6 +46,7 @@ struct kvm_host_values {
 	u64 xcr0;
 	u64 xss;
 	u64 arch_capabilities;
+	u64 perf_capabilities;
 };
 
 void kvm_spurious_fault(void);
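
For reference, below is a minimal standalone C sketch of the pattern this diff applies: read the capability MSR once at vendor-init time, cache it in a global host-values struct, and have consumers mask bits out of the snapshot instead of re-reading the MSR on every query. This is illustrative userspace code, not kernel code; the helper names and bit values are stand-ins, not KVM's actual definitions.

/* Illustrative userspace sketch; all names and values are stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define PMU_CAP_LBR_FMT		0x3fULL		/* stand-in: LBR format field */
#define PERF_CAP_PEBS_MASK	0x10000ULL	/* stand-in: PEBS capability bits */

struct host_values {
	uint64_t perf_capabilities;
};

static struct host_values host;			/* analogous to kvm_host */

/* Stand-in for rdmsrl(MSR_IA32_PERF_CAPABILITIES, ...). */
static uint64_t read_perf_capabilities_msr(void)
{
	return 0x10005ULL;			/* pretend the hardware reported this */
}

/* Snapshot once, as kvm_x86_vendor_init() does for kvm_host after this diff. */
static void host_values_init(void)
{
	host.perf_capabilities = read_perf_capabilities_msr();
}

/*
 * Consumers mask bits out of the cached snapshot, mirroring what
 * vmx_get_perf_capabilities() does after this diff.
 */
static uint64_t get_guest_perf_capabilities(void)
{
	uint64_t perf_cap = 0;

	perf_cap |= host.perf_capabilities & PMU_CAP_LBR_FMT;
	perf_cap |= host.perf_capabilities & PERF_CAP_PEBS_MASK;
	return perf_cap;
}

int main(void)
{
	host_values_init();
	printf("guest PERF_CAPABILITIES = 0x%llx\n",
	       (unsigned long long)get_guest_perf_capabilities());
	return 0;
}

The design upside, as in the diff, is that the MSR is read exactly once during initialization, and any consumer can reuse the kvm_host.perf_capabilities snapshot instead of keeping a local copy.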