@@ -591,6 +591,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
 
 	if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT) {
+		vcpu->arch.ia32_misc_enable_msr &= ~MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
 		if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_BASELINE) {
 			pmu->pebs_enable_mask = ~pmu->global_ctrl;
 			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
@@ -604,6 +605,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 				~((1ull << pmu->nr_arch_gp_counters) - 1);
 		}
 	} else {
+		vcpu->arch.ia32_misc_enable_msr |= MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
 		vcpu->arch.perf_capabilities &= ~PERF_CAP_PEBS_MASK;
 	}
 }
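
Taken together, the two intel_pmu_refresh() hunks keep the guest's view of IA32_MISC_ENABLE consistent with its IA32_PERF_CAPABILITIES: bit 12 (PEBS_UNAVAIL) is cleared when a PEBS record format is exposed to the vCPU and set when it is not, so a guest perf driver probing that bit gets the right answer either way. Below is a minimal standalone sketch of that invariant; it is plain userspace C, not part of the patch, refresh_misc_enable() is a hypothetical stand-in for the refresh logic, and the two constants mirror their definitions in arch/x86/include/asm/msr-index.h as I read them.

#include <assert.h>
#include <stdint.h>

#define PERF_CAP_PEBS_FORMAT			0xf00ULL	/* IA32_PERF_CAPABILITIES[11:8] */
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL	(1ULL << 12)	/* IA32_MISC_ENABLE[12] */

/* Hypothetical stand-in for the PEBS handling in intel_pmu_refresh(). */
static uint64_t refresh_misc_enable(uint64_t misc_enable, uint64_t perf_cap)
{
	if (perf_cap & PERF_CAP_PEBS_FORMAT)
		misc_enable &= ~MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
	else
		misc_enable |= MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
	return misc_enable;
}

int main(void)
{
	/* PEBS format 4 advertised -> the "unavailable" bit must read 0. */
	assert(!(refresh_misc_enable(~0ULL, 4ULL << 8) &
		 MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL));
	/* No PEBS format -> the bit reads 1 and PEBS stays hidden. */
	assert(refresh_misc_enable(0, 0) & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL);
	return 0;
}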
@@ -3480,7 +3480,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_MISC_ENABLE: {
 		u64 old_val = vcpu->arch.ia32_misc_enable_msr;
-		u64 pmu_mask = MSR_IA32_MISC_ENABLE_EMON;
+		u64 pmu_mask = MSR_IA32_MISC_ENABLE_EMON |
+			       MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
+
+		/* RO bits */
+		if (!msr_info->host_initiated &&
+		    ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
+			return 1;
 
 		/*
 		 * For a dummy user space, the order of setting vPMU capabilities and
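
On the write side, PEBS_UNAVAIL joins EMON in pmu_mask, and a guest-initiated WRMSR that tries to flip this read-only bit is refused (the return 1 is surfaced to the guest as #GP), while host-initiated writes from userspace may still set it either way. A standalone sketch of just that check follows; misc_enable_write_allowed() is a hypothetical helper, not a KVM function.

#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL	(1ULL << 12)

/* Returns false where kvm_set_msr_common() would return 1 (#GP for the guest). */
static bool misc_enable_write_allowed(uint64_t old_val, uint64_t data,
				      bool host_initiated)
{
	/* Host-initiated (userspace) writes may change any bit. */
	if (host_initiated)
		return true;

	/* The guest must never toggle the read-only PEBS_UNAVAIL bit. */
	return !((old_val ^ data) & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL);
}

Note that the patch checks old_val ^ data rather than data alone, so a guest write that leaves the bit unchanged still succeeds and the rest of the MSR remains writable as before.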