--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -179,6 +179,21 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
return -EINVAL;
}
+	best = kvm_find_cpuid_entry(vcpu, 0xa);
+	if (vcpu->kvm->arch.enable_pmu && best) {
+		union cpuid10_eax eax;
+
+		eax.full = best->eax;
+		if (enable_mediated_pmu &&
+		    eax.split.version_id > kvm_pmu_cap.version)
+			return -EINVAL;
+		if (eax.split.version_id > 0 && !vcpu_pmu_can_enable(vcpu))
+			return -EINVAL;
+		if (eax.split.version_id > 1 && eax.split.version_id < 5 &&
+		    best->ecx != 0)
+			return -EINVAL;
+	}
+
/*
* Exposing dynamic xfeatures to the guest requires additional
* enabling in the FPU, e.g. to expand the guest XSAVE state size.
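For reference, the EAX value decoded above comes from CPUID leaf 0xA, whose layout is described by union cpuid10_eax in arch/x86/include/asm/perf_event.h. A minimal sketch of the bitfields the check relies on (the kernel's definition is authoritative):

union cpuid10_eax {
	struct {
		unsigned int version_id:8;	/* architectural PMU version */
		unsigned int num_counters:8;	/* general-purpose counters */
		unsigned int bit_width:8;	/* counter width */
		unsigned int mask_length:8;	/* bits in the EBX event mask */
	} split;
	unsigned int full;
};

Only version_id feeds the first two checks; the third check insists that ECX, which only gained a meaning (the fixed-counter bitmap) with architectural perfmon version 5, stays zero for versions 2-4, where it is still reserved.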
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -743,6 +743,10 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
kvm_pmu_call(reset)(vcpu);
}
+bool vcpu_pmu_can_enable(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.enable_pmu && (!enable_mediated_pmu || lapic_in_kernel(vcpu));
+}
/*
* Refresh the PMU configuration for the vCPU, e.g. if userspace changes CPUID
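For clarity (not part of the patch), the outcomes of vcpu_pmu_can_enable() given the body above, across the three inputs it combines:

	!enable_pmu                                            -> false
	enable_pmu && !enable_mediated_pmu                     -> true  (emulated PMU, LAPIC optional)
	enable_pmu && enable_mediated_pmu && !lapic_in_kernel  -> false
	enable_pmu && enable_mediated_pmu && lapic_in_kernel   -> true

In other words, the emulated PMU keeps working without an in-kernel local APIC; only the mediated PMU requires one, matching the open-coded condition the helper replaces in kvm_pmu_refresh() below.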
@@ -775,8 +779,7 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->pebs_data_cfg_rsvd = ~0ull;
bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
-	if (!vcpu->kvm->arch.enable_pmu ||
-	    (!lapic_in_kernel(vcpu) && enable_mediated_pmu))
+	if (!vcpu_pmu_can_enable(vcpu))
return;
kvm_pmu_call(refresh)(vcpu);
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -284,6 +284,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
+bool vcpu_pmu_can_enable(struct kvm_vcpu *vcpu);
bool is_vmware_backdoor_pmc(u32 pmc_idx);
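Finally, a rough userspace sketch (not part of the patch) that pokes at the new validation: it hands the vCPU a CPUID.0xA leaf whose version_id exceeds anything the host can report, which the kvm_check_cpuid() hunk above rejects with EINVAL when the mediated PMU is enabled on the host. Error handling is abbreviated and the exact outcome depends on host configuration (module parameters, hardware PMU version).

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

int main(void)
{
	struct kvm_cpuid2 *cpuid;
	int kvm, vm, vcpu;

	kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm < 0 || ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION) {
		perror("/dev/kvm");
		return 1;
	}

	vm = ioctl(kvm, KVM_CREATE_VM, 0);
	vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* note: no in-kernel LAPIC is created */

	/* One entry: leaf 0xA with an absurd architectural PMU version. */
	cpuid = calloc(1, sizeof(*cpuid) + sizeof(struct kvm_cpuid_entry2));
	cpuid->nent = 1;
	cpuid->entries[0].function = 0xa;
	cpuid->entries[0].eax = 0xff;		/* version_id = 255 */

	if (ioctl(vcpu, KVM_SET_CPUID2, cpuid) < 0)
		perror("KVM_SET_CPUID2");	/* EINVAL expected with mediated PMU */
	else
		printf("CPUID.0xA with version_id=255 was accepted\n");

	free(cpuid);
	return 0;
}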