@@ -20,6 +20,7 @@ KVM_X86_PMU_OP(get_msr)
KVM_X86_PMU_OP(set_msr)
KVM_X86_PMU_OP(refresh)
KVM_X86_PMU_OP(init)
+KVM_X86_PMU_OP(is_rdpmc_passthru_allowed)
KVM_X86_PMU_OP_OPTIONAL(reset)
KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
KVM_X86_PMU_OP_OPTIONAL(cleanup)
@@ -102,6 +102,7 @@ bool kvm_pmu_check_rdpmc_passthrough(struct kvm_vcpu *vcpu)

if (is_passthrough_pmu_enabled(vcpu) &&
!enable_vmware_backdoor &&
+ static_call(kvm_x86_pmu_is_rdpmc_passthru_allowed)(vcpu) &&
pmu->nr_arch_gp_counters == kvm_pmu_cap.num_counters_gp &&
pmu->nr_arch_fixed_counters == kvm_pmu_cap.num_counters_fixed &&
pmu->counter_bitmask[KVM_PMC_GP] == (((u64)1 << kvm_pmu_cap.bit_width_gp) - 1) &&
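Note: the static_call() above is generated from the op list added in the first
hunk. A rough sketch of the existing glue in arch/x86/kvm/pmu.h and pmu.c,
assuming the standard kvm-x86-pmu-ops machinery (this is pre-existing
infrastructure, not part of this patch):

	/* pmu.h: declare a static call for every op in the list */
	#define KVM_X86_PMU_OP(func) \
		DECLARE_STATIC_CALL(kvm_x86_pmu_##func, *(((struct kvm_pmu_ops *)0)->func))
	#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
	#include <asm/kvm-x86-pmu-ops.h>

	/* pmu.c: define the calls; they are patched to the vendor ops at init */
	#define KVM_X86_PMU_OP(func) \
		DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func, *(((struct kvm_pmu_ops *)0)->func))
	#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
	#include <asm/kvm-x86-pmu-ops.h>

Because the new op is declared with KVM_X86_PMU_OP() rather than
KVM_X86_PMU_OP_OPTIONAL(), both vendor ops structs below must implement it.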
@@ -40,6 +40,7 @@ struct kvm_pmu_ops {
void (*reset)(struct kvm_vcpu *vcpu);
void (*deliver_pmi)(struct kvm_vcpu *vcpu);
void (*cleanup)(struct kvm_vcpu *vcpu);
+ bool (*is_rdpmc_passthru_allowed)(struct kvm_vcpu *vcpu);

const u64 EVENTSEL_EVENT;
const int MAX_NR_GP_COUNTERS;
@@ -228,6 +228,11 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
}
}

+static bool amd_is_rdpmc_passthru_allowed(struct kvm_vcpu *vcpu)
+{
+ return true;
+}
+
struct kvm_pmu_ops amd_pmu_ops __initdata = {
.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = amd_msr_idx_to_pmc,
@@ -237,6 +242,7 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
.set_msr = amd_pmu_set_msr,
.refresh = amd_pmu_refresh,
.init = amd_pmu_init,
+ .is_rdpmc_passthru_allowed = amd_is_rdpmc_passthru_allowed,
.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
.MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
.MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
@@ -725,6 +725,21 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
}
}

+static bool intel_is_rdpmc_passthru_allowed(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Per Intel SDM vol. 2 for RDPMC, MSR_PERF_METRICS is accessible
+ * with type 0x2000 in ECX[31:16], while the index value in ECX[15:0] is
+ * implementation specific. Therefore, if the host has this MSR but
+ * does not expose it to the guest, RDPMC has to be intercepted.
+ */
+ if ((host_perf_cap & PMU_CAP_PERF_METRICS) &&
+ !(vcpu_get_perf_capabilities(vcpu) & PMU_CAP_PERF_METRICS))
+ return false;
+
+ return true;
+}
+
struct kvm_pmu_ops intel_pmu_ops __initdata = {
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = intel_msr_idx_to_pmc,
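To make the interception rationale concrete: with RDPMC passed through, a
guest could read PERF_METRICS directly even though KVM never exposed the MSR
to it, leaking host state. A minimal, hypothetical guest-kernel-style sketch;
since the index in ECX[15:0] is implementation specific, 0 is assumed here:

	static inline u64 rdpmc_perf_metrics(void)
	{
		u32 ecx = 0x2000 << 16;	/* type 0x2000 in ECX[31:16], index 0 (assumed) */
		u32 lo, hi;

		asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (ecx));
		return ((u64)hi << 32) | lo;
	}

Hence the check above: if the host has PERF_METRICS but the vCPU's perf
capabilities do not include it, RDPMC stays intercepted.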
@@ -736,6 +751,7 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
.reset = intel_pmu_reset,
.deliver_pmi = intel_pmu_deliver_pmi,
.cleanup = intel_pmu_cleanup,
+ .is_rdpmc_passthru_allowed = intel_is_rdpmc_passthru_allowed,
.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
.MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
.MIN_NR_GP_COUNTERS = 1,
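For context, a hypothetical sketch of how a VMX caller might act on the
result of kvm_pmu_check_rdpmc_passthrough(); the actual wiring is outside
this excerpt, though exec_controls_setbit()/exec_controls_clearbit() and
CPU_BASED_RDPMC_EXITING are existing VMX helpers and definitions:

	/* Hypothetical caller: toggle RDPMC interception from the common check. */
	static void vmx_update_rdpmc_intercept(struct kvm_vcpu *vcpu)
	{
		/* Pass RDPMC through only when the common and vendor checks all hold. */
		if (kvm_pmu_check_rdpmc_passthrough(vcpu))
			exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_RDPMC_EXITING);
		else
			exec_controls_setbit(to_vmx(vcpu), CPU_BASED_RDPMC_EXITING);
	}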