@@ -911,10 +911,50 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
kvm_pmu_reset(vcpu);
}
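+/*
+ * Check whether overflow of @pmc should generate a PMI, i.e. whether the
+ * counter's interrupt-on-overflow control is set: EVENTSEL.INT for GP
+ * counters, the PMI bit in the counter's FIXED_CTR_CTRL field for fixed
+ * counters.
+ */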
+static bool pmc_pmi_enabled(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+	u8 fixed_ctr_ctrl;
+	bool pmi_enabled;
+
+	if (pmc_is_gp(pmc)) {
+		pmi_enabled = pmc->eventsel & ARCH_PERFMON_EVENTSEL_INT;
+	} else {
+		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
+						  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
+		pmi_enabled = fixed_ctr_ctrl & INTEL_FIXED_0_ENABLE_PMI;
+	}
+
+	return pmi_enabled;
+}
+
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
-	pmc->emulated_counter++;
-	kvm_pmu_request_counter_reprogram(pmc);
+	struct kvm_vcpu *vcpu = pmc->vcpu;
+
+	/*
+	 * For perf-based PMUs, accumulate software-emulated events separately
+	 * from pmc->counter, as pmc->counter is offset by the count of the
+	 * associated perf event. Request reprogramming, which will consult
+	 * both emulated and hardware-generated events to detect overflow.
+	 */
+	if (!kvm_mediated_pmu_enabled(vcpu)) {
+		pmc->emulated_counter++;
+		kvm_pmu_request_counter_reprogram(pmc);
+		return;
+	}
+
+	/*
+	 * For mediated PMUs, pmc->counter is updated when the vCPU's PMU is
+	 * put, and will be loaded into hardware when the PMU is loaded. Simply
+	 * increment the counter and signal overflow if it wraps to zero.
+	 */
+	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
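+	/*
+	 * A wrap to zero means the counter overflowed: record the overflow in
+	 * the emulated GLOBAL_STATUS and pend a PMI if the counter is
+	 * configured to interrupt on overflow.
+	 */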
+	if (!pmc->counter) {
+		pmc_to_pmu(pmc)->global_status |= BIT_ULL(pmc->idx);
+		if (pmc_pmi_enabled(pmc))
+			kvm_make_request(KVM_REQ_PMI, vcpu);
+	}
}
static inline bool cpl_is_matched(struct kvm_pmc *pmc)