@@ -837,11 +837,20 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
 
 void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
 {
+	DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
 	int i;
 
-	kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
+	BUILD_BUG_ON(sizeof(pmu->global_ctrl) * BITS_PER_BYTE != X86_PMC_IDX_MAX);
+
+	if (!kvm_pmu_has_perf_global_ctrl(pmu))
+		bitmap_copy(bitmap, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
+	else if (!bitmap_and(bitmap, pmu->all_valid_pmc_idx,
+			     (unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX))
+		return;
+
+	kvm_for_each_pmc(pmu, pmc, i, bitmap) {
 		if (!pmc_event_is_allowed(pmc))
 			continue;
 
Mask off disabled counters based on PERF_GLOBAL_CTRL *before* iterating
over PMCs to emulate (branch) instructions retired events in software.
In the common case where the guest isn't utilizing the PMU, pre-checking
for enabled counters turns a relatively expensive search into a few AND
uops and a Jcc.

Sadly, PMUs without PERF_GLOBAL_CTRL, e.g. most existing AMD CPUs, are
out of luck as there is no way to check that a PMC isn't being used
without checking the PMC's event selector.

Cc: Konstantin Khorenko <khorenko@virtuozzo.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/pmu.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
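
Aside for readers without the KVM tree handy: below is a minimal,
self-contained userspace sketch of the same idea, i.e. AND the
"valid counter" mask against PERF_GLOBAL_CTRL up front so the common
"guest isn't using the PMU" case bails before touching any per-counter
state. Everything in it (fake_pmc, NUM_PMCS, trigger_event, etc.) is
made up for illustration and is not KVM's actual code or API.

/*
 * Standalone sketch (not KVM code) of masking disabled counters via a
 * global-enable bitmap before walking the counters.  Build with any C
 * compiler, e.g. "gcc -o sketch sketch.c".
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PMCS 8

struct fake_pmc {
	uint64_t eventsel;	/* stand-in for the real event selector */
	uint64_t counter;
};

static struct fake_pmc pmcs[NUM_PMCS];

/* Bit i set => counter i exists on this (fake) vPMU. */
static const uint64_t all_valid_pmc_mask = 0xff;

/* Bit i set => counter i is globally enabled (PERF_GLOBAL_CTRL analogue). */
static uint64_t global_ctrl;

static bool pmc_is_counting(const struct fake_pmc *pmc)
{
	/* Placeholder for the real per-counter eventsel/enable checks. */
	return pmc->eventsel != 0;
}

static void trigger_event(void)
{
	uint64_t bitmap = all_valid_pmc_mask & global_ctrl;
	int i;

	/* Cheap early out: a single AND plus a branch in the common case. */
	if (!bitmap)
		return;

	for (i = 0; i < NUM_PMCS; i++) {
		if (!(bitmap & (1ull << i)))
			continue;
		if (!pmc_is_counting(&pmcs[i]))
			continue;
		pmcs[i].counter++;
	}
}

int main(void)
{
	trigger_event();		/* global_ctrl == 0: nothing to do */

	global_ctrl = 0x1;		/* globally enable counter 0 */
	pmcs[0].eventsel = 0xc4;	/* pretend counter 0 is programmed */
	trigger_event();

	printf("counter 0 = %llu\n", (unsigned long long)pmcs[0].counter);
	return 0;
}

The design point mirrors the patch: when the global-enable word is zero,
the function returns without inspecting any per-counter state, which is
exactly the case the changelog above calls out as the common one.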