
[5/7] KVM: x86/pmu: Refactor pmu->available_event_types field using BITMAP

Message ID 20211112095139.21775-6-likexu@tencent.com (mailing list archive)
State New, archived
Series KVM: x86/pmu: Four functional fixes

Commit Message

Like Xu Nov. 12, 2021, 9:51 a.m. UTC
From: Like Xu <likexu@tencent.com>

Replace the explicit declaration of "unsigned available_event_types" with
the generic DECLARE_BITMAP macro, and rename the field to
"avail_cpuid_events", which better describes what it tracks: the
architectural events advertised as available via CPUID.
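
As a rough sketch of the pattern being adopted (the struct and helper below
are illustrative only, not part of the patch): DECLARE_BITMAP(name, bits)
expands to an array of unsigned long large enough to hold 'bits' bits, and
test_bit() replaces the open-coded mask-and-shift check.

#include <linux/bitmap.h>
#include <linux/bitops.h>

struct example_pmu {
	/* The patch sizes the real field to X86_PMC_IDX_MAX. */
	DECLARE_BITMAP(avail_cpuid_events, 64);
};

static bool example_event_is_available(struct example_pmu *pmu, unsigned int i)
{
	/* Equivalent of the old "available_event_types & BIT_ULL(i)" test. */
	return test_bit(i, pmu->avail_cpuid_events);
}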

Signed-off-by: Like Xu <likexu@tencent.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/vmx/pmu_intel.c    | 11 +++++++----
 2 files changed, 8 insertions(+), 5 deletions(-)

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 88fce6ab4bbd..2e69dec3ad7b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -495,7 +495,6 @@  struct kvm_pmc {
 struct kvm_pmu {
 	unsigned nr_arch_gp_counters;
 	unsigned nr_arch_fixed_counters;
-	unsigned available_event_types;
 	u64 fixed_ctr_ctrl;
 	u64 global_ctrl;
 	u64 global_status;
@@ -510,6 +509,7 @@  struct kvm_pmu {
 	DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
 	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
 	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
+	DECLARE_BITMAP(avail_cpuid_events, X86_PMC_IDX_MAX);
 
 	/*
 	 * The gate to release perf_events not marked in
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 4f58c14efa61..db36e743c3cc 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -96,7 +96,7 @@  static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
 			continue;
 
 		if (is_intel_cpuid_event(event_select, unit_mask) &&
-		    !(pmu->available_event_types & BIT_ULL(i)))
+		    !test_bit(i, pmu->avail_cpuid_events))
 			return PERF_COUNT_HW_MAX + 1;
 
 		break;
@@ -125,7 +125,7 @@  static unsigned int intel_find_fixed_event(struct kvm_pmu *pmu, int idx)
 	event_type = intel_arch_events[event].event_type;
 
 	if (is_intel_cpuid_event(event_select, unit_mask) &&
-	    !(pmu->available_event_types & BIT_ULL(event_type)))
+	    !test_bit(event_type, pmu->avail_cpuid_events))
 		return PERF_COUNT_HW_MAX + 1;
 
 	return event_type;
@@ -497,6 +497,7 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	unsigned long avail_cpuid_events;
 
 	struct x86_pmu_capability x86_pmu;
 	struct kvm_cpuid_entry2 *entry;
@@ -527,8 +528,10 @@  static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
 	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
 	eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
-	pmu->available_event_types = ~entry->ebx &
-					((1ull << eax.split.mask_length) - 1);
+	avail_cpuid_events = ~entry->ebx & ((1ull << eax.split.mask_length) - 1);
+	bitmap_copy(pmu->avail_cpuid_events,
+		    (unsigned long *)&avail_cpuid_events,
+		    eax.split.mask_length);
 
 	if (pmu->version == 1) {
 		pmu->nr_arch_fixed_counters = 0;
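
One note on the bitmap_copy() call above (a rough sketch of the helper's
behaviour in current kernels; the function name below is illustrative): it
copies whole unsigned longs, so passing eax.split.mask_length (typically 8)
as the bit count still copies the full first long of the source scalar.

#include <linux/bitmap.h>
#include <linux/string.h>

/* Approximately what bitmap_copy(dst, src, nbits) boils down to. */
static inline void bitmap_copy_sketch(unsigned long *dst,
				      const unsigned long *src,
				      unsigned int nbits)
{
	memcpy(dst, src, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}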