
[v3,04/14] KVM: arm64: PMU: Don't use the PMUVer of the PMU set for the guest

Message ID 20230203042056.1794649-3-reijiw@google.com (mailing list archive)
State New, archived
Series KVM: arm64: PMU: Allow userspace to limit the number of PMCs on vCPU

Commit Message

Reiji Watanabe Feb. 3, 2023, 4:20 a.m. UTC
KVM uses two potentially different PMUVers for a vCPU with PMU
configured (kvm->arch.dfr0_pmuver.imp and kvm->arch.arm_pmu->pmuver).
Stop using the host's PMUVer (arm_pmu->pmuver) in most cases, as the
PMUVer for the guest (kvm->arch.dfr0_pmuver.imp) could be set by
userspace and could be lower than the host's PMUVer.

The only exception, where KVM keeps using the host's PMUVer, is
creating an event filter (KVM_ARM_VCPU_PMU_V3_FILTER).  For this, KVM
uses the value to determine the valid range of the events and the
size of the event filter bitmap.  Using the host's PMUVer here allows
KVM to keep compatibility with the current behavior of PMU_V3_FILTER.
It also allows KVM to keep the entire filter when the PMUVer for the
guest is changed; KVM only needs to change the actual range of use.
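
A minimal, standalone sketch of the resulting behavior (not kernel
code: the struct, field names, and PMUVer constants below are
simplified stand-ins for kvm->arch.dfr0_pmuver and
ID_AA64DFR0_EL1_PMUVer_*), showing the guest-visible event mask being
derived from the guest's PMUVer while the filter bitmap is sized from
the host-side limit:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-ins for ID_AA64DFR0_EL1_PMUVer_* values. */
	enum { PMUVER_IMP = 1, PMUVER_V3P5 = 6 };

	/* Simplified stand-in for kvm->arch.dfr0_pmuver (imp/imp_limit). */
	struct dfr0_pmuver_stub {
		uint8_t imp;        /* PMUVer exposed to the guest           */
		uint8_t imp_limit;  /* limit, derived from the host's PMUVer */
	};

	/* The event mask depends only on the PMUVer value passed in. */
	static uint32_t pmu_event_mask_for(uint8_t pmuver)
	{
		return (pmuver == PMUVER_IMP) ? 0x3ff   /* GENMASK(9, 0)  */
					      : 0xffff; /* GENMASK(15, 0) */
	}

	int main(void)
	{
		/* Host implements PMUv3p5; userspace limited the guest to PMUv3. */
		struct dfr0_pmuver_stub pmuver = { .imp = PMUVER_IMP,
						   .imp_limit = PMUVER_V3P5 };

		/* Guest-visible event range uses the guest's PMUVer ...          */
		printf("guest event mask:   0x%x\n", pmu_event_mask_for(pmuver.imp));
		/* ... while the filter bitmap is sized for the host-side limit,
		 * so the filter survives a later change of the guest's PMUVer.   */
		printf("filter bitmap bits: %u\n",
		       pmu_event_mask_for(pmuver.imp_limit) + 1);
		return 0;
	}

With the bitmap sized for the limit, a later change of the guest's
PMUVer only moves the upper bound of the events KVM consults; the
filter itself does not need to be reallocated.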

Signed-off-by: Reiji Watanabe <reijiw@google.com>
---
 arch/arm64/kvm/pmu-emul.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

Patch

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 49580787ee09..701728ad78d6 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -35,12 +35,8 @@  static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
 	return &vcpu->arch.pmu.pmc[cnt_idx];
 }
 
-static u32 kvm_pmu_event_mask(struct kvm *kvm)
+static u32 __kvm_pmu_event_mask(u8 pmuver)
 {
-	unsigned int pmuver;
-
-	pmuver = kvm->arch.arm_pmu->pmuver;
-
 	switch (pmuver) {
 	case ID_AA64DFR0_EL1_PMUVer_IMP:
 		return GENMASK(9, 0);
@@ -55,6 +51,11 @@  static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	}
 }
 
+static u32 kvm_pmu_event_mask(struct kvm *kvm)
+{
+	return __kvm_pmu_event_mask(kvm->arch.dfr0_pmuver.imp);
+}
+
 /**
  * kvm_pmc_is_64bit - determine if counter is 64bit
  * @pmc: counter context
@@ -755,7 +756,7 @@  u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
 		 * as RAZ
 		 */
-		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
+		if (vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P4)
 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
 		base = 32;
 	}
@@ -955,7 +956,12 @@  int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		struct kvm_pmu_event_filter filter;
 		int nr_events;
 
-		nr_events = kvm_pmu_event_mask(kvm) + 1;
+		/*
+		 * Allocate an event filter for the entire range supported
+		 * by the PMU hardware so we can simply change the actual
+		 * range of use when the PMUVer for the guest is changed.
+		 */
+		nr_events = __kvm_pmu_event_mask(kvm->arch.dfr0_pmuver.imp_limit) + 1;
 
 		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;