
[v3,02/14] KVM: arm64: PMU: Set the default PMU for the guest on vCPU reset

Message ID 20230203042056.1794649-1-reijiw@google.com (mailing list archive)
State New, archived
Series KVM: arm64: PMU: Allow userspace to limit the number of PMCs on vCPU

Commit Message

Reiji Watanabe Feb. 3, 2023, 4:20 a.m. UTC
For vCPUs with a PMU configured, KVM uses the sanitized value
(returned from read_sanitised_ftr_reg()) of ID_AA64DFR0_EL1.PMUVer
as both the default value and the limit value of that field.
On some heterogeneous PMU systems, the sanitized value can be
inappropriate for both purposes, as only one of the PMUs on the
system can be associated with the guest.  Also, since PMUVer is
defined as FTR_EXACT with safe_val == 0 (in cpufeature.c), the
sanitized value will be zero when any PEs on the system have a
PMUVer value different from the other PEs (i.e. a guest with a
PMU configured might see PMUVer == 0).
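
(For reference, that limit is currently derived from the sanitized
register roughly as below; this is a simplified sketch of
kvm_arm_pmu_get_pmuver_limit() and may differ in detail from the
tree this patch is based on.)

	u8 kvm_arm_pmu_get_pmuver_limit(void)
	{
		u64 tmp;

		/* System-wide sanitized value; zero if PEs disagree. */
		tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		/* Cap the version exposed to the guest at PMUv3p5. */
		tmp = cpuid_feature_cap_perfmon_field(tmp,
						      ID_AA64DFR0_EL1_PMUVer_SHIFT,
						      ID_AA64DFR0_EL1_PMUVer_V3P5);
		return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
	}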

As a guest with a PMU configured is associated with one of the PMUs
on the system, the default and the limit PMUVer values for the guest
should be based on that PMU.  However, a PMU is not associated with
the guest until some vCPU device attribute for the PMU is set, so
KVM doesn't have that information until then.
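
(For illustration: that association normally happens when userspace
sets a PMU vCPU device attribute such as KVM_ARM_VCPU_PMU_V3_SET_PMU
or KVM_ARM_VCPU_PMU_V3_IRQ.  A minimal userspace sketch, assuming a
valid vcpu_fd and a host PMU id pmu_id obtained from sysfs.)

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Hypothetical helper: associate a specific host PMU with the guest. */
	static int set_guest_pmu(int vcpu_fd, int pmu_id)
	{
		struct kvm_device_attr attr = {
			.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
			.attr	= KVM_ARM_VCPU_PMU_V3_SET_PMU,
			.addr	= (__u64)(unsigned long)&pmu_id,
		};

		/* No PMU is associated with the guest before this point. */
		return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
	}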

Set the default PMU for the guest on the first vCPU reset.  The
following patches will use the PMUVer of that PMU as the default
value of ID_AA64DFR0_EL1.PMUVer for vCPUs with a PMU configured.

Signed-off-by: Reiji Watanabe <reijiw@google.com>
---
 arch/arm64/kvm/pmu-emul.c | 14 +-------------
 arch/arm64/kvm/reset.c    | 21 ++++++++++++++-------
 include/kvm/arm_pmu.h     |  6 ++++++
 3 files changed, 21 insertions(+), 20 deletions(-)

Patch

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index f2a89f414297..c98020ca427e 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -867,7 +867,7 @@  static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 	return true;
 }
 
-static int kvm_arm_set_vm_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
+int kvm_arm_set_vm_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
 {
 	lockdep_assert_held(&kvm->lock);
 
@@ -923,18 +923,6 @@  int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	if (vcpu->arch.pmu.created)
 		return -EBUSY;
 
-	mutex_lock(&kvm->lock);
-	if (!kvm->arch.arm_pmu) {
-		/* No PMU set, get the default one */
-		int ret = kvm_arm_set_vm_pmu(kvm, NULL);
-
-		if (ret) {
-			mutex_unlock(&kvm->lock);
-			return ret;
-		}
-	}
-	mutex_unlock(&kvm->lock);
-
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
 		int __user *uaddr = (int __user *)(long)attr->addr;
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index e0267f672b8a..5d1e1acfe6ce 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -248,18 +248,30 @@  static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct vcpu_reset_state reset_state;
 	int ret;
 	bool loaded;
 	u32 pstate;
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&kvm->lock);
 	ret = kvm_set_vm_width(vcpu);
 	if (!ret) {
 		reset_state = vcpu->arch.reset_state;
 		WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+
+		/*
+		 * When the vCPU has a PMU, but no PMU is set for the guest
+		 * yet, set the default one.
+		 */
+		if (kvm_vcpu_has_pmu(vcpu) && unlikely(!kvm->arch.arm_pmu)) {
+			if (kvm_arm_support_pmu_v3())
+				ret = kvm_arm_set_vm_pmu(kvm, NULL);
+			else
+				ret = -EINVAL;
+		}
 	}
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&kvm->lock);
 
 	if (ret)
 		return ret;
@@ -297,11 +309,6 @@  int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		} else {
 			pstate = VCPU_RESET_PSTATE_EL1;
 		}
-
-		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
-			ret = -EINVAL;
-			goto out;
-		}
 		break;
 	}
 
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 628775334d5e..7b5c5c8c634b 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -96,6 +96,7 @@  void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 	(vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);
+int kvm_arm_set_vm_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu);
 
 #else
 struct kvm_pmu {
@@ -168,6 +169,11 @@  static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
 	return 0;
 }
 
+static inline int kvm_arm_set_vm_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
+{
+	return 0;
+}
+
 #endif
 
 #endif