
[v7,01/12] KVM: arm64: PMU: Introduce helpers to set the guest's PMU

Message ID 20231009230858.3444834-2-rananta@google.com
State New, archived
Series KVM: arm64: PMU: Allow userspace to limit the number of PMCs on vCPU

Commit Message

Raghavendra Rao Ananta Oct. 9, 2023, 11:08 p.m. UTC
From: Reiji Watanabe <reijiw@google.com>

Introduce new helper functions to set the guest's PMU
(kvm->arch.arm_pmu) either to a default probed instance or to a
caller-requested one, and use them when the guest's PMU needs to
be set. These helpers will make it easier for the following
patches to modify the relevant code.

No functional change intended.

Signed-off-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
---
 arch/arm64/kvm/pmu-emul.c | 50 +++++++++++++++++++++++++++------------
 1 file changed, 35 insertions(+), 15 deletions(-)
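
[Editor's note, not part of the patch or the thread: a minimal userspace sketch of the vCPU device-attribute interface these helpers sit behind. It assumes a hypothetical vcpu_fd (a KVM vCPU file descriptor) and a pmu_id taken from the chosen PMU's perf "type" id; KVM_ARM_VCPU_PMU_V3_SET_PMU is the attribute that ends up in kvm_arm_pmu_v3_set_pmu() and hence in the new kvm_arm_set_pmu() helper.]

/*
 * Illustrative sketch only: select a specific host PMU for the guest
 * via the vCPU device-attribute interface. vcpu_fd is assumed to be a
 * KVM vCPU fd; pmu_id is assumed to be the perf "type" id of the
 * chosen PMU (e.g. read from /sys/bus/event_source/devices/<pmu>/type).
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vcpu_select_pmu(int vcpu_fd, int pmu_id)
{
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_SET_PMU,
		.addr	= (__u64)(unsigned long)&pmu_id,
	};

	/* Reaches kvm_arm_pmu_v3_set_pmu() -> kvm_arm_set_pmu() in the kernel. */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}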

Comments

Eric Auger Oct. 16, 2023, 7:45 p.m. UTC | #1
Hi Reiji,

On 10/10/23 01:08, Raghavendra Rao Ananta wrote:
> From: Reiji Watanabe <reijiw@google.com>
> 
> Introduce new helper functions to set the guest's PMU
> (kvm->arch.arm_pmu) either to a default probed instance or to a
> caller-requested one, and use them when the guest's PMU needs to
> be set. These helpers will make it easier for the following
> patches to modify the relevant code.
> 
> No functional change intended.
> 
> Signed-off-by: Reiji Watanabe <reijiw@google.com>
> Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>

Eric
> ---
>  arch/arm64/kvm/pmu-emul.c | 50 +++++++++++++++++++++++++++------------
>  1 file changed, 35 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> index 3afb281ed8d2..eb5dcb12dafe 100644
> --- a/arch/arm64/kvm/pmu-emul.c
> +++ b/arch/arm64/kvm/pmu-emul.c
> @@ -874,6 +874,36 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
>  	return true;
>  }
>  
> +static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
> +{
> +	lockdep_assert_held(&kvm->arch.config_lock);
> +
> +	kvm->arch.arm_pmu = arm_pmu;
> +}
> +
> +/**
> + * kvm_arm_set_default_pmu - No PMU set, get the default one.
> + * @kvm: The kvm pointer
> + *
> + * The observant among you will notice that the supported_cpus
> + * mask does not get updated for the default PMU even though it
> + * is quite possible the selected instance supports only a
> + * subset of cores in the system. This is intentional, and
> + * upholds the preexisting behavior on heterogeneous systems
> + * where vCPUs can be scheduled on any core but the guest
> + * counters could stop working.
> + */
> +static int kvm_arm_set_default_pmu(struct kvm *kvm)
> +{
> +	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
> +
> +	if (!arm_pmu)
> +		return -ENODEV;
> +
> +	kvm_arm_set_pmu(kvm, arm_pmu);
> +	return 0;
> +}
> +
>  static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
>  {
>  	struct kvm *kvm = vcpu->kvm;
> @@ -893,7 +923,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
>  				break;
>  			}
>  
> -			kvm->arch.arm_pmu = arm_pmu;
> +			kvm_arm_set_pmu(kvm, arm_pmu);
>  			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
>  			ret = 0;
>  			break;
> @@ -917,20 +947,10 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
>  		return -EBUSY;
>  
>  	if (!kvm->arch.arm_pmu) {
> -		/*
> -		 * No PMU set, get the default one.
> -		 *
> -		 * The observant among you will notice that the supported_cpus
> -		 * mask does not get updated for the default PMU even though it
> -		 * is quite possible the selected instance supports only a
> -		 * subset of cores in the system. This is intentional, and
> -		 * upholds the preexisting behavior on heterogeneous systems
> -		 * where vCPUs can be scheduled on any core but the guest
> -		 * counters could stop working.
> -		 */
> -		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
> -		if (!kvm->arch.arm_pmu)
> -			return -ENODEV;
> +		int ret = kvm_arm_set_default_pmu(kvm);
> +
> +		if (ret)
> +			return ret;
>  	}
>  
>  	switch (attr->attr) {

Patch

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 3afb281ed8d2..eb5dcb12dafe 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -874,6 +874,36 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 	return true;
 }
 
+static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
+{
+	lockdep_assert_held(&kvm->arch.config_lock);
+
+	kvm->arch.arm_pmu = arm_pmu;
+}
+
+/**
+ * kvm_arm_set_default_pmu - No PMU set, get the default one.
+ * @kvm: The kvm pointer
+ *
+ * The observant among you will notice that the supported_cpus
+ * mask does not get updated for the default PMU even though it
+ * is quite possible the selected instance supports only a
+ * subset of cores in the system. This is intentional, and
+ * upholds the preexisting behavior on heterogeneous systems
+ * where vCPUs can be scheduled on any core but the guest
+ * counters could stop working.
+ */
+static int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
+
+	if (!arm_pmu)
+		return -ENODEV;
+
+	kvm_arm_set_pmu(kvm, arm_pmu);
+	return 0;
+}
+
 static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -893,7 +923,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 				break;
 			}
 
-			kvm->arch.arm_pmu = arm_pmu;
+			kvm_arm_set_pmu(kvm, arm_pmu);
 			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
 			ret = 0;
 			break;
@@ -917,20 +947,10 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		return -EBUSY;
 
 	if (!kvm->arch.arm_pmu) {
-		/*
-		 * No PMU set, get the default one.
-		 *
-		 * The observant among you will notice that the supported_cpus
-		 * mask does not get updated for the default PMU even though it
-		 * is quite possible the selected instance supports only a
-		 * subset of cores in the system. This is intentional, and
-		 * upholds the preexisting behavior on heterogeneous systems
-		 * where vCPUs can be scheduled on any core but the guest
-		 * counters could stop working.
-		 */
-		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
-		if (!kvm->arch.arm_pmu)
-			return -ENODEV;
+		int ret = kvm_arm_set_default_pmu(kvm);
+
+		if (ret)
+			return ret;
 	}
 
 	switch (attr->attr) {
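
[Editor's note, not part of the patch: a usage sketch of the default path that the comment moved into kvm_arm_set_default_pmu() describes. If userspace never issues SET_PMU, the first PMU_V3 attribute access (for example setting the PMU interrupt or issuing INIT) takes that path, and a failed probe surfaces as ENODEV from the ioctl. The vcpu_fd and pmu_irq names below are assumed, as in the earlier sketch.]

/*
 * Illustrative sketch only: rely on the default PMU. With no prior
 * SET_PMU, the first PMU_V3 attribute write below reaches
 * kvm_arm_set_default_pmu(); if no host PMU can be probed, the ioctl
 * fails with ENODEV. pmu_irq is assumed to be the PPI number chosen
 * for the virtual PMU interrupt.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vcpu_init_default_pmu(int vcpu_fd, int pmu_irq)
{
	struct kvm_device_attr irq_attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr	= (__u64)(unsigned long)&pmu_irq,
	};
	struct kvm_device_attr init_attr = {
		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr	= KVM_ARM_VCPU_PMU_V3_INIT,
	};

	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &irq_attr))
		return -1;	/* errno is ENODEV if no host PMU was probed */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &init_attr);
}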