[v3,2/4] KVM: arm64: Keep a list of probed PMUs

Message ID 20211213152309.158462-3-alexandru.elisei@arm.com (mailing list archive)
State New, archived
Series KVM: arm64: Improve PMU support on heterogeneous systems

Commit Message

Alexandru Elisei Dec. 13, 2021, 3:23 p.m. UTC
The ARM PMU driver calls kvm_host_pmu_init() after probing to tell KVM that
a hardware PMU is available for guest emulation. Heterogeneous systems can
have more than one PMU present, and the callback gets called multiple
times, once for each of them. Keep track of all the PMUs available to KVM,
as they're going to be needed later.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/kvm/pmu-emul.c | 25 +++++++++++++++++++++++--
 include/kvm/arm_pmu.h     |  5 +++++
 2 files changed, 28 insertions(+), 2 deletions(-)
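
As a hedged illustration of why the PMUs are tracked ("they're going to be
needed later"): a consumer could walk the list to find the PMU that matches a
given physical CPU. The helper below is hypothetical and not part of this
patch; it only assumes the arm_pmus list and arm_pmu_entry added here, plus
the existing supported_cpus mask in struct arm_pmu.

static struct arm_pmu *kvm_pmu_find_arm_pmu(int cpu)
{
	struct arm_pmu_entry *entry;
	struct arm_pmu *pmu = NULL;

	mutex_lock(&arm_pmus_lock);
	list_for_each_entry(entry, &arm_pmus, entry) {
		/* supported_cpus is the set of CPUs this PMU was probed on. */
		if (cpumask_test_cpu(cpu, &entry->arm_pmu->supported_cpus)) {
			pmu = entry->arm_pmu;
			break;
		}
	}
	mutex_unlock(&arm_pmus_lock);

	return pmu;
}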

Comments

Reiji Watanabe Dec. 14, 2021, 7:23 a.m. UTC | #1
Hi Alex,

On Mon, Dec 13, 2021 at 7:23 AM Alexandru Elisei
<alexandru.elisei@arm.com> wrote:
>
> The ARM PMU driver calls kvm_host_pmu_init() after probing to tell KVM that
> a hardware PMU is available for guest emulation. Heterogeneous systems can
> have more than one PMU present, and the callback gets called multiple
> times, once for each of them. Keep track of all the PMUs available to KVM,
> as they're going to be needed later.
>
> Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> ---
>  arch/arm64/kvm/pmu-emul.c | 25 +++++++++++++++++++++++--
>  include/kvm/arm_pmu.h     |  5 +++++
>  2 files changed, 28 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> index a5e4bbf5e68f..eb4be96f144d 100644
> --- a/arch/arm64/kvm/pmu-emul.c
> +++ b/arch/arm64/kvm/pmu-emul.c
> @@ -7,6 +7,7 @@
>  #include <linux/cpu.h>
>  #include <linux/kvm.h>
>  #include <linux/kvm_host.h>
> +#include <linux/list.h>
>  #include <linux/perf_event.h>
>  #include <linux/perf/arm_pmu.h>
>  #include <linux/uaccess.h>
> @@ -14,6 +15,9 @@
>  #include <kvm/arm_pmu.h>
>  #include <kvm/arm_vgic.h>
>
> +static LIST_HEAD(arm_pmus);
> +static DEFINE_MUTEX(arm_pmus_lock);
> +
>  static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
>  static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
>  static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
> @@ -742,9 +746,26 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
>
>  void kvm_host_pmu_init(struct arm_pmu *pmu)
>  {
> -       if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
> -           !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
> +       struct arm_pmu_entry *entry;
> +
> +       if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
> +           is_protected_kvm_enabled())
> +               return;
> +
> +       mutex_lock(&arm_pmus_lock);
> +
> +       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
> +       if (!entry)
> +               goto out_unlock;

It might be better to take the lock after the kmalloc above is done?
(The kmalloc might sleep, which makes the code hold the lock longer than
necessary.) I don't think the current code will cause any problem, though.
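
For illustration, a minimal sketch of kvm_host_pmu_init() with the
allocation hoisted out of the critical section (an assumption about how a
respin could look, not the author's actual follow-up):

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
	    is_protected_kvm_enabled())
		return;

	/*
	 * Allocate before taking the lock so a sleeping kmalloc() cannot
	 * lengthen the critical section.
	 */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return;

	mutex_lock(&arm_pmus_lock);

	if (list_empty(&arm_pmus))
		static_branch_enable(&kvm_arm_pmu_available);

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);

	mutex_unlock(&arm_pmus_lock);
}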

Reviewed-by: Reiji Watanabe <reijiw@google.com>

Thanks,
Reiji


> +
> +       if (list_empty(&arm_pmus))
>                 static_branch_enable(&kvm_arm_pmu_available);
> +
> +       entry->arm_pmu = pmu;
> +       list_add_tail(&entry->entry, &arm_pmus);
> +
> +out_unlock:
> +       mutex_unlock(&arm_pmus_lock);
>  }
>
>  static int kvm_pmu_probe_pmuver(void)
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 90f21898aad8..e249c5f172aa 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -36,6 +36,11 @@ struct kvm_pmu {
>         struct irq_work overflow_work;
>  };
>
> +struct arm_pmu_entry {
> +       struct list_head entry;
> +       struct arm_pmu *arm_pmu;
> +};
> +
>  #define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
>  u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
>  void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
> --
> 2.34.1
>
Marc Zyngier Dec. 14, 2021, 12:30 p.m. UTC | #2
On Mon, 13 Dec 2021 15:23:07 +0000,
Alexandru Elisei <alexandru.elisei@arm.com> wrote:
> 
> The ARM PMU driver calls kvm_host_pmu_init() after probing to tell KVM that
> a hardware PMU is available for guest emulation. Heterogeneous systems can
> have more than one PMU present, and the callback gets called multiple
> times, once for each of them. Keep track of all the PMUs available to KVM,
> as they're going to be needed later.
> 
> Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> ---
>  arch/arm64/kvm/pmu-emul.c | 25 +++++++++++++++++++++++--
>  include/kvm/arm_pmu.h     |  5 +++++
>  2 files changed, 28 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> index a5e4bbf5e68f..eb4be96f144d 100644
> --- a/arch/arm64/kvm/pmu-emul.c
> +++ b/arch/arm64/kvm/pmu-emul.c
> @@ -7,6 +7,7 @@
>  #include <linux/cpu.h>
>  #include <linux/kvm.h>
>  #include <linux/kvm_host.h>
> +#include <linux/list.h>
>  #include <linux/perf_event.h>
>  #include <linux/perf/arm_pmu.h>
>  #include <linux/uaccess.h>
> @@ -14,6 +15,9 @@
>  #include <kvm/arm_pmu.h>
>  #include <kvm/arm_vgic.h>
>  
> +static LIST_HEAD(arm_pmus);
> +static DEFINE_MUTEX(arm_pmus_lock);
> +
>  static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
>  static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
>  static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
> @@ -742,9 +746,26 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
>  
>  void kvm_host_pmu_init(struct arm_pmu *pmu)
>  {
> -	if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
> -	    !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
> +	struct arm_pmu_entry *entry;
> +
> +	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
> +	    is_protected_kvm_enabled())
> +		return;
> +
> +	mutex_lock(&arm_pmus_lock);
> +
> +	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
> +	if (!entry)
> +		goto out_unlock;
> +
> +	if (list_empty(&arm_pmus))
>  		static_branch_enable(&kvm_arm_pmu_available);

I find it slightly dodgy that you switch the static key before
actually populating the entry. I'd suggest moving it after the
list_add_tail() and checking list_is_singular() instead.
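
A short sketch of the suggested ordering (hedged; the actual respin may
differ):

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);

	/*
	 * The first entry is on the list before the static key flips, so
	 * the key never advertises an empty list.
	 */
	if (list_is_singular(&arm_pmus))
		static_branch_enable(&kvm_arm_pmu_available);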

> +
> +	entry->arm_pmu = pmu;
> +	list_add_tail(&entry->entry, &arm_pmus);
> +
> +out_unlock:
> +	mutex_unlock(&arm_pmus_lock);
>  }
>  
>  static int kvm_pmu_probe_pmuver(void)
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 90f21898aad8..e249c5f172aa 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -36,6 +36,11 @@ struct kvm_pmu {
>  	struct irq_work overflow_work;
>  };
>  
> +struct arm_pmu_entry {
> +	struct list_head entry;
> +	struct arm_pmu *arm_pmu;
> +};
> +
>  #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
>  u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
>  void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);

Thanks,

	M.
Alexandru Elisei Jan. 6, 2022, 11:46 a.m. UTC | #3
Hi Marc,

Sorry for the long silence; I didn't manage to get to your comments before
going on holiday.

On Tue, Dec 14, 2021 at 12:30:30PM +0000, Marc Zyngier wrote:
> On Mon, 13 Dec 2021 15:23:07 +0000,
> Alexandru Elisei <alexandru.elisei@arm.com> wrote:
> > 
> > The ARM PMU driver calls kvm_host_pmu_init() after probing to tell KVM that
> > a hardware PMU is available for guest emulation. Heterogeneous systems can
> > have more than one PMU present, and the callback gets called multiple
> > times, once for each of them. Keep track of all the PMUs available to KVM,
> > as they're going to be needed later.
> > 
> > Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> > ---
> >  arch/arm64/kvm/pmu-emul.c | 25 +++++++++++++++++++++++--
> >  include/kvm/arm_pmu.h     |  5 +++++
> >  2 files changed, 28 insertions(+), 2 deletions(-)
> > 
> > diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> > index a5e4bbf5e68f..eb4be96f144d 100644
> > --- a/arch/arm64/kvm/pmu-emul.c
> > +++ b/arch/arm64/kvm/pmu-emul.c
> > @@ -7,6 +7,7 @@
> >  #include <linux/cpu.h>
> >  #include <linux/kvm.h>
> >  #include <linux/kvm_host.h>
> > +#include <linux/list.h>
> >  #include <linux/perf_event.h>
> >  #include <linux/perf/arm_pmu.h>
> >  #include <linux/uaccess.h>
> > @@ -14,6 +15,9 @@
> >  #include <kvm/arm_pmu.h>
> >  #include <kvm/arm_vgic.h>
> >  
> > +static LIST_HEAD(arm_pmus);
> > +static DEFINE_MUTEX(arm_pmus_lock);
> > +
> >  static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
> >  static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
> >  static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
> > @@ -742,9 +746,26 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
> >  
> >  void kvm_host_pmu_init(struct arm_pmu *pmu)
> >  {
> > -	if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
> > -	    !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
> > +	struct arm_pmu_entry *entry;
> > +
> > +	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
> > +	    is_protected_kvm_enabled())
> > +		return;
> > +
> > +	mutex_lock(&arm_pmus_lock);
> > +
> > +	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
> > +	if (!entry)
> > +		goto out_unlock;
> > +
> > +	if (list_empty(&arm_pmus))
> >  		static_branch_enable(&kvm_arm_pmu_available);
> 
> I find it slightly dodgy that you switch the static key before
> actually populating the entry. I'd suggest moving it after the
> list_add_tail() and checking list_is_singular() instead.

That's better, will do.

Thanks,
Alex

> 
> > +
> > +	entry->arm_pmu = pmu;
> > +	list_add_tail(&entry->entry, &arm_pmus);
> > +
> > +out_unlock:
> > +	mutex_unlock(&arm_pmus_lock);
> >  }
> >  
> >  static int kvm_pmu_probe_pmuver(void)
> > diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> > index 90f21898aad8..e249c5f172aa 100644
> > --- a/include/kvm/arm_pmu.h
> > +++ b/include/kvm/arm_pmu.h
> > @@ -36,6 +36,11 @@ struct kvm_pmu {
> >  	struct irq_work overflow_work;
> >  };
> >  
> > +struct arm_pmu_entry {
> > +	struct list_head entry;
> > +	struct arm_pmu *arm_pmu;
> > +};
> > +
> >  #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
> >  u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
> >  void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
> 
> Thanks,
> 
> 	M.
> 
> -- 
> Without deviation from the norm, progress is not possible.

Patch

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index a5e4bbf5e68f..eb4be96f144d 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -7,6 +7,7 @@ 
 #include <linux/cpu.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/list.h>
 #include <linux/perf_event.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
@@ -14,6 +15,9 @@ 
 #include <kvm/arm_pmu.h>
 #include <kvm/arm_vgic.h>
 
+static LIST_HEAD(arm_pmus);
+static DEFINE_MUTEX(arm_pmus_lock);
+
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
@@ -742,9 +746,26 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 
 void kvm_host_pmu_init(struct arm_pmu *pmu)
 {
-	if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
-	    !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
+	struct arm_pmu_entry *entry;
+
+	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
+	    is_protected_kvm_enabled())
+		return;
+
+	mutex_lock(&arm_pmus_lock);
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		goto out_unlock;
+
+	if (list_empty(&arm_pmus))
 		static_branch_enable(&kvm_arm_pmu_available);
+
+	entry->arm_pmu = pmu;
+	list_add_tail(&entry->entry, &arm_pmus);
+
+out_unlock:
+	mutex_unlock(&arm_pmus_lock);
 }
 
 static int kvm_pmu_probe_pmuver(void)
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 90f21898aad8..e249c5f172aa 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -36,6 +36,11 @@ struct kvm_pmu {
 	struct irq_work overflow_work;
 };
 
+struct arm_pmu_entry {
+	struct list_head entry;
+	struct arm_pmu *arm_pmu;
+};
+
 #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);