diff mbox series

[1/3] arm_pmu: Add support for perf NMI interrupts registration

Message ID 20200516124857.75004-2-lecopzer@gmail.com (mailing list archive)
State New, archived
Headers show
Series arm64: perf: Add support for Perf NMI interrupts | expand

Commit Message

Lecopzer Chen May 16, 2020, 12:48 p.m. UTC
Register perf interrupts via request_nmi()/request_percpu_nmi() when both
ARM64_PSEUDO_NMI and ARM64_PSEUDO_NMI_PERF are enabled and the NMI
cpufeature is active.

Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
---
 drivers/perf/arm_pmu.c       | 51 +++++++++++++++++++++++++++++++-----
 include/linux/perf/arm_pmu.h |  6 +++++
 2 files changed, 51 insertions(+), 6 deletions(-)

Comments

Lecopzer Chen May 17, 2020, 6:39 a.m. UTC | #1
There were some mistakes when merging this patch.
The free-NMI part is not present :(

The following part will be added in V2 next weekend.

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index fa37b72d19e2..aa9ed09e5303 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -544,6 +544,38 @@ static int armpmu_count_irq_users(const int irq)
        return count;
 }

+static void armpmu_teardown_percpu_nmi_other(void* info)
+{
+       /*
+        * We don't need to disable preemption since smp_call_function()
+        * did this for us.
+        */
+       teardown_percpu_nmi((uintptr_t) info);
+}
+
+static void _armpmu_free_irq(unsigned int irq, void *dev_id)
+{
+       if (armpmu_support_nmi())
+               free_nmi(irq, dev_id);
+       else
+               free_irq(irq, dev_id);
+}
+
+static void _armpmu_free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+       if (armpmu_support_nmi()) {
+               preempt_disable();
+               teardown_percpu_nmi(irq);
+               smp_call_function(armpmu_teardown_percpu_nmi_other,
+                                 (void *)(uintptr_t) irq, true);
+               preempt_enable();
+
+               free_percpu_nmi(irq, dev_id);
+       } else {
+               free_percpu_irq(irq, dev_id);
+       }
+}
+
 void armpmu_free_irq(int irq, int cpu)
 {
        if (per_cpu(cpu_irq, cpu) == 0)
@@ -552,9 +584,9 @@ void armpmu_free_irq(int irq, int cpu)
                return;

        if (!irq_is_percpu_devid(irq))
-               free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
+               _armpmu_free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
        else if (armpmu_count_irq_users(irq) == 1)
-               free_percpu_irq(irq, &cpu_armpmu);
+               _armpmu_free_percpu_irq(irq, &cpu_armpmu);

        per_cpu(cpu_irq, cpu) = 0;
 }




Thanks,
Lecopzer

Lecopzer Chen <lecopzer@gmail.com> 於 2020年5月16日 週六 下午8:50寫道:
>
> Register perf interrupts via request_nmi()/request_percpu_nmi() when both
> ARM64_PSEUDO_NMI and ARM64_PSEUDO_NMI_PERF are enabled and the NMI
> cpufeature is active.
>
> Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> ---
>  drivers/perf/arm_pmu.c       | 51 +++++++++++++++++++++++++++++++-----
>  include/linux/perf/arm_pmu.h |  6 +++++
>  2 files changed, 51 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
> index df352b334ea7..fa37b72d19e2 100644
> --- a/drivers/perf/arm_pmu.c
> +++ b/drivers/perf/arm_pmu.c
> @@ -559,6 +559,48 @@ void armpmu_free_irq(int irq, int cpu)
>         per_cpu(cpu_irq, cpu) = 0;
>  }
>
> +static void armpmu_prepare_percpu_nmi_other(void *info)
> +{
> +       /*
> +        * We don't need to disable preemption since smp_call_function()
> +        * did this for us.
> +        */
> +       prepare_percpu_nmi((uintptr_t) info);
> +}
> +
> +static int _armpmu_request_irq(unsigned int irq, irq_handler_t handler,
> +                              unsigned long flags, int cpu)
> +{
> +       if (armpmu_support_nmi())
> +               return request_nmi(irq, handler, flags, "arm-pmu",
> +                                  per_cpu_ptr(&cpu_armpmu, cpu));
> +       return request_irq(irq, handler, flags, "arm-pmu",
> +                          per_cpu_ptr(&cpu_armpmu, cpu));
> +}
> +
> +static int _armpmu_request_percpu_irq(unsigned int irq, irq_handler_t handler)
> +{
> +       if (armpmu_support_nmi()) {
> +               int err;
> +
> +               err = request_percpu_nmi(irq, handler, "arm-pmu",
> +                                        &cpu_armpmu);
> +               if (err)
> +                       return err;
> +
> +               preempt_disable();
> +               err = prepare_percpu_nmi(irq);
> +               if (err) {
> +                       preempt_enable();
> +                       return err;
> +               }
> +               smp_call_function(armpmu_prepare_percpu_nmi_other,
> +                                 (void *)(uintptr_t) irq, true);
> +               preempt_enable();
> +               return 0;
> +       }
> +       return request_percpu_irq(irq, handler, "arm-pmu", &cpu_armpmu);
> +}
> +
>  int armpmu_request_irq(int irq, int cpu)
>  {
>         int err = 0;
> @@ -582,12 +624,9 @@ int armpmu_request_irq(int irq, int cpu)
>                             IRQF_NO_THREAD;
>
>                 irq_set_status_flags(irq, IRQ_NOAUTOEN);
> -               err = request_irq(irq, handler, irq_flags, "arm-pmu",
> -                                 per_cpu_ptr(&cpu_armpmu, cpu));
> -       } else if (armpmu_count_irq_users(irq) == 0) {
> -               err = request_percpu_irq(irq, handler, "arm-pmu",
> -                                        &cpu_armpmu);
> -       }
> +               err = _armpmu_request_irq(irq, handler, irq_flags, cpu);
> +       } else if (armpmu_count_irq_users(irq) == 0)
> +               err = _armpmu_request_percpu_irq(irq, handler);
>
>         if (err)
>                 goto err_out;
> diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
> index 5b616dde9a4c..5b878b5a22aa 100644
> --- a/include/linux/perf/arm_pmu.h
> +++ b/include/linux/perf/arm_pmu.h
> @@ -160,6 +160,12 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
>  static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
>  #endif
>
> +static inline bool armpmu_support_nmi(void)
> +{
> +       return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI_PERF) &&
> +              system_uses_irq_prio_masking();
> +}
> +
>  /* Internal functions only for core arm_pmu code */
>  struct arm_pmu *armpmu_alloc(void);
>  struct arm_pmu *armpmu_alloc_atomic(void);
> --
> 2.25.1
>
diff mbox series

Patch

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index df352b334ea7..fa37b72d19e2 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -559,6 +559,48 @@  void armpmu_free_irq(int irq, int cpu)
 	per_cpu(cpu_irq, cpu) = 0;
 }
 
+static void armpmu_prepare_percpu_nmi_other(void *info)
+{
+	/*
+	 * We don't need to disable preemption since smp_call_function()
+	 * did this for us.
+	 */
+	prepare_percpu_nmi((uintptr_t) info);
+}
+
+static int _armpmu_request_irq(unsigned int irq, irq_handler_t handler,
+			       unsigned long flags, int cpu)
+{
+	if (armpmu_support_nmi())
+		return request_nmi(irq, handler, flags, "arm-pmu",
+				   per_cpu_ptr(&cpu_armpmu, cpu));
+	return request_irq(irq, handler, flags, "arm-pmu",
+			   per_cpu_ptr(&cpu_armpmu, cpu));
+}
+
+static int _armpmu_request_percpu_irq(unsigned int irq, irq_handler_t handler)
+{
+	if (armpmu_support_nmi()) {
+		int err;
+
+		err = request_percpu_nmi(irq, handler, "arm-pmu",
+					 &cpu_armpmu);
+		if (err)
+			return err;
+
+		preempt_disable();
+		err = prepare_percpu_nmi(irq);
+		if (err) {
+			preempt_enable();
+			return err;
+		}
+		smp_call_function(armpmu_prepare_percpu_nmi_other,
+				  (void *)(uintptr_t) irq, true);
+		preempt_enable();
+		return 0;
+	}
+	return request_percpu_irq(irq, handler, "arm-pmu", &cpu_armpmu);
+}
+
 int armpmu_request_irq(int irq, int cpu)
 {
 	int err = 0;
@@ -582,12 +624,9 @@  int armpmu_request_irq(int irq, int cpu)
 			    IRQF_NO_THREAD;
 
 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
-		err = request_irq(irq, handler, irq_flags, "arm-pmu",
-				  per_cpu_ptr(&cpu_armpmu, cpu));
-	} else if (armpmu_count_irq_users(irq) == 0) {
-		err = request_percpu_irq(irq, handler, "arm-pmu",
-					 &cpu_armpmu);
-	}
+		err = _armpmu_request_irq(irq, handler, irq_flags, cpu);
+	} else if (armpmu_count_irq_users(irq) == 0)
+		err = _armpmu_request_percpu_irq(irq, handler);
 
 	if (err)
 		goto err_out;
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 5b616dde9a4c..5b878b5a22aa 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -160,6 +160,12 @@  int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
+static inline bool armpmu_support_nmi(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI_PERF) &&
+	       system_uses_irq_prio_masking();
+}
+
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);