[9/9] ARM: perf: move irq registration into pmu implementation

Message ID: 1344620195-22372-10-git-send-email-will.deacon@arm.com (mailing list archive)
State: New, archived

Commit Message

Will Deacon Aug. 10, 2012, 5:36 p.m. UTC
From: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>

This patch moves the CPU-specific IRQ registration and parsing code into
the CPU PMU backend. This is required because a PMU may have more than
one interrupt, each of which can be either a PPI (per-cpu interrupt) or
an SPI (a shared interrupt requiring strict affinity settings at the
interrupt distributor).

Signed-off-by: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>
[will: cosmetic edits and reworked interrupt dispatching]
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 arch/arm/include/asm/pmu.h       |    2 +
 arch/arm/kernel/perf_event.c     |   72 ++++++-------------------------------
 arch/arm/kernel/perf_event_cpu.c |   66 ++++++++++++++++++++++++++++++++++-
 3 files changed, 79 insertions(+), 61 deletions(-)
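
The PPI case mentioned in the commit message is what these new callbacks
make possible: a per-cpu interrupt cannot be handled by the request_irq()
plus irq_set_affinity() sequence used for SPIs. As a rough sketch (not part
of this patch; the ppi_pmu_* names and the percpu_pmu variable are invented
for illustration), a backend with a PPI could implement the new hooks with
the kernel's per-cpu IRQ API:

static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);

static int ppi_pmu_request_irq(irq_handler_t handler)
{
	int irq = platform_get_irq(cpu_pmu->plat_device, 0);

	if (irq < 0)
		return irq;

	/*
	 * One registration covers the PPI on every CPU; each CPU must
	 * still enable its copy locally with enable_percpu_irq().  Note
	 * that a per-cpu handler receives a pointer to this CPU's slot
	 * of the dev_id area, so the dispatcher would need to
	 * dereference it rather than cast it directly.
	 */
	return request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
}

static void ppi_pmu_free_irq(void)
{
	int irq = platform_get_irq(cpu_pmu->plat_device, 0);

	if (irq >= 0)
		free_percpu_irq(irq, &percpu_pmu);
}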

Patch

diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index a993ad6..a26170d 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -78,6 +78,8 @@ struct arm_pmu {
 	void		(*start)(void);
 	void		(*stop)(void);
 	void		(*reset)(void *);
+	int		(*request_irq)(irq_handler_t handler);
+	void		(*free_irq)(void);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 86fd399..93971b1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -297,87 +297,39 @@ validate_group(struct perf_event *event)
 	return 0;
 }
 
-static irqreturn_t armpmu_platform_irq(int irq, void *dev)
+static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 {
 	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
 	struct platform_device *plat_device = armpmu->plat_device;
 	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
 
-	return plat->handle_irq(irq, dev, armpmu->handle_irq);
+	if (plat && plat->handle_irq)
+		return plat->handle_irq(irq, dev, armpmu->handle_irq);
+	else
+		return armpmu->handle_irq(irq, dev);
 }
 
 static void
 armpmu_release_hardware(struct arm_pmu *armpmu)
 {
-	int i, irq, irqs;
-	struct platform_device *pmu_device = armpmu->plat_device;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-
-	for (i = 0; i < irqs; ++i) {
-		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
-			continue;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, armpmu);
-	}
-
-	pm_runtime_put_sync(&pmu_device->dev);
+	armpmu->free_irq();
+	pm_runtime_put_sync(&armpmu->plat_device->dev);
 }
 
 static int
 armpmu_reserve_hardware(struct arm_pmu *armpmu)
 {
-	struct arm_pmu_platdata *plat;
-	irq_handler_t handle_irq;
-	int i, err, irq, irqs;
+	int err;
 	struct platform_device *pmu_device = armpmu->plat_device;
 
 	if (!pmu_device)
 		return -ENODEV;
 
-	plat = dev_get_platdata(&pmu_device->dev);
-	if (plat && plat->handle_irq)
-		handle_irq = armpmu_platform_irq;
-	else
-		handle_irq = armpmu->handle_irq;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-	if (irqs < 1) {
-		pr_err("no irqs for PMUs defined\n");
-		return -ENODEV;
-	}
-
 	pm_runtime_get_sync(&pmu_device->dev);
-
-	for (i = 0; i < irqs; ++i) {
-		err = 0;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq < 0)
-			continue;
-
-		/*
-		 * If we have a single PMU interrupt that we can't shift,
-		 * assume that we're running on a uniprocessor machine and
-		 * continue. Otherwise, continue without this interrupt.
-		 */
-		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
-			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-				    irq, i);
-			continue;
-		}
-
-		err = request_irq(irq, handle_irq,
-				  IRQF_DISABLED | IRQF_NOBALANCING,
-				  "arm-pmu", armpmu);
-		if (err) {
-			pr_err("unable to request IRQ%d for ARM PMU counters\n",
-				irq);
-			armpmu_release_hardware(armpmu);
-			return err;
-		}
-
-		cpumask_set_cpu(i, &armpmu->active_irqs);
+	err = armpmu->request_irq(armpmu_dispatch_irq);
+	if (err) {
+		armpmu_release_hardware(armpmu);
+		return err;
 	}
 
 	return 0;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 56ddc98..8d7d8d4 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -70,6 +70,67 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 	return &__get_cpu_var(cpu_hw_events);
 }
 
+static void cpu_pmu_free_irq(void)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	for (i = 0; i < irqs; ++i) {
+		if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+			continue;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq >= 0)
+			free_irq(irq, cpu_pmu);
+	}
+}
+
+static int cpu_pmu_request_irq(irq_handler_t handler)
+{
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_err("no irqs for PMUs defined\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < irqs; ++i) {
+		err = 0;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq < 0)
+			continue;
+
+		/*
+		 * If we have a single PMU interrupt that we can't shift,
+		 * assume that we're running on a uniprocessor machine and
+		 * continue. Otherwise, continue without this interrupt.
+		 */
+		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				    irq, i);
+			continue;
+		}
+
+		err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
+				  cpu_pmu);
+		if (err) {
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				irq);
+			return err;
+		}
+
+		cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+	}
+
+	return 0;
+}
+
 static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	int cpu;
@@ -79,7 +140,10 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
 	}
-	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
+
+	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
+	cpu_pmu->request_irq	= cpu_pmu_request_irq;
+	cpu_pmu->free_irq	= cpu_pmu_free_irq;
 
 	/* Ensure the PMU has sane values out of reset. */
 	if (cpu_pmu && cpu_pmu->reset)
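
The reworked dispatcher also makes the platform-data hook optional: boards
without an arm_pmu_platdata now fall straight through to
armpmu->handle_irq() instead of dereferencing a NULL pointer. For
illustration only (the board_pmu_* names are invented, not from this
patch), a board that wants to interpose on the PMU interrupt would still
provide the hook:

static irqreturn_t board_pmu_handle_irq(int irq, void *dev,
					irq_handler_t pmu_handler)
{
	/* Board-specific work, e.g. acking an interrupt combiner,
	 * before handing off to the PMU driver's own handler. */
	return pmu_handler(irq, dev);
}

static struct arm_pmu_platdata board_pmu_platdata = {
	.handle_irq = board_pmu_handle_irq,
};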