The perf event backends for ARMv8 and ARMv7 no longer use the pmu_lock;
the only remaining user is the ARMv6 event backend. Move the pmu_lock out
of the generic arm_pmu driver and into the ARMv6 code.

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
---
 arch/arm/kernel/perf_event_v6.c | 43 ++++++++++++++++++++++++-----------------
 drivers/perf/arm_pmu.c          |  1 -
 include/linux/perf/arm_pmu.h    |  5 -----
 3 files changed, 25 insertions(+), 24 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -69,6 +69,12 @@ enum armv6_counters {
};

/*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+DEFINE_PER_CPU(raw_spinlock_t, pmu_lock);
+
+/*
* The hardware events that we support. We do support cache operations but
* we have harvard caches and no way to combine instruction and data
* accesses/misses in hardware.
@@ -269,9 +275,8 @@ static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
static void armv6pmu_enable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
int idx = hwc->idx;

if (ARMV6_CYCLE_COUNTER == idx) {
@@ -294,12 +299,12 @@ static void armv6pmu_enable_event(struct perf_event *event)
* Mask out the current event and set the counter to count the event
* that we're interested in.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}

static irqreturn_t
@@ -363,25 +368,25 @@ static void armv6pmu_enable_event(struct perf_event *event)
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);

- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = armv6_pmcr_read();
val |= ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}

static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);

- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = armv6_pmcr_read();
val &= ~ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}

static int
@@ -420,9 +425,8 @@ static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
static void armv6pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
int idx = hwc->idx;

if (ARMV6_CYCLE_COUNTER == idx) {
@@ -444,20 +448,19 @@ static void armv6pmu_disable_event(struct perf_event *event)
* of ETM bus signal assertion cycles. The external reporting should
* be disabled and so this should never increment.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}

static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, flags, evt = 0;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ raw_spinlock_t *lock = this_cpu_ptr(&pmu_lock);
int idx = hwc->idx;

if (ARMV6_CYCLE_COUNTER == idx) {
@@ -475,12 +478,12 @@ static void armv6mpcore_pmu_disable_event(struct perf_event *event)
* Unlike UP ARMv6, we don't have a way of stopping the counters. We
* simply disable the interrupt reporting.
*/
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}

static int armv6_map_event(struct perf_event *event)
@@ -502,6 +505,8 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6_map_event;
cpu_pmu->num_events = 3;
+
+ raw_spin_lock_init(this_cpu_ptr(&pmu_lock));
}

static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
@@ -554,6 +559,8 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->map_event = armv6mpcore_map_event;
cpu_pmu->num_events = 3;

+ raw_spin_lock_init(this_cpu_ptr(&pmu_lock));
+
return 0;
}
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -823,7 +823,6 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
struct pmu_hw_events *events;

events = per_cpu_ptr(pmu->hw_events, cpu);
- raw_spin_lock_init(&events->pmu_lock);
events->percpu_pmu = pmu;
}
@@ -54,11 +54,6 @@ struct pmu_hw_events {
*/
DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

- /*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;

/*
* When using percpu IRQs, we need a percpu dev_id. Place it here as we
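
Note: below is a minimal, self-contained sketch of the per-CPU locking
pattern the patch adopts, for readers unfamiliar with it. The demo_*
names and the demo_pmcr per-CPU variable standing in for the banked
hardware register are illustrative only and are not part of the patch;
only the per-CPU and raw-spinlock kernel APIs are the ones used above.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* One lock per CPU, serializing read/modify/write of that CPU's register. */
static DEFINE_PER_CPU(raw_spinlock_t, demo_pmcr_lock);

/* Stand-in for the per-CPU hardware control register (really mrc/mcr). */
static DEFINE_PER_CPU(u32, demo_pmcr);

/* Each CPU's copy of the lock must be initialised before first use. */
static void demo_locks_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		raw_spin_lock_init(per_cpu_ptr(&demo_pmcr_lock, cpu));
}

/*
 * Clear @mask and set @bits in the local register. Callers are assumed
 * to be pinned to a CPU (perf's PMU callbacks run with preemption
 * disabled), so taking the local lock with IRQs off makes the sequence
 * atomic with respect to the PMU interrupt handler on the same CPU.
 */
static void demo_pmcr_update(u32 mask, u32 bits)
{
	raw_spinlock_t *lock = this_cpu_ptr(&demo_pmcr_lock);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(lock, flags);
	val = this_cpu_read(demo_pmcr);
	val &= ~mask;
	val |= bits;
	this_cpu_write(demo_pmcr, val);
	raw_spin_unlock_irqrestore(lock, flags);
}

The raw_ lock variant stays a spinning lock even under PREEMPT_RT, which
these IRQs-off paths require, and because both the register and its lock
are per-CPU there is never cross-CPU contention on the lock.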
-- 
1.9.1