diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -18,6 +18,12 @@
typedef struct {
atomic64_t id;
+
+	/*
+	 * Non-zero if userspace has direct access to the hardware
+	 * counters.
+	 */
+ atomic_t pmu_direct_access;
void *vdso;
unsigned long flags;
} mm_context_t;
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -21,6 +21,7 @@
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
+#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>
@@ -224,6 +225,7 @@ static inline void __switch_mm(struct mm_struct *next)
}
check_and_switch_context(next, cpu);
+ perf_switch_user_access(next);
}
static inline void
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -8,6 +8,7 @@
#include <asm/stack_pointer.h>
#include <asm/ptrace.h>
+#include <linux/mm_types.h>
#define ARMV8_PMU_MAX_COUNTERS 32
#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
@@ -223,4 +224,17 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
(regs)->pstate = PSR_MODE_EL1h; \
}
+static inline void perf_switch_user_access(struct mm_struct *mm)
+{
+ if (!IS_ENABLED(CONFIG_PERF_EVENTS))
+ return;
+
+ if (atomic_read(&mm->context.pmu_direct_access)) {
+		write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR,
+			     pmuserenr_el0);
+ } else {
+ write_sysreg(0, pmuserenr_el0);
+ }
+}
+
#endif
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -25,6 +25,7 @@
#include <linux/irqdesc.h>
#include <asm/irq_regs.h>
+#include <asm/mmu_context.h>
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
@@ -778,6 +779,41 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
&cpu_pmu->node);
}
+static void refresh_pmuserenr(void *mm)
+{
+ perf_switch_user_access(mm);
+}
+
+static void armpmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
+{
+ if (!(event->hw.flags & ARMPMU_EL0_RD_CNTR))
+ return;
+
+ /*
+ * This function relies on not being called concurrently in two
+ * tasks in the same mm. Otherwise one task could observe
+ * pmu_direct_access > 1 and return all the way back to
+ * userspace with user access disabled while another task is still
+ * doing on_each_cpu_mask() to enable user access.
+ *
+ * For now, this can't happen because all callers hold mmap_sem
+ * for write. If this changes, we'll need a different solution.
+ */
+ lockdep_assert_held_write(&mm->mmap_sem);
+
+ if (atomic_inc_return(&mm->context.pmu_direct_access) == 1)
+ on_each_cpu(refresh_pmuserenr, mm, 1);
+}
+
+static void armpmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+{
+ if (!(event->hw.flags & ARMPMU_EL0_RD_CNTR))
+ return;
+
+ if (atomic_dec_and_test(&mm->context.pmu_direct_access))
+		on_each_cpu_mask(mm_cpumask(mm), refresh_pmuserenr, mm, 1);
+}
+
static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{
struct arm_pmu *pmu;
@@ -799,6 +835,8 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
.pmu_enable = armpmu_enable,
.pmu_disable = armpmu_disable,
.event_init = armpmu_event_init,
+ .event_mapped = armpmu_event_mapped,
+ .event_unmapped = armpmu_event_unmapped,
.add = armpmu_add,
.del = armpmu_del,
.start = armpmu_start,
Keep track of events opened with direct access to the hardware
counters and modify the permissions while they are open. The strategy
used here is the same as the one x86 uses: every time an event is
mapped, the permissions are set if required. The atomic field added to
the mm_context keeps track of the number of such events and lets us
deactivate the permissions once all of them are unmapped. We also need
to update the permissions in the context switch code so that tasks
keep the right permissions.

Signed-off-by: Raphael Gault <raphael.gault@arm.com>
---
 arch/arm64/include/asm/mmu.h         |  6 +++++
 arch/arm64/include/asm/mmu_context.h |  2 ++
 arch/arm64/include/asm/perf_event.h  | 14 ++++++++++
 drivers/perf/arm_pmu.c               | 38 ++++++++++++++++++++++++++++
 4 files changed, 60 insertions(+)
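
For illustration only (not part of this patch), here is a minimal
userspace sketch of how a task might exercise the direct access this
series enables. It assumes that another patch in the series advertises
the capability through cap_user_rdpmc in the perf mmap page and flags
the event with ARMPMU_EL0_RD_CNTR, so that mmap() of the event reaches
->event_mapped() and PMUSERENR_EL0 is set for this mm. Error handling
is kept minimal.

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t read_cycle_counter(void)
{
	uint64_t val;

	/* This read traps unless PMUSERENR_EL0.CR is set for this mm. */
	asm volatile("mrs %0, pmccntr_el0" : "=r" (val));
	return val;
}

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *pc;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/*
	 * mmap() of the event is what ends up in ->event_mapped(),
	 * which bumps pmu_direct_access and enables EL0 access via
	 * PMUSERENR_EL0 on the CPUs running this mm.
	 */
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED)
		return 1;

	if (!pc->cap_user_rdpmc) {
		fprintf(stderr, "direct counter access not available\n");
		return 1;
	}

	printf("cycles: %llu\n", (unsigned long long)read_cycle_counter());
	return 0;
}

A real user would follow the seqcount/index/offset protocol documented
for the perf mmap page rather than reading PMCCNTR_EL0 unconditionally;
the sketch only shows that the EL0 counter read no longer traps once
the event is mapped.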