[v2,2/2] perf/smmuv3: Validate groups for global filtering

Message ID: bb733b983dd696f6d98cc720f7185f8a89c1bc60.1564672242.git.robin.murphy@arm.com
State: Mainlined
Commit: 3c9347351a6ea1234aa647b36f89052de050d2a2
Series: [v2,1/2] perf/smmuv3: Validate group size

Commit Message

Robin Murphy Aug. 1, 2019, 3:22 p.m. UTC
With global filtering, it becomes possible for users to construct
self-contradictory groups with conflicting filters. Make sure we
cover that when initially validating events.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
---
v2:
 - generalise to reusable event-checking helpers
 - don't forget the group leader (again)

This seems like a reasonable compromise for now vs. rewriting half the
driver just to make a 'fake PMU' approach viable.
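
As a hedged illustration (not part of the patch), here's roughly what a
self-contradictory group looks like from userspace: two grouped events
requesting different stream IDs on a PMU with global filtering. The PMU
type and event numbers below are placeholders (the real ones live under
/sys/bus/event_source/devices/smmuv3_pmcg_*/), and the config1 bit
positions assume the filter_enable/filter_stream_id layout described by
the driver's format attributes. With this patch the sibling's
perf_event_open() fails up front with -EINVAL; previously the conflict
only surfaced as -EAGAIN once the events were scheduled onto counters.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, sibling;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = 42;			/* placeholder: dynamic PMU type from sysfs */
	attr.config = 0x1;		/* placeholder: event number */
	/* filter_enable = 1 (bit 33), filter_stream_id = 0x10 (bits [31:0]) */
	attr.config1 = (1ULL << 33) | 0x10;

	leader = perf_event_open(&attr, -1, 0, -1, 0);

	/* Same event, same group, but a conflicting stream ID */
	attr.config1 = (1ULL << 33) | 0x20;
	sibling = perf_event_open(&attr, -1, 0, leader, 0);
	if (sibling < 0)
		perror("conflicting sibling rejected");	/* expect EINVAL */

	return 0;
}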

 drivers/perf/arm_smmuv3_pmu.c | 47 +++++++++++++++++++++++++----------
 1 file changed, 34 insertions(+), 13 deletions(-)

Patch

diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index c65c197b52a7..9110d7d86b81 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -113,8 +113,6 @@ struct smmu_pmu {
 	u64 counter_mask;
 	u32 options;
 	bool global_filter;
-	u32 global_filter_span;
-	u32 global_filter_sid;
 };
 
 #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
@@ -260,6 +258,19 @@ static void smmu_pmu_set_event_filter(struct perf_event *event,
 	smmu_pmu_set_smr(smmu_pmu, idx, sid);
 }
 
+static bool smmu_pmu_check_global_filter(struct perf_event *curr,
+					 struct perf_event *new)
+{
+	if (get_filter_enable(new) != get_filter_enable(curr))
+		return false;
+
+	if (!get_filter_enable(new))
+		return true;
+
+	return get_filter_span(new) == get_filter_span(curr) &&
+	       get_filter_stream_id(new) == get_filter_stream_id(curr);
+}
+
 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
 				       struct perf_event *event, int idx)
 {
@@ -279,17 +290,14 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
 	}
 
 	/* Requested settings same as current global settings*/
-	if (span == smmu_pmu->global_filter_span &&
-	    sid == smmu_pmu->global_filter_sid)
+	idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+	if (idx == num_ctrs ||
+	    smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
+		smmu_pmu_set_event_filter(event, 0, span, sid);
 		return 0;
+	}
 
-	if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
-		return -EAGAIN;
-
-	smmu_pmu_set_event_filter(event, 0, span, sid);
-	smmu_pmu->global_filter_span = span;
-	smmu_pmu->global_filter_sid = sid;
-	return 0;
+	return -EAGAIN;
 }
 
 static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
@@ -312,6 +320,19 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
 	return idx;
 }
 
+static bool smmu_pmu_events_compatible(struct perf_event *curr,
+				       struct perf_event *new)
+{
+	if (new->pmu != curr->pmu)
+		return false;
+
+	if (to_smmu_pmu(new->pmu)->global_filter &&
+	    !smmu_pmu_check_global_filter(curr, new))
+		return false;
+
+	return true;
+}
+
 /*
  * Implementation of abstract pmu functionality required by
  * the core perf events code.
@@ -349,7 +370,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
 
 	/* Don't allow groups with mixed PMUs, except for s/w events */
 	if (!is_software_event(event->group_leader)) {
-		if (event->group_leader->pmu != event->pmu)
+		if (!smmu_pmu_events_compatible(event->group_leader, event))
 			return -EINVAL;
 
 		if (++group_num_events > smmu_pmu->num_counters)
@@ -360,7 +381,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
 		if (is_software_event(sibling))
 			continue;
 
-		if (sibling->pmu != event->pmu)
+		if (!smmu_pmu_events_compatible(sibling, event))
 			return -EINVAL;
 
 		if (++group_num_events > smmu_pmu->num_counters)