[v3,5/8] perf bpf-filter: Support separate lost counts for each filter

Message ID: 20240703223035.2024586-6-namhyung@kernel.org
State: Not Applicable
Delegated to: BPF
Series: perf record: Use a pinned BPF program for filter

Commit Message

Namhyung Kim July 3, 2024, 10:30 p.m. UTC
As the BPF filter is shared with other processes, each filter should
have its own lost count rather than a single global counter.  Add a new
array map (lost_count) to save the count using the same index as the
filter.  The count is cleared before running the filter.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 tools/perf/util/bpf-filter.c                 | 37 ++++++++++++++++++--
 tools/perf/util/bpf_skel/sample_filter.bpf.c | 15 ++++++--
 2 files changed, 48 insertions(+), 4 deletions(-)
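
For illustration, a minimal user-space sketch of the reset-then-read
pattern this patch uses: each filter owns one slot in a pinned BPF array
map, which is cleared before the filter is armed and read back afterwards
to report lost samples.  The pin path and the fixed index below are
assumptions for the example, not taken from the patch.

#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	/* assumed pin path; perf derives the real one from the bpffs mount */
	const char *path = "/sys/fs/bpf/perf_filter/dropped";
	int idx = 0;		/* filter index, same index as the filters map */
	int zero = 0, count = 0;
	int fd;

	fd = bpf_obj_get(path);	/* open the pinned "dropped" map */
	if (fd < 0)
		return 1;

	/* clear this filter's slot before (re)using the filter */
	bpf_map_update_elem(fd, &idx, &zero, BPF_ANY);

	/* ... samples are filtered in the meantime ... */

	/* read back how many samples this filter dropped */
	if (bpf_map_lookup_elem(fd, &idx, &count) == 0)
		printf("lost %d samples\n", count);

	close(fd);
	return 0;
}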

Patch

diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index 37ed6c48debf..c5eb0b7eec19 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -260,11 +260,23 @@ int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target)
 	}
 
 	if (needs_pid_hash && geteuid() != 0) {
+		int zero = 0;
+
 		/* The filters map is shared among other processes */
 		ret = update_pid_hash(evsel, entry);
 		if (ret < 0)
 			goto err;
 
+		fd = get_pinned_fd("dropped");
+		if (fd < 0) {
+			ret = fd;
+			goto err;
+		}
+
+		/* Reset the lost count */
+		bpf_map_update_elem(fd, &pinned_filter_idx, &zero, BPF_ANY);
+		close(fd);
+
 		fd = get_pinned_fd("perf_sample_filter");
 		if (fd < 0) {
 			ret = fd;
@@ -347,9 +359,25 @@ int perf_bpf_filter__destroy(struct evsel *evsel)
 
 u64 perf_bpf_filter__lost_count(struct evsel *evsel)
 {
-	struct sample_filter_bpf *skel = evsel->bpf_skel;
+	int count = 0;
+
+	if (list_empty(&evsel->bpf_filters))
+		return 0;
+
+	if (pinned_filter_idx >= 0) {
+		int fd = get_pinned_fd("dropped");
+
+		bpf_map_lookup_elem(fd, &pinned_filter_idx, &count);
+		close(fd);
+	} else if (evsel->bpf_skel) {
+		struct sample_filter_bpf *skel = evsel->bpf_skel;
+		int fd = bpf_map__fd(skel->maps.dropped);
+		int idx = 0;
 
-	return skel ? skel->bss->dropped : 0;
+		bpf_map_lookup_elem(fd, &idx, &count);
+	}
+
+	return count;
 }
 
 struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term term,
@@ -402,6 +430,7 @@ int perf_bpf_filter__pin(void)
 	/* pinned program will use pid-hash */
 	bpf_map__set_max_entries(skel->maps.filters, MAX_FILTERS);
 	bpf_map__set_max_entries(skel->maps.pid_hash, MAX_PIDS);
+	bpf_map__set_max_entries(skel->maps.dropped, MAX_FILTERS);
 	skel->rodata->use_pid_hash = 1;
 
 	if (sample_filter_bpf__load(skel) < 0) {
@@ -459,6 +488,10 @@ int perf_bpf_filter__pin(void)
 		pr_debug("chmod for pid_hash failed\n");
 		ret = -errno;
 	}
+	if (fchmodat(dir_fd, "dropped", 0666, 0) < 0) {
+		pr_debug("chmod for dropped failed\n");
+		ret = -errno;
+	}
 
 err_close:
 	close(dir_fd);
diff --git a/tools/perf/util/bpf_skel/sample_filter.bpf.c b/tools/perf/util/bpf_skel/sample_filter.bpf.c
index c5273f06fa45..4c75354b84fd 100644
--- a/tools/perf/util/bpf_skel/sample_filter.bpf.c
+++ b/tools/perf/util/bpf_skel/sample_filter.bpf.c
@@ -23,7 +23,14 @@ struct pid_hash {
 	__uint(max_entries, 1);
 } pid_hash SEC(".maps");
 
-int dropped;
+/* lost sample counts per filter, same index as the filters map */
+struct lost_count {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, int);
+	__type(value, int);
+	__uint(max_entries, 1);
+} dropped SEC(".maps");
+
 volatile const int use_pid_hash;
 
 void *bpf_cast_to_kern_ctx(void *) __ksym;
@@ -189,6 +196,7 @@ int perf_sample_filter(void *ctx)
 	int in_group = 0;
 	int group_result = 0;
 	int i, k;
+	int *losts;
 
 	kctx = bpf_cast_to_kern_ctx(ctx);
 
@@ -252,7 +260,10 @@ int perf_sample_filter(void *ctx)
 	return 1;
 
 drop:
-	__sync_fetch_and_add(&dropped, 1);
+	losts = bpf_map_lookup_elem(&dropped, &k);
+	if (losts != NULL)
+		__sync_fetch_and_add(losts, 1);
+
 	return 0;
 }
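
Note the design choice here: the old counter was a single global
(int dropped) in the skeleton's .bss, so once the program is pinned and
shared by several perf processes they would all bump the same number.
Moving it into an array map keyed by the filter index gives each filter
its own slot, updated atomically with __sync_fetch_and_add.  Since the
map is pinned, the per-filter counts can also be inspected from outside
perf, e.g. (assuming the map is pinned under /sys/fs/bpf/perf_filter/;
the exact path depends on the bpffs mount):

	$ bpftool map dump pinned /sys/fs/bpf/perf_filter/dropped

which shows one entry per filter index.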