diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -265,7 +265,7 @@ static int bpf_program_profiler__read(struct evsel *evsel)
return 0;
}
 
-static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
+static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx,
int fd)
{
struct bpf_prog_profiler_bpf *skel;
@@ -277,7 +277,7 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
assert(skel != NULL);
 
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
- &cpu, &fd, BPF_ANY);
+ &cpu_map_idx, &fd, BPF_ANY);
if (ret)
return ret;
}
@@ -566,12 +566,12 @@ static int bperf__load(struct evsel *evsel, struct target *target)
return err;
}
 
-static int bperf__install_pe(struct evsel *evsel, int cpu, int fd)
+static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
struct bperf_leader_bpf *skel = evsel->leader_skel;
 
return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
- &cpu, &fd, BPF_ANY);
+ &cpu_map_idx, &fd, BPF_ANY);
}
 
/*
@@ -623,9 +623,7 @@ static int bperf__read(struct evsel *evsel)
case BPERF_FILTER_GLOBAL:
assert(i == 0);
 
- num_cpu = all_cpu_map->nr;
- for (j = 0; j < num_cpu; j++) {
- cpu = all_cpu_map->map[j];
+ perf_cpu_map__for_each_cpu(cpu, j, all_cpu_map) {
perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter;
perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled;
perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running;
@@ -757,11 +755,11 @@ static inline bool bpf_counter_skip(struct evsel *evsel)
evsel->follower_skel == NULL;
}
 
-int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd)
+int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
if (bpf_counter_skip(evsel))
return 0;
- return evsel->bpf_counter_ops->install_pe(evsel, cpu, fd);
+ return evsel->bpf_counter_ops->install_pe(evsel, cpu_map_idx, fd);
}
 
int bpf_counter__load(struct evsel *evsel, struct target *target)
diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
--- a/tools/perf/util/bpf_counter.h
+++ b/tools/perf/util/bpf_counter.h
@@ -16,7 +16,7 @@ typedef int (*bpf_counter_evsel_op)(struct evsel *evsel);
typedef int (*bpf_counter_evsel_target_op)(struct evsel *evsel,
struct target *target);
typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
- int cpu,
+ int cpu_map_idx,
int fd);
 
struct bpf_counter_ops {
@@ -40,7 +40,7 @@ int bpf_counter__enable(struct evsel *evsel);
int bpf_counter__disable(struct evsel *evsel);
int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
-int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
+int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd);
 
#else /* HAVE_BPF_SKEL */
Synchronize the caller in evsel with the called function. Shorten
three lines of code in bperf__read by using perf_cpu_map__for_each_cpu.
This code frequently uses variables named cpu as CPU map indices,
which doesn't matter as all CPUs are in the CPU map; it is strange
that in some cases the cpumap is used at all.

Signed-off-by: Ian Rogers <irogers@google.com>
---
 tools/perf/util/bpf_counter.c | 16 +++++++---------
 tools/perf/util/bpf_counter.h |  4 ++--
 2 files changed, 9 insertions(+), 11 deletions(-)
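
For illustration only: the rename is about the difference between a CPU
number and an index into a (possibly sparse) CPU map, which a minimal
standalone sketch makes visible. The toy_* names below are simplified
stand-ins invented for this note, not the real libperf API; the macro
has the same shape as perf_cpu_map__for_each_cpu, binding both the map
index and the CPU number on every iteration.

#include <stdio.h>

/*
 * Simplified stand-in for struct perf_cpu_map: map[idx] holds the CPU
 * number at map index idx. With offline/sparse CPUs the two values
 * differ, which is exactly what the cpu vs. cpu_map_idx naming makes
 * explicit.
 */
struct toy_cpu_map {
	int nr;
	int map[8];
};

static int toy_cpu_map__cpu(const struct toy_cpu_map *cpus, int idx)
{
	/* Bounds-checked lookup so the loop's final increment is safe. */
	return idx < cpus->nr ? cpus->map[idx] : -1;
}

/* Same shape as libperf's perf_cpu_map__for_each_cpu(). */
#define toy_cpu_map__for_each_cpu(cpu, idx, cpus)		\
	for ((idx) = 0, (cpu) = toy_cpu_map__cpu(cpus, idx);	\
	     (idx) < (cpus)->nr;				\
	     (idx)++, (cpu) = toy_cpu_map__cpu(cpus, idx))

int main(void)
{
	/* A sparse map: map indices 0..3 name CPUs 0, 1, 4 and 5. */
	struct toy_cpu_map online = { .nr = 4, .map = { 0, 1, 4, 5 } };
	int cpu, idx;

	toy_cpu_map__for_each_cpu(cpu, idx, &online)
		printf("map index %d -> CPU %d\n", idx, cpu);
	return 0;
}

In the BPERF_FILTER_GLOBAL hunk above, all_cpu_map contains every CPU,
so map index and CPU number happen to coincide; that is why the naming
"doesn't matter" there, while the macro keeps the loop correct even for
maps where they differ.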