diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -93,6 +93,8 @@ int lock_contention_prepare(struct lock_contention *con)
 		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
 	}
 
+	skel->bss->stack_skip = con->stack_skip;
+
 	lock_contention_bpf__attach(skel);
 	return 0;
 }
@@ -127,7 +129,7 @@ int lock_contention_read(struct lock_contention *con)
 	while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
 		struct map *kmap;
 		struct symbol *sym;
-		int idx;
+		int idx = 0;
 
 		bpf_map_lookup_elem(fd, &key, &data);
 		st = zalloc(sizeof(*st));
@@ -146,8 +148,7 @@ int lock_contention_read(struct lock_contention *con)
 		bpf_map_lookup_elem(stack, &key, stack_trace);
 
-		/* skip BPF + lock internal functions */
-		idx = con->stack_skip;
+		/* skip lock internal functions */
 		while (is_lock_function(machine, stack_trace[idx]) &&
 		       idx < con->max_stack - 1)
 			idx++;
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -72,6 +72,7 @@ struct {
 int enabled;
 int has_cpu;
 int has_task;
+int stack_skip;
 
 /* error stat */
 unsigned long lost;
@@ -117,7 +118,7 @@ int contention_begin(u64 *ctx)
 	pelem->timestamp = bpf_ktime_get_ns();
 	pelem->lock = (__u64)ctx[0];
 	pelem->flags = (__u32)ctx[1];
-	pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);
+	pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP | stack_skip);
 	if (pelem->stack_id < 0)
 		lost++;
Currently it collects stack traces up to the max size and then skips
entries afterwards, because we have no control over how perf callchains
are skipped.  But BPF can do the skipping itself with bpf_get_stackid(),
which takes a skip count in its flags argument.

Say we have max-stack=4 and stack-skip=2; we get these stack traces:

  Before:                    After:

  .---> +---+ <--.           .---> +---+ <--.
  |     |   |    |           |     |   |    |
  |     +---+  usable        |     +---+    |
 max    |   |    |          max    |   |    |
stack   +---+ <--'         stack   +---+  usable
  |     | X |                |     |   |    |
  |     +---+  skip          |     +---+    |
  |     | X |                |     |   |    |
  `---> +---+                `---> +---+ <--'   <=== collection
                                   | X |
                                   +---+  skip
                                   | X |
                                   +---+

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 tools/perf/util/bpf_lock_contention.c          | 7 ++++---
 tools/perf/util/bpf_skel/lock_contention.bpf.c | 3 ++-
 2 files changed, 6 insertions(+), 4 deletions(-)
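A note on the mechanism: bpf_get_stackid() interprets the low 8 bits of
its flags argument (BPF_F_SKIP_FIELD_MASK) as the number of innermost
frames to drop before the trace is stored, which is why stack_skip can
simply be OR-ed into the flags and must stay below 256.  Below is a
minimal, self-contained sketch of the same technique, not the perf code
itself; the map and program names (stacks_example, example_begin), the
map size, and MAX_STACK_DEPTH are made up for illustration.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_STACK_DEPTH  4

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, MAX_STACK_DEPTH * sizeof(__u64));
} stacks_example SEC(".maps");

/* Set from user space before attach, like skel->bss->stack_skip above. */
int stack_skip;

/* error stat */
unsigned long lost;

SEC("tp_btf/contention_begin")
int example_begin(__u64 *ctx)
{
	/* The low 8 bits of flags hold the skip count, applied before
	 * the trace is cut down to MAX_STACK_DEPTH entries, so every
	 * stored entry is a usable caller frame.
	 */
	long stack_id = bpf_get_stackid(ctx, &stacks_example,
					BPF_F_FAST_STACK_CMP | stack_skip);

	if (stack_id < 0)	/* e.g. -EINVAL if stack_skip > 255 */
		lost++;

	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Since the skip happens in the kernel before truncation, the fixed-size
map value holds only usable frames, matching the "After" picture above,
instead of wasting slots on skipped entries as in "Before".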