@@ -86,6 +86,37 @@ static void off_cpu_finish(void *arg __maybe_unused)
off_cpu_bpf__destroy(skel);
}
+/* recent kernel added prev_state arg, so it needs to call the proper function */
+static void check_sched_switch_args(void)
+{
+	const struct btf *btf = bpf_object__btf(skel->obj);
+	const struct btf_type *t1, *t2, *t3;
+	u32 type_id;
+
+	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
+					 BTF_KIND_TYPEDEF);
+	if ((s32)type_id < 0)
+		goto old_format;
+
+	t1 = btf__type_by_id(btf, type_id);
+	if (t1 == NULL)
+		goto old_format;
+
+	t2 = btf__type_by_id(btf, t1->type);
+	if (t2 == NULL || !btf_is_ptr(t2))
+		goto old_format;
+
+	t3 = btf__type_by_id(btf, t2->type);
+	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
+		/* new format (proto has extra 'void *__data' arg): disable old handler */
+		bpf_program__set_autoload(skel->progs.on_switch3, false);
+		return;
+	}
+
+old_format:
+	bpf_program__set_autoload(skel->progs.on_switch4, false);
+}
+
int off_cpu_prepare(struct evlist *evlist, struct target *target)
{
int err, fd, i;
@@ -114,6 +145,7 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target)
}
set_max_rlimit();
+ check_sched_switch_args();
err = off_cpu_bpf__load(skel);
if (err) {
@@ -121,22 +121,13 @@ static inline int can_record(struct task_struct *t, int state)
return 1;
}
-SEC("tp_btf/sched_switch")
-int on_switch(u64 *ctx)
+static int on_switch(u64 *ctx, struct task_struct *prev,
+ struct task_struct *next, int state)
{
__u64 ts;
- int state;
__u32 pid, stack_id;
- struct task_struct *prev, *next;
struct tstamp_data elem, *pelem;
- if (!enabled)
- return 0;
-
- prev = (struct task_struct *)ctx[1];
- next = (struct task_struct *)ctx[2];
- state = get_task_state(prev);
-
ts = bpf_ktime_get_ns();
if (!can_record(prev, state))
@@ -178,4 +169,46 @@ int on_switch(u64 *ctx)
return 0;
}
+SEC("tp_btf/sched_switch")
+int on_switch3(u64 *ctx)
+{
+	struct task_struct *prev, *next;
+	int state;
+
+	if (!enabled)
+		return 0;
+
+	/*
+	 * old format: TP_PROTO(bool preempt, struct task_struct *prev,
+	 *             struct task_struct *next) -- state is read from prev
+	 */
+	prev = (struct task_struct *)ctx[1];
+	next = (struct task_struct *)ctx[2];
+
+	state = get_task_state(prev);
+
+	return on_switch(ctx, prev, next, state);
+}
+
+SEC("tp_btf/sched_switch")
+int on_switch4(u64 *ctx)
+{
+	struct task_struct *prev, *next;
+	int prev_state;
+
+	if (!enabled)
+		return 0;
+
+	/*
+	 * new format: TP_PROTO(bool preempt, int prev_state,
+	 *             struct task_struct *prev,
+	 *             struct task_struct *next) -- state is in ctx[1]
+	 */
+	prev = (struct task_struct *)ctx[2];
+	next = (struct task_struct *)ctx[3];
+	prev_state = (int)ctx[1];
+
+	return on_switch(ctx, prev, next, prev_state);
+}
+
char LICENSE[] SEC("license") = "Dual BSD/GPL";
Recently the sched_switch tracepoint added a new argument for prev_state,
but it's hard to handle the change in a BPF program.  Instead, we can
check the function prototype in BTF before loading the program.  Thus I
make two copies of the tracepoint handler and select one based on the
BTF info.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 tools/perf/util/bpf_off_cpu.c          | 32 +++++++++++++++
 tools/perf/util/bpf_skel/off_cpu.bpf.c | 55 ++++++++++++++++++++------
 2 files changed, 76 insertions(+), 11 deletions(-)