@@ -1517,7 +1517,18 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
u64 bpf_cookie,
struct bpf_prog_array **new_array);

-struct bpf_run_ctx {};
+enum bpf_run_ctx_type {
+ BPF_RUN_CTX_TYPE_NONE,
+ BPF_RUN_CTX_TYPE_CG,
+ BPF_RUN_CTX_TYPE_TRACE,
+ BPF_RUN_CTX_TYPE_TRAMP,
+ BPF_RUN_CTX_TYPE_KPROBE_MULTI,
+ BPF_RUN_CTX_TYPE_STRUCT_OPS,
+};
+
+struct bpf_run_ctx {
+ enum bpf_run_ctx_type type;
+};

struct bpf_cg_run_ctx {
struct bpf_run_ctx run_ctx;
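
Note: every concrete run context embeds the (now tagged) struct bpf_run_ctx as its first member, so a holder of the generic pointer can check the tag before downcasting. A minimal sketch of such a consumer, assuming a hypothetical helper name (bpf_run_ctx_to_cg() is not part of this patch):

/*
 * Sketch only: verify the tag, then recover the enclosing context with
 * container_of(). Returns NULL when the context is not a cgroup one.
 */
static inline struct bpf_cg_run_ctx *bpf_run_ctx_to_cg(struct bpf_run_ctx *ctx)
{
	if (!ctx || ctx->type != BPF_RUN_CTX_TYPE_CG)
		return NULL;
	return container_of(ctx, struct bpf_cg_run_ctx, run_ctx);
}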
@@ -1568,7 +1579,7 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
struct bpf_run_ctx *old_run_ctx;
- struct bpf_trace_run_ctx run_ctx;
+ struct bpf_trace_run_ctx run_ctx = { .run_ctx.type = BPF_RUN_CTX_TYPE_TRACE };
u32 ret = 1;

RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
@@ -1607,7 +1618,7 @@ bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
const struct bpf_prog *prog;
const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
- struct bpf_trace_run_ctx run_ctx;
+ struct bpf_trace_run_ctx run_ctx = { .run_ctx.type = BPF_RUN_CTX_TYPE_TRACE };
u32 ret = 1;

might_fault();
@@ -694,7 +694,7 @@ struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop)

int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
{
- struct bpf_run_ctx run_ctx, *old_run_ctx;
+ struct bpf_run_ctx run_ctx = {}, *old_run_ctx;
int ret;

if (prog->aux->sleepable) {
@@ -37,7 +37,7 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
const struct bpf_prog *prog;
const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
- struct bpf_cg_run_ctx run_ctx;
+ struct bpf_cg_run_ctx run_ctx = { .run_ctx.type = BPF_RUN_CTX_TYPE_CG };
u32 func_ret;

run_ctx.retval = retval;
@@ -882,6 +882,7 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
rcu_read_lock();
migrate_disable();

+ run_ctx->run_ctx.type = BPF_RUN_CTX_TYPE_TRAMP;
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
@@ -934,6 +935,7 @@ u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
rcu_read_lock();
migrate_disable();

+ run_ctx->run_ctx.type = BPF_RUN_CTX_TYPE_TRAMP;
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

return NO_START_TIME;
@@ -960,6 +962,7 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
return 0;
}

+ run_ctx->run_ctx.type = BPF_RUN_CTX_TYPE_TRAMP;
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

return bpf_prog_start_time();
@@ -983,6 +986,7 @@ u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
rcu_read_lock();
migrate_disable();

+ run_ctx->run_ctx.type = BPF_RUN_CTX_TYPE_STRUCT_OPS;
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

return bpf_prog_start_time();
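
Note: the trampoline enter paths receive their struct bpf_tramp_run_ctx from the caller rather than defining it locally, so they assign the tag at runtime instead of using a designated initializer; __bpf_prog_enter_struct_ops reuses the same struct but tags it BPF_RUN_CTX_TYPE_STRUCT_OPS, so the tag tracks the attach flavor, not the struct layout. A hedged sketch of a sanity check the matching exit paths could then perform (bpf_reset_run_ctx_checked() is hypothetical; the patch itself adds no such check):

/*
 * Hypothetical hardening, not in the patch: warn if the context being
 * torn down does not carry a tag installed by a trampoline enter path.
 */
static void notrace bpf_reset_run_ctx_checked(struct bpf_run_ctx *saved_ctx)
{
	struct bpf_run_ctx *cur = current->bpf_ctx;

	WARN_ON_ONCE(cur && cur->type != BPF_RUN_CTX_TYPE_TRAMP &&
		     cur->type != BPF_RUN_CTX_TYPE_STRUCT_OPS);
	bpf_reset_run_ctx(saved_ctx);
}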
@@ -2575,6 +2575,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
unsigned long entry_ip, struct pt_regs *regs)
{
struct bpf_kprobe_multi_run_ctx run_ctx = {
+ .run_ctx.type = BPF_RUN_CTX_TYPE_KPROBE_MULTI,
.link = link,
.entry_ip = entry_ip,
};
@@ -374,7 +374,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
{
struct bpf_prog_array_item item = {.prog = prog};
struct bpf_run_ctx *old_ctx;
- struct bpf_cg_run_ctx run_ctx;
+ struct bpf_cg_run_ctx run_ctx = { .run_ctx.type = BPF_RUN_CTX_TYPE_CG };
struct bpf_test_timer t = { NO_MIGRATE };
enum bpf_cgroup_storage_type stype;
int ret;
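
Note: with every installer tagging its context, readers of current->bpf_ctx can validate the layout before use. An illustrative caller, reusing the hypothetical bpf_run_ctx_to_cg() helper sketched above (this function name is likewise made up):

/*
 * Illustrative only: reject a mismatched run context instead of blindly
 * downcasting current->bpf_ctx to struct bpf_cg_run_ctx. retval here is
 * the value staged by bpf_prog_run_array_cg() in the hunk above.
 */
static int example_read_cg_retval(void)
{
	struct bpf_cg_run_ctx *cg_ctx = bpf_run_ctx_to_cg(current->bpf_ctx);

	return cg_ctx ? cg_ctx->retval : 0;	/* 0 when not in a cgroup hook */
}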