
[bpf] bpf: Set run context for rawtp test_run callback

Message ID 20240603111408.3981087-1-jolsa@kernel.org (mailing list archive)
State Superseded
Delegated to: BPF

Checks

Context Check Description
bpf/vmtest-bpf-PR success PR summary
bpf/vmtest-bpf-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for bpf, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 6637 this patch: 6637
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers fail 1 blamed authors not CCed: yonghong.song@linux.dev; 12 maintainers not CCed: pabeni@redhat.com netdev@vger.kernel.org song@kernel.org edumazet@google.com martin.lau@linux.dev kpsingh@kernel.org yonghong.song@linux.dev linux-trace-kernel@vger.kernel.org kuba@kernel.org rostedt@goodmis.org eddyz87@gmail.com mathieu.desnoyers@efficios.com
netdev/build_clang fail Errors and warnings before: 344 this patch: 344
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn fail Errors and warnings before: 7005 this patch: 7005
netdev/checkpatch warning CHECK: No space is necessary after a cast; CHECK: Unnecessary parentheses around 'prog->active'; CHECK: Unnecessary parentheses around prog->active; WARNING: line length of 82 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 7 this patch: 7
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-13 success Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
bpf/vmtest-bpf-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-VM_Test-29 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-VM_Test-14 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-VM_Test-15 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-22 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-21 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18

Commit Message

Jiri Olsa June 3, 2024, 11:14 a.m. UTC
syzbot reported a crash when a rawtp program executed through the
test_run interface calls the bpf_get_attach_cookie helper or any
other helper that touches the task->bpf_ctx pointer.

We need to set up the bpf_ctx pointer in the rawtp test_run path as
well, so fix this by moving __bpf_trace_run into a header file and
using it in the test_run callback.

Also rename __bpf_trace_run to bpf_prog_run_trace.

Fixes: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
Reported-by: syzbot+3ab78ff125b7979e45f9@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=3ab78ff125b7979e45f9
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 include/linux/bpf.h      | 27 +++++++++++++++++++++++++++
 kernel/trace/bpf_trace.c | 28 ++--------------------------
 net/bpf/test_run.c       |  4 +---
 3 files changed, 30 insertions(+), 29 deletions(-)
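
To make the failure mode concrete, below is a minimal sketch of the kind of
reproducer that exercises this path, assuming libbpf, a separately compiled
BPF object, and a kernel where bpf_get_attach_cookie() is usable from raw_tp
programs; the file names and program name are illustrative, not taken from
the syzbot report. The BPF side simply calls bpf_get_attach_cookie(), which
reads the per-task bpf_ctx run context that the rawtp test_run path
previously left unset:

/* rawtp_cookie.bpf.c -- illustrative BPF side, not from the syzbot report */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("raw_tp/sys_enter")
int rawtp_cookie(void *ctx)
{
	/* Touches current->bpf_ctx through the trace run context. */
	return (int)bpf_get_attach_cookie(ctx);
}

char _license[] SEC("license") = "GPL";

and the user-space side triggers it through BPF_PROG_TEST_RUN (sketch only,
error handling trimmed):

/* run_rawtp.c -- trigger the program via BPF_PROG_TEST_RUN (sketch) */
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <stdio.h>

int main(void)
{
	__u64 args[2] = {};			/* fake raw_tp arguments */
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = args,
		.ctx_size_in = sizeof(args),
	);
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;

	obj = bpf_object__open_file("rawtp_cookie.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "rawtp_cookie");
	if (!prog)
		return 1;

	/* Without the fix, the program runs with task->bpf_ctx not pointing
	 * at a valid run context, so bpf_get_attach_cookie() dereferences
	 * stale or NULL data. */
	err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
	printf("err=%d retval=%d\n", err, opts.retval);

	bpf_object__close(obj);
	return 0;
}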

Comments

Alexei Starovoitov June 3, 2024, 4:25 p.m. UTC | #1
On Mon, Jun 3, 2024 at 4:14 AM Jiri Olsa <jolsa@kernel.org> wrote:
>
> syzbot reported crash when rawtp program executed through the
> test_run interface calls bpf_get_attach_cookie helper or any
> other helper that touches task->bpf_ctx pointer.
>
> We need to setup bpf_ctx pointer in rawtp test_run as well,
> so fixing this by moving __bpf_trace_run in header file and
> using it in test_run callback.
>
> Also renaming __bpf_trace_run to bpf_prog_run_trace.
>
> Fixes: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
> Reported-by: syzbot+3ab78ff125b7979e45f9@syzkaller.appspotmail.com
> Closes: https://syzkaller.appspot.com/bug?extid=3ab78ff125b7979e45f9
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
>  include/linux/bpf.h      | 27 +++++++++++++++++++++++++++
>  kernel/trace/bpf_trace.c | 28 ++--------------------------
>  net/bpf/test_run.c       |  4 +---
>  3 files changed, 30 insertions(+), 29 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 5e694a308081..4eb803b1d308 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -2914,6 +2914,33 @@ static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
>  }
>  #endif /* CONFIG_BPF_SYSCALL */
>
> +static __always_inline int
> +bpf_prog_run_trace(struct bpf_prog *prog, u64 cookie, u64 *ctx,
> +                  bpf_prog_run_fn run_prog)
> +{
> +       struct bpf_run_ctx *old_run_ctx;
> +       struct bpf_trace_run_ctx run_ctx;
> +       int ret = -1;
> +
> +       cant_sleep();

I suspect you should see a splat with that.

Overall I think it's better to add an empty run_ctx to
__bpf_prog_test_run_raw_tp() instead of moving such a big
function to a .h file.

No need for prog->active increments. test_run is running from a
syscall. If the same prog is attached somewhere as well it may
recurse once and it's fine imo.

pw-bot: cr

> +       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
> +               bpf_prog_inc_misses_counter(prog);
> +               goto out;
> +       }
> +
> +       run_ctx.bpf_cookie = cookie;
> +       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> +
> +       rcu_read_lock();
> +       ret = run_prog(prog, ctx);
> +       rcu_read_unlock();
> +
> +       bpf_reset_run_ctx(old_run_ctx);
> +out:
> +       this_cpu_dec(*(prog->active));
> +       return ret;
> +}
> +
>  static __always_inline int
>  bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
>  {
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index d1daeab1bbc1..8a23ef42b76b 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -2383,31 +2383,6 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
>         preempt_enable();
>  }
>
> -static __always_inline
> -void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
> -{
> -       struct bpf_prog *prog = link->link.prog;
> -       struct bpf_run_ctx *old_run_ctx;
> -       struct bpf_trace_run_ctx run_ctx;
> -
> -       cant_sleep();
> -       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
> -               bpf_prog_inc_misses_counter(prog);
> -               goto out;
> -       }
> -
> -       run_ctx.bpf_cookie = link->cookie;
> -       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> -
> -       rcu_read_lock();
> -       (void) bpf_prog_run(prog, args);
> -       rcu_read_unlock();
> -
> -       bpf_reset_run_ctx(old_run_ctx);
> -out:
> -       this_cpu_dec(*(prog->active));
> -}
> -
>  #define UNPACK(...)                    __VA_ARGS__
>  #define REPEAT_1(FN, DL, X, ...)       FN(X)
>  #define REPEAT_2(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
> @@ -2437,7 +2412,8 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
>         {                                                               \
>                 u64 args[x];                                            \
>                 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);                  \
> -               __bpf_trace_run(link, args);                            \
> +               (void) bpf_prog_run_trace(link->link.prog, link->cookie,\
> +                                         args, bpf_prog_run);          \
>         }                                                               \
>         EXPORT_SYMBOL_GPL(bpf_trace_run##x)
>  BPF_TRACE_DEFN_x(1);
> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> index f6aad4ed2ab2..84d1c91b01ab 100644
> --- a/net/bpf/test_run.c
> +++ b/net/bpf/test_run.c
> @@ -728,9 +728,7 @@ __bpf_prog_test_run_raw_tp(void *data)
>  {
>         struct bpf_raw_tp_test_run_info *info = data;
>
> -       rcu_read_lock();
> -       info->retval = bpf_prog_run(info->prog, info->ctx);
> -       rcu_read_unlock();
> +       info->retval = bpf_prog_run_trace(info->prog, 0, info->ctx, bpf_prog_run);
>  }
>
>  int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
> --
> 2.45.1
>
Jiri Olsa June 4, 2024, 7:42 a.m. UTC | #2
On Mon, Jun 03, 2024 at 09:25:47AM -0700, Alexei Starovoitov wrote:
> On Mon, Jun 3, 2024 at 4:14 AM Jiri Olsa <jolsa@kernel.org> wrote:
> >
> > syzbot reported crash when rawtp program executed through the
> > test_run interface calls bpf_get_attach_cookie helper or any
> > other helper that touches task->bpf_ctx pointer.
> >
> > We need to setup bpf_ctx pointer in rawtp test_run as well,
> > so fixing this by moving __bpf_trace_run in header file and
> > using it in test_run callback.
> >
> > Also renaming __bpf_trace_run to bpf_prog_run_trace.
> >
> > Fixes: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
> > Reported-by: syzbot+3ab78ff125b7979e45f9@syzkaller.appspotmail.com
> > Closes: https://syzkaller.appspot.com/bug?extid=3ab78ff125b7979e45f9
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> >  include/linux/bpf.h      | 27 +++++++++++++++++++++++++++
> >  kernel/trace/bpf_trace.c | 28 ++--------------------------
> >  net/bpf/test_run.c       |  4 +---
> >  3 files changed, 30 insertions(+), 29 deletions(-)
> >
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > index 5e694a308081..4eb803b1d308 100644
> > --- a/include/linux/bpf.h
> > +++ b/include/linux/bpf.h
> > @@ -2914,6 +2914,33 @@ static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
> >  }
> >  #endif /* CONFIG_BPF_SYSCALL */
> >
> > +static __always_inline int
> > +bpf_prog_run_trace(struct bpf_prog *prog, u64 cookie, u64 *ctx,
> > +                  bpf_prog_run_fn run_prog)
> > +{
> > +       struct bpf_run_ctx *old_run_ctx;
> > +       struct bpf_trace_run_ctx run_ctx;
> > +       int ret = -1;
> > +
> > +       cant_sleep();
> 
> I suspect you should see a splat with that.

hum, __bpf_prog_test_run_raw_tp is called with preempt_disable,
so I think it should be fine

> 
> Overall I think it's better to add empty run_ctx to
> __bpf_prog_test_run_raw_tp()
> instead of moving such a big function to .h
> 
> No need for prog->active increments. test_run is running
> from syscall. If the same prog is attached somewhere as well
> it may recurse once and it's fine imo.

heh, it was my first change, then I was thinking let's not duplicate the
code and re-use the existing function.. but it's true that there's no
use for prog->active in the test_run interface

jirka

> 
> pw-bot: cr
> 
> > +       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
> > +               bpf_prog_inc_misses_counter(prog);
> > +               goto out;
> > +       }
> > +
> > +       run_ctx.bpf_cookie = cookie;
> > +       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> > +
> > +       rcu_read_lock();
> > +       ret = run_prog(prog, ctx);
> > +       rcu_read_unlock();
> > +
> > +       bpf_reset_run_ctx(old_run_ctx);
> > +out:
> > +       this_cpu_dec(*(prog->active));
> > +       return ret;
> > +}
> > +
> >  static __always_inline int
> >  bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
> >  {
> > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > index d1daeab1bbc1..8a23ef42b76b 100644
> > --- a/kernel/trace/bpf_trace.c
> > +++ b/kernel/trace/bpf_trace.c
> > @@ -2383,31 +2383,6 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
> >         preempt_enable();
> >  }
> >
> > -static __always_inline
> > -void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
> > -{
> > -       struct bpf_prog *prog = link->link.prog;
> > -       struct bpf_run_ctx *old_run_ctx;
> > -       struct bpf_trace_run_ctx run_ctx;
> > -
> > -       cant_sleep();
> > -       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
> > -               bpf_prog_inc_misses_counter(prog);
> > -               goto out;
> > -       }
> > -
> > -       run_ctx.bpf_cookie = link->cookie;
> > -       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> > -
> > -       rcu_read_lock();
> > -       (void) bpf_prog_run(prog, args);
> > -       rcu_read_unlock();
> > -
> > -       bpf_reset_run_ctx(old_run_ctx);
> > -out:
> > -       this_cpu_dec(*(prog->active));
> > -}
> > -
> >  #define UNPACK(...)                    __VA_ARGS__
> >  #define REPEAT_1(FN, DL, X, ...)       FN(X)
> >  #define REPEAT_2(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
> > @@ -2437,7 +2412,8 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
> >         {                                                               \
> >                 u64 args[x];                                            \
> >                 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);                  \
> > -               __bpf_trace_run(link, args);                            \
> > +               (void) bpf_prog_run_trace(link->link.prog, link->cookie,\
> > +                                         args, bpf_prog_run);          \
> >         }                                                               \
> >         EXPORT_SYMBOL_GPL(bpf_trace_run##x)
> >  BPF_TRACE_DEFN_x(1);
> > diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> > index f6aad4ed2ab2..84d1c91b01ab 100644
> > --- a/net/bpf/test_run.c
> > +++ b/net/bpf/test_run.c
> > @@ -728,9 +728,7 @@ __bpf_prog_test_run_raw_tp(void *data)
> >  {
> >         struct bpf_raw_tp_test_run_info *info = data;
> >
> > -       rcu_read_lock();
> > -       info->retval = bpf_prog_run(info->prog, info->ctx);
> > -       rcu_read_unlock();
> > +       info->retval = bpf_prog_run_trace(info->prog, 0, info->ctx, bpf_prog_run);
> >  }
> >
> >  int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
> > --
> > 2.45.1
> >
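
For reference, here is a minimal sketch of the alternative suggested in the
comments above: set up an empty trace run context directly in
__bpf_prog_test_run_raw_tp() and drop the prog->active accounting. This is
only an illustration of that suggestion, not the superseding patch itself,
which may differ in detail.

/* Sketch of the suggested alternative in net/bpf/test_run.c. */
static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;
	struct bpf_trace_run_ctx run_ctx = {};
	struct bpf_run_ctx *old_run_ctx;

	/* Point task->bpf_ctx at a zero-cookie trace run context so that
	 * bpf_get_attach_cookie() and friends see valid data. */
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();

	bpf_reset_run_ctx(old_run_ctx);
}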

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5e694a308081..4eb803b1d308 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2914,6 +2914,33 @@  static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
 }
 #endif /* CONFIG_BPF_SYSCALL */
 
+static __always_inline int
+bpf_prog_run_trace(struct bpf_prog *prog, u64 cookie, u64 *ctx,
+		   bpf_prog_run_fn run_prog)
+{
+	struct bpf_run_ctx *old_run_ctx;
+	struct bpf_trace_run_ctx run_ctx;
+	int ret = -1;
+
+	cant_sleep();
+	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+		bpf_prog_inc_misses_counter(prog);
+		goto out;
+	}
+
+	run_ctx.bpf_cookie = cookie;
+	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+
+	rcu_read_lock();
+	ret = run_prog(prog, ctx);
+	rcu_read_unlock();
+
+	bpf_reset_run_ctx(old_run_ctx);
+out:
+	this_cpu_dec(*(prog->active));
+	return ret;
+}
+
 static __always_inline int
 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
 {
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d1daeab1bbc1..8a23ef42b76b 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2383,31 +2383,6 @@  void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
 	preempt_enable();
 }
 
-static __always_inline
-void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
-{
-	struct bpf_prog *prog = link->link.prog;
-	struct bpf_run_ctx *old_run_ctx;
-	struct bpf_trace_run_ctx run_ctx;
-
-	cant_sleep();
-	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
-		bpf_prog_inc_misses_counter(prog);
-		goto out;
-	}
-
-	run_ctx.bpf_cookie = link->cookie;
-	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
-
-	rcu_read_lock();
-	(void) bpf_prog_run(prog, args);
-	rcu_read_unlock();
-
-	bpf_reset_run_ctx(old_run_ctx);
-out:
-	this_cpu_dec(*(prog->active));
-}
-
 #define UNPACK(...)			__VA_ARGS__
 #define REPEAT_1(FN, DL, X, ...)	FN(X)
 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
@@ -2437,7 +2412,8 @@  void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
 	{								\
 		u64 args[x];						\
 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
-		__bpf_trace_run(link, args);				\
+		(void) bpf_prog_run_trace(link->link.prog, link->cookie,\
+					  args, bpf_prog_run);		\
 	}								\
 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
 BPF_TRACE_DEFN_x(1);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index f6aad4ed2ab2..84d1c91b01ab 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -728,9 +728,7 @@  __bpf_prog_test_run_raw_tp(void *data)
 {
 	struct bpf_raw_tp_test_run_info *info = data;
 
-	rcu_read_lock();
-	info->retval = bpf_prog_run(info->prog, info->ctx);
-	rcu_read_unlock();
+	info->retval = bpf_prog_run_trace(info->prog, 0, info->ctx, bpf_prog_run);
 }
 
 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,