
[bpf-next,01/18] bpf: trampoline: export __bpf_prog_enter/exit_recur

Message ID: 20240416-bpf_wq-v1-1-c9e66092f842@kernel.org (mailing list archive)
State: New
Series: Introduce bpf_wq

Commit Message

Benjamin Tissoires April 16, 2024, 2:08 p.m. UTC
When dealing with workqueues, we also need to use
__bpf_prog_enter/exit_recur().

bpf_trampoline_enter/exit() is not suitable because we are
dealing with async callbacks that the verifier marks sleepable
or not, depending on how they are configured, so we need to
write our own "if (sleepable)..." test.
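
For illustration only, here is a minimal sketch of the kind of
"if (sleepable)" test a workqueue callback could open-code once these
helpers are exported. The wrapper name and its arguments are made up
for this example and are not part of the patch:

/* sketch only; assumes <linux/bpf.h> and <linux/filter.h> for bpf_prog_run() */
static void bpf_wq_run_prog(struct bpf_prog *prog, void *wq_ctx, bool sleepable)
{
	struct bpf_tramp_run_ctx run_ctx = {};
	u64 start;

	if (sleepable)
		start = __bpf_prog_enter_sleepable_recur(prog, &run_ctx);
	else
		start = __bpf_prog_enter_recur(prog, &run_ctx);

	/* enter returns 0 when recursion is detected: skip the prog */
	if (start)
		bpf_prog_run(prog, wq_ctx);

	/*
	 * The matching exit helper must run even when enter returned 0,
	 * so the per-CPU active counter and the run ctx are restored.
	 */
	if (sleepable)
		__bpf_prog_exit_sleepable_recur(prog, start, &run_ctx);
	else
		__bpf_prog_exit_recur(prog, start, &run_ctx);
}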

Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
---
 include/linux/bpf.h     | 4 ++++
 kernel/bpf/trampoline.c | 6 +++---
 2 files changed, 7 insertions(+), 3 deletions(-)

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5034c1b4ded7..551445c47779 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1124,6 +1124,10 @@  u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
 					     struct bpf_tramp_run_ctx *run_ctx);
 void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
 					     struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog,
+				   struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
+				   struct bpf_tramp_run_ctx *run_ctx);
 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
 typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 26ae703d3c3b..c6a3e0280993 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -858,7 +858,7 @@  static __always_inline u64 notrace bpf_prog_start_time(void)
  * [2..MAX_U64] - execute bpf prog and record execution time.
  *     This is start time.
  */
-static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
+u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
 	__acquires(RCU)
 {
 	rcu_read_lock();
@@ -896,8 +896,8 @@  static void notrace update_prog_stats(struct bpf_prog *prog,
 	}
 }
 
-static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
-					  struct bpf_tramp_run_ctx *run_ctx)
+void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
+				   struct bpf_tramp_run_ctx *run_ctx)
 	__releases(RCU)
 {
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);