
[RFC,v2,6/6] bpf: Enable kprobe_multi feature if CONFIG_FPROBE is enabled

Message ID: 169139097360.324433.2521527070503682979.stgit@devnote2 (mailing list archive)
State: Superseded
Series: bpf: fprobe: rethook: Use ftrace_regs instead of pt_regs

Commit Message

Masami Hiramatsu (Google) Aug. 7, 2023, 6:49 a.m. UTC
From: Masami Hiramatsu (Google) <mhiramat@kernel.org>

Enable the kprobe_multi feature if CONFIG_FPROBE is enabled. The pt_regs is
converted from ftrace_regs by ftrace_partial_regs(), so some registers may
always read as 0. But that should be enough for function entry (accessing
arguments) and exit (accessing the return value).

Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
 kernel/trace/bpf_trace.c |   22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)
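
In short, the diff below switches the kprobe_multi handlers from requiring a
full pt_regs via ftrace_get_regs() to building one from the ftrace_regs they
receive, using ftrace_partial_regs() and a per-CPU buffer. A minimal sketch of
the resulting pattern (the per-CPU variable name mirrors the patch; the handler
example_fprobe_handler() is illustrative only, not the patched code):

/* Sketch, not part of the patch: the conversion pattern adopted below.
 * ftrace_partial_regs() fills the supplied per-CPU pt_regs from the
 * ftrace_regs that fprobe passes to the handler; registers the trampoline
 * did not save may read as 0.
 */
#include <linux/ftrace.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct pt_regs, bpf_kprobe_multi_pt_regs);

static void example_fprobe_handler(struct ftrace_regs *fregs)
{
        struct pt_regs *regs;

        /* Unlike ftrace_get_regs(), this is not expected to return NULL
         * (see the discussion below), so the old !regs check is dropped. */
        regs = ftrace_partial_regs(fregs, this_cpu_ptr(&bpf_kprobe_multi_pt_regs));

        /* ... run the BPF program against @regs, e.g. bpf_prog_run(prog, regs) ... */
}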

Comments

Jiri Olsa Aug. 7, 2023, 10:08 p.m. UTC | #1
On Mon, Aug 07, 2023 at 03:49:33PM +0900, Masami Hiramatsu (Google) wrote:
> From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> 
> Enable the kprobe_multi feature if CONFIG_FPROBE is enabled. The pt_regs is
> converted from ftrace_regs by ftrace_partial_regs(), so some registers may
> always read as 0. But that should be enough for function entry (accessing
> arguments) and exit (accessing the return value).
> 
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> ---
>  kernel/trace/bpf_trace.c |   22 +++++++++-------------
>  1 file changed, 9 insertions(+), 13 deletions(-)
> 
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 99c5f95360f9..0725272a3de2 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -2460,7 +2460,7 @@ static int __init bpf_event_init(void)
>  fs_initcall(bpf_event_init);
>  #endif /* CONFIG_MODULES */
>  
> -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
> +#ifdef CONFIG_FPROBE
>  struct bpf_kprobe_multi_link {
>  	struct bpf_link link;
>  	struct fprobe fp;
> @@ -2482,6 +2482,8 @@ struct user_syms {
>  	char *buf;
>  };
>  
> +static DEFINE_PER_CPU(struct pt_regs, bpf_kprobe_multi_pt_regs);
> +
>  static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
>  {
>  	unsigned long __user usymbol;
> @@ -2623,13 +2625,14 @@ static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
>  
>  static int
>  kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
> -			   unsigned long entry_ip, struct pt_regs *regs)
> +			   unsigned long entry_ip, struct ftrace_regs *fregs)
>  {
>  	struct bpf_kprobe_multi_run_ctx run_ctx = {
>  		.link = link,
>  		.entry_ip = entry_ip,
>  	};
>  	struct bpf_run_ctx *old_run_ctx;
> +	struct pt_regs *regs;
>  	int err;
>  
>  	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
> @@ -2639,6 +2642,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
>  
>  	migrate_disable();
>  	rcu_read_lock();
> +	regs = ftrace_partial_regs(fregs, this_cpu_ptr(&bpf_kprobe_multi_pt_regs));

You did check for !regs when it was returned from ftrace_get_regs, so why
don't we need to check it here? Both ftrace_partial_regs and ftrace_get_regs
call arch_ftrace_get_regs on x86.

Also, I can't find the place ensuring fregs->regs.cs != 0 for the
FL_SAVE_REGS flag, as stated in arch_ftrace_get_regs. Any hint?

thanks,
jirka
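
For reference, a rough sketch of the contract difference being asked about
(illustrative only; show_regs_helpers() is a hypothetical helper, and the
comments follow this thread and the note in x86's arch_ftrace_get_regs rather
than the exact header code):

/* Rough sketch contrasting the two helpers discussed above; the real
 * definitions live in include/linux/ftrace.h and the arch headers.
 */
#include <linux/ftrace.h>
#include <linux/printk.h>
#include <linux/ptrace.h>

static void show_regs_helpers(struct ftrace_regs *fregs, struct pt_regs *storage)
{
        struct pt_regs *full, *partial;

        /* Full registers are only available when the ftrace_ops was
         * registered with FTRACE_OPS_FL_SAVE_REGS; otherwise this returns
         * NULL (on x86, arch_ftrace_get_regs() keys off fregs->regs.cs
         * being non-zero, as noted above). */
        full = ftrace_get_regs(fregs);
        if (!full)
                pr_debug("full pt_regs were not saved for this callsite\n");

        /* Partial registers fill @storage from whatever @fregs carries and,
         * per the discussion in this thread, are not expected to be NULL
         * for a valid @fregs; unsaved fields may simply read as 0. */
        partial = ftrace_partial_regs(fregs, storage);
        pr_debug("partial regs: ip=%lx\n", instruction_pointer(partial));
}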


>  	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
>  	err = bpf_prog_run(link->link.prog, regs);
>  	bpf_reset_run_ctx(old_run_ctx);
> @@ -2656,13 +2660,9 @@ kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
>  			  void *data)
>  {
>  	struct bpf_kprobe_multi_link *link;
> -	struct pt_regs *regs = ftrace_get_regs(fregs);
> -
> -	if (!regs)
> -		return 0;
>  
>  	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
> -	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
> +	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), fregs);
>  	return 0;
>  }
>  
> @@ -2672,13 +2672,9 @@ kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
>  			       void *data)
>  {
>  	struct bpf_kprobe_multi_link *link;
> -	struct pt_regs *regs = ftrace_get_regs(fregs);
> -
> -	if (!regs)
> -		return;
>  
>  	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
> -	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
> +	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), fregs);
>  }
>  
>  static int symbols_cmp_r(const void *a, const void *b, const void *priv)
> @@ -2918,7 +2914,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
>  	kvfree(cookies);
>  	return err;
>  }
> -#else /* !CONFIG_DYNAMIC_FTRACE_WITH_REGS */
> +#else /* !CONFIG_FPROBE */
>  int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
>  {
>  	return -EOPNOTSUPP;
>
Masami Hiramatsu (Google) Aug. 8, 2023, 10:20 a.m. UTC | #2
On Tue, 8 Aug 2023 00:08:11 +0200
Jiri Olsa <olsajiri@gmail.com> wrote:

> On Mon, Aug 07, 2023 at 03:49:33PM +0900, Masami Hiramatsu (Google) wrote:
> > From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > 
> > Enable the kprobe_multi feature if CONFIG_FPROBE is enabled. The pt_regs is
> > converted from ftrace_regs by ftrace_partial_regs(), so some registers may
> > always read as 0. But that should be enough for function entry (accessing
> > arguments) and exit (accessing the return value).
> > 
> > Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > ---
> >  kernel/trace/bpf_trace.c |   22 +++++++++-------------
> >  1 file changed, 9 insertions(+), 13 deletions(-)
> > 
> > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > index 99c5f95360f9..0725272a3de2 100644
> > --- a/kernel/trace/bpf_trace.c
> > +++ b/kernel/trace/bpf_trace.c
> > @@ -2460,7 +2460,7 @@ static int __init bpf_event_init(void)
> >  fs_initcall(bpf_event_init);
> >  #endif /* CONFIG_MODULES */
> >  
> > -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
> > +#ifdef CONFIG_FPROBE
> >  struct bpf_kprobe_multi_link {
> >  	struct bpf_link link;
> >  	struct fprobe fp;
> > @@ -2482,6 +2482,8 @@ struct user_syms {
> >  	char *buf;
> >  };
> >  
> > +static DEFINE_PER_CPU(struct pt_regs, bpf_kprobe_multi_pt_regs);
> > +
> >  static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
> >  {
> >  	unsigned long __user usymbol;
> > @@ -2623,13 +2625,14 @@ static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
> >  
> >  static int
> >  kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
> > -			   unsigned long entry_ip, struct pt_regs *regs)
> > +			   unsigned long entry_ip, struct ftrace_regs *fregs)
> >  {
> >  	struct bpf_kprobe_multi_run_ctx run_ctx = {
> >  		.link = link,
> >  		.entry_ip = entry_ip,
> >  	};
> >  	struct bpf_run_ctx *old_run_ctx;
> > +	struct pt_regs *regs;
> >  	int err;
> >  
> >  	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
> > @@ -2639,6 +2642,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
> >  
> >  	migrate_disable();
> >  	rcu_read_lock();
> > +	regs = ftrace_partial_regs(fregs, this_cpu_ptr(&bpf_kprobe_multi_pt_regs));
> 
> You did check for !regs when it was returned from ftrace_get_regs, so why
> don't we need to check it here? Both ftrace_partial_regs and ftrace_get_regs
> call arch_ftrace_get_regs on x86.

Good catch! I think ftrace_partial_regs must not return NULL (unless it is
given an invalid parameter, e.g. fregs == NULL).

> 
> Also, I can't find the place ensuring fregs->regs.cs != 0 for the
> FL_SAVE_REGS flag, as stated in arch_ftrace_get_regs. Any hint?

Oops, I misread that part. Maybe ftrace_partial_regs should forcibly return
ftrace_regs::regs if HAVE_PT_REGS_COMPAT_FTRACE_REGS=y, because its caller
does not care whether the regs are partial or not.

Thank you!
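
A minimal sketch of that suggestion (illustrative only; the guard symbol
spelling follows this mail, and the actual header change may look different):

/* Sketch of the suggestion above, not the actual header change: when
 * ftrace_regs simply wraps a pt_regs (the compat layout mentioned here),
 * ftrace_partial_regs() could hand back the embedded pt_regs directly
 * instead of copying, since its callers tolerate partial regs anyway.
 */
#ifdef HAVE_PT_REGS_COMPAT_FTRACE_REGS
static __always_inline struct pt_regs *
ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
{
        return &fregs->regs;    /* layout-compatible: no copy, never NULL */
}
#endif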

> 
> thanks,
> jirka
> 
> 
> >  	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> >  	err = bpf_prog_run(link->link.prog, regs);
> >  	bpf_reset_run_ctx(old_run_ctx);
> > @@ -2656,13 +2660,9 @@ kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
> >  			  void *data)
> >  {
> >  	struct bpf_kprobe_multi_link *link;
> > -	struct pt_regs *regs = ftrace_get_regs(fregs);
> > -
> > -	if (!regs)
> > -		return 0;
> >  
> >  	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
> > -	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
> > +	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), fregs);
> >  	return 0;
> >  }
> >  
> > @@ -2672,13 +2672,9 @@ kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
> >  			       void *data)
> >  {
> >  	struct bpf_kprobe_multi_link *link;
> > -	struct pt_regs *regs = ftrace_get_regs(fregs);
> > -
> > -	if (!regs)
> > -		return;
> >  
> >  	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
> > -	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
> > +	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), fregs);
> >  }
> >  
> >  static int symbols_cmp_r(const void *a, const void *b, const void *priv)
> > @@ -2918,7 +2914,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
> >  	kvfree(cookies);
> >  	return err;
> >  }
> > -#else /* !CONFIG_DYNAMIC_FTRACE_WITH_REGS */
> > +#else /* !CONFIG_FPROBE */
> >  int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
> >  {
> >  	return -EOPNOTSUPP;
> >

Patch

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 99c5f95360f9..0725272a3de2 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2460,7 +2460,7 @@  static int __init bpf_event_init(void)
 fs_initcall(bpf_event_init);
 #endif /* CONFIG_MODULES */
 
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#ifdef CONFIG_FPROBE
 struct bpf_kprobe_multi_link {
 	struct bpf_link link;
 	struct fprobe fp;
@@ -2482,6 +2482,8 @@  struct user_syms {
 	char *buf;
 };
 
+static DEFINE_PER_CPU(struct pt_regs, bpf_kprobe_multi_pt_regs);
+
 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
 {
 	unsigned long __user usymbol;
@@ -2623,13 +2625,14 @@  static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
 
 static int
 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
-			   unsigned long entry_ip, struct pt_regs *regs)
+			   unsigned long entry_ip, struct ftrace_regs *fregs)
 {
 	struct bpf_kprobe_multi_run_ctx run_ctx = {
 		.link = link,
 		.entry_ip = entry_ip,
 	};
 	struct bpf_run_ctx *old_run_ctx;
+	struct pt_regs *regs;
 	int err;
 
 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
@@ -2639,6 +2642,7 @@  kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
 
 	migrate_disable();
 	rcu_read_lock();
+	regs = ftrace_partial_regs(fregs, this_cpu_ptr(&bpf_kprobe_multi_pt_regs));
 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 	err = bpf_prog_run(link->link.prog, regs);
 	bpf_reset_run_ctx(old_run_ctx);
@@ -2656,13 +2660,9 @@  kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
 			  void *data)
 {
 	struct bpf_kprobe_multi_link *link;
-	struct pt_regs *regs = ftrace_get_regs(fregs);
-
-	if (!regs)
-		return 0;
 
 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
-	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
+	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), fregs);
 	return 0;
 }
 
@@ -2672,13 +2672,9 @@  kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
 			       void *data)
 {
 	struct bpf_kprobe_multi_link *link;
-	struct pt_regs *regs = ftrace_get_regs(fregs);
-
-	if (!regs)
-		return;
 
 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
-	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
+	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), fregs);
 }
 
 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
@@ -2918,7 +2914,7 @@  int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 	kvfree(cookies);
 	return err;
 }
-#else /* !CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+#else /* !CONFIG_FPROBE */
 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
 	return -EOPNOTSUPP;