
[v3,2/4] fprobe: make fprobe_kprobe_handler recursion free

Message ID 20230517034510.15639-3-zegao@tencent.com
State Handled Elsewhere
Series Make fprobe + rethook immune to recursion

Checks

Context Check Description
conchuod/cover_letter success Series has a cover letter
conchuod/tree_selection success Guessed tree name to be fixes at HEAD 3b90b09af5be
conchuod/fixes_present success Fixes tag present in non-next series
conchuod/maintainers_pattern success MAINTAINERS pattern errors before the patch: 6 and now 6
conchuod/verify_signedoff success Signed-off-by tag matches author and committer
conchuod/kdoc success Errors and warnings before: 0 this patch: 0
conchuod/build_rv64_clang_allmodconfig success Errors and warnings before: 8 this patch: 8
conchuod/module_param success Was 0 now: 0
conchuod/build_rv64_gcc_allmodconfig success Errors and warnings before: 8 this patch: 8
conchuod/build_rv32_defconfig success Build OK
conchuod/dtb_warn_rv64 success Errors and warnings before: 3 this patch: 3
conchuod/header_inline success No static functions without inline keyword in header files
conchuod/checkpatch warning CHECK: Alignment should match open parenthesis; WARNING: From:/Signed-off-by: email address mismatch: 'From: Ze Gao <zegao2021@gmail.com>' != 'Signed-off-by: Ze Gao <zegao@tencent.com>'
conchuod/build_rv64_nommu_k210_defconfig success Build OK
conchuod/verify_fixes success No Fixes tag
conchuod/build_rv64_nommu_virt_defconfig success Build OK

Commit Message

Ze Gao May 17, 2023, 3:45 a.m. UTC
The current implementation calls kprobe-related functions before doing
the ftrace recursion check in fprobe_kprobe_handler, which opens the
door to a kernel crash due to stack recursion if preempt_count_{add, sub}
is traceable in kprobe_busy_{begin, end}.

Without this patch, things go like this (quoted from Steven):
"
fprobe_kprobe_handler() {
   kprobe_busy_begin() {
      preempt_disable() {
         preempt_count_add() {  <-- trace
            fprobe_kprobe_handler() {
		[ wash, rinse, repeat, CRASH!!! ]
"

By refactoring the common part out of fprobe_kprobe_handler and
fprobe_handler and calling the ftrace recursion check at the very
beginning, the whole of fprobe_kprobe_handler is free from recursion
(see the condensed sketch after the diffstat below).

Signed-off-by: Ze Gao <zegao@tencent.com>
Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Link: https://lore.kernel.org/linux-trace-kernel/20230516071830.8190-3-zegao@tencent.com
---
 kernel/trace/fprobe.c | 59 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 44 insertions(+), 15 deletions(-)
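
In outline, the patched fprobe_kprobe_handler looks like this (condensed
from the full patch at the bottom of this page):

	static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
					  struct ftrace_ops *ops, struct ftrace_regs *fregs)
	{
		struct fprobe *fp = container_of(ops, struct fprobe, ops);
		int bit;

		if (fprobe_disabled(fp))
			return;

		/* the guard comes first: everything up to here must be notrace */
		bit = ftrace_test_recursion_trylock(ip, parent_ip);
		if (bit < 0) {
			fp->nmissed++;
			return;
		}

		if (unlikely(kprobe_running())) {
			fp->nmissed++;
			return;	/* note: later found to leak the lock, see the thread below */
		}

		kprobe_busy_begin();	/* a re-entry via preempt_count_add() is now caught above */
		__fprobe_handler(ip, parent_ip, ops, fregs);	/* the factored-out common body */
		kprobe_busy_end();
		ftrace_test_recursion_unlock(bit);
	}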

Comments

Jiri Olsa May 17, 2023, 10:47 a.m. UTC | #1
On Wed, May 17, 2023 at 11:45:07AM +0800, Ze Gao wrote:
> [...]
> @@ -20,30 +20,22 @@ struct fprobe_rethook_node {
>  	char data[];
>  };
>  
> -static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
> -			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
> +static inline void __fprobe_handler(unsigned long ip, unsigned long
> +		parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs)
>  {
>  	struct fprobe_rethook_node *fpr;
>  	struct rethook_node *rh = NULL;
>  	struct fprobe *fp;
>  	void *entry_data = NULL;
> -	int bit, ret;
> +	int ret;
>  

this change uncovered a bug for me, introduced by [1]

bpf's kprobe multi uses either fprobe's entry_handler or exit_handler,
so the 'ret' value is undefined for the return probe path, and occasionally
we won't set up the rethook and will miss the return probe

we can either squash this change into your patch or I can make a separate
patch for that.. but given that [1] is quite recent we could just silently
fix that ;-)

jirka


[1] 39d954200bf6 fprobe: Skip exit_handler if entry_handler returns !0
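
For context, a simplified sketch of the handler body after [1] (structure
approximated from fprobe.c; details elided):

	int ret;	/* never initialized here ... */

	if (fp->exit_handler) {
		rh = rethook_try_get(fp->rethook);
		...
	}

	if (fp->entry_handler)	/* ... and only assigned on this path */
		ret = fp->entry_handler(fp, ip, ftrace_get_regs(fregs), entry_data);

	if (rh) {
		if (ret)	/* reads garbage when only exit_handler is set */
			rethook_recycle(rh);	/* rethook dropped, return probe missed */
		else
			rethook_hook(rh, ftrace_get_regs(fregs), true);
	}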

---
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index 9abb3905bc8e..293184227394 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -27,7 +27,7 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
 	struct rethook_node *rh = NULL;
 	struct fprobe *fp;
 	void *entry_data = NULL;
-	int bit, ret;
+	int bit, ret = 0;
 
 	fp = container_of(ops, struct fprobe, ops);
 	if (fprobe_disabled(fp))
Masami Hiramatsu (Google) May 17, 2023, 11:42 a.m. UTC | #2
On Wed, 17 May 2023 12:47:42 +0200
Jiri Olsa <olsajiri@gmail.com> wrote:

> On Wed, May 17, 2023 at 11:45:07AM +0800, Ze Gao wrote:
> > [...]
> 
> this change uncovered a bug for me, introduced by [1]
> 
> bpf's kprobe multi uses either fprobe's entry_handler or exit_handler,
> so the 'ret' value is undefined for the return probe path, and occasionally
> we won't set up the rethook and will miss the return probe

Oops, I forgot to push my fix.

https://lore.kernel.org/all/168100731160.79534.374827110083836722.stgit@devnote2/

> 
> we can either squash this change into your patch or I can make a separate
> patch for that.. but given that [1] is quite recent we could just silently
> fix that ;-)

Jiri, I think the above will fix the issue, right?

Jiri Olsa May 17, 2023, 12:30 p.m. UTC | #3
On Wed, May 17, 2023 at 08:42:36PM +0900, Masami Hiramatsu wrote:
> On Wed, 17 May 2023 12:47:42 +0200
> Jiri Olsa <olsajiri@gmail.com> wrote:
> 
> > On Wed, May 17, 2023 at 11:45:07AM +0800, Ze Gao wrote:
> > > [...]
> > 
> > this change uncovered a bug for me, introduced by [1]
> > 
> > bpf's kprobe multi uses either fprobe's entry_handler or exit_handler,
> > so the 'ret' value is undefined for the return probe path, and occasionally
> > we won't set up the rethook and will miss the return probe
> 
> Oops, I forgot to push my fix.
> 
> https://lore.kernel.org/all/168100731160.79534.374827110083836722.stgit@devnote2/
> 
> > 
> > we can either squash this change into your patch or I can make a separate
> > patch for that.. but given that [1] is quite recent we could just silently
> > fix that ;-)
> 
> Jiri, I think the above will fix the issue, right?

yes, it's the same fix, great, thanks

jirka

Masami Hiramatsu (Google) May 17, 2023, 2:27 p.m. UTC | #4
On Wed, 17 May 2023 11:45:07 +0800
Ze Gao <zegao2021@gmail.com> wrote:

> [...]
> -static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
> -			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
> +static inline void __fprobe_handler(unsigned long ip, unsigned long
> +		parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs)

OK, I picked up this series to probes/fixes. Note that I fixed this line 
because the "unsigned long parent_ip" was split into 2 lines.

Thank you,
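
Presumably the applied version rewraps the prototype along these lines (an
assumption on my part, not checked against the probes/fixes tree):

	static inline void __fprobe_handler(unsigned long ip, unsigned long parent_ip,
					    struct ftrace_ops *ops,
					    struct ftrace_regs *fregs)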


Andrii Nakryiko May 18, 2023, 12:16 a.m. UTC | #5
On Wed, May 17, 2023 at 7:28 AM Masami Hiramatsu <mhiramat@kernel.org> wrote:
>
> On Wed, 17 May 2023 11:45:07 +0800
> Ze Gao <zegao2021@gmail.com> wrote:
>
> > [...]
> > -static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
> > -                        struct ftrace_ops *ops, struct ftrace_regs *fregs)
> > +static inline void __fprobe_handler(unsigned long ip, unsigned long
> > +             parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs)
>
> OK, I picked up this series to probes/fixes. Note that I fixed this line
> because the "unsigned long parent_ip" was split into 2 lines.
>

Hey Masami,

Regarding [0], I was bisecting BPF CI failures related to
multi-kprobes, and it turned out that [0] is the fix we need. It would
be great if you could make sure this fix gets into Linus' tree ASAP, so
that we can get it back into bpf/bpf-next trees and fix BPF selftests
for everyone (we mitigated this for BPF CI as a temporary workaround
for now). Thanks!

  [0] https://lore.kernel.org/all/168100731160.79534.374827110083836722.stgit@devnote2/


> Thank you,
>
>
> >  {
> >       struct fprobe_rethook_node *fpr;
> >       struct rethook_node *rh = NULL;
> >       struct fprobe *fp;
> >       void *entry_data = NULL;
> > -     int bit, ret;
> > +     int ret;
> >
> >       fp = container_of(ops, struct fprobe, ops);
> > -     if (fprobe_disabled(fp))
> > -             return;
> > -
> > -     bit = ftrace_test_recursion_trylock(ip, parent_ip);
> > -     if (bit < 0) {
> > -             fp->nmissed++;
> > -             return;
> > -     }
> >
> >       if (fp->exit_handler) {
> >               rh = rethook_try_get(fp->rethook);
> >               if (!rh) {
> >                       fp->nmissed++;
> > -                     goto out;
> > +                     return;
> >               }
> >               fpr = container_of(rh, struct fprobe_rethook_node, node);
> >               fpr->entry_ip = ip;
> > @@ -61,23 +53,60 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
> >               else
> >                       rethook_hook(rh, ftrace_get_regs(fregs), true);
> >       }
> > -out:
> > +}
> > +
> > +static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
> > +             struct ftrace_ops *ops, struct ftrace_regs *fregs)
> > +{
> > +     struct fprobe *fp;
> > +     int bit;
> > +
> > +     fp = container_of(ops, struct fprobe, ops);
> > +     if (fprobe_disabled(fp))
> > +             return;
> > +
> > +     /* recursion detection has to go before any traceable function and
> > +      * all functions before this point should be marked as notrace
> > +      */
> > +     bit = ftrace_test_recursion_trylock(ip, parent_ip);
> > +     if (bit < 0) {
> > +             fp->nmissed++;
> > +             return;
> > +     }
> > +     __fprobe_handler(ip, parent_ip, ops, fregs);
> >       ftrace_test_recursion_unlock(bit);
> > +
> >  }
> >  NOKPROBE_SYMBOL(fprobe_handler);
> >
> >  static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
> >                                 struct ftrace_ops *ops, struct ftrace_regs *fregs)
> >  {
> > -     struct fprobe *fp = container_of(ops, struct fprobe, ops);
> > +     struct fprobe *fp;
> > +     int bit;
> > +
> > +     fp = container_of(ops, struct fprobe, ops);
> > +     if (fprobe_disabled(fp))
> > +             return;
> > +
> > +     /* recursion detection has to go before any traceable function and
> > +      * all functions called before this point should be marked as notrace
> > +      */
> > +     bit = ftrace_test_recursion_trylock(ip, parent_ip);
> > +     if (bit < 0) {
> > +             fp->nmissed++;
> > +             return;
> > +     }
> >
> >       if (unlikely(kprobe_running())) {
> >               fp->nmissed++;
> >               return;
> >       }
> > +
> >       kprobe_busy_begin();
> > -     fprobe_handler(ip, parent_ip, ops, fregs);
> > +     __fprobe_handler(ip, parent_ip, ops, fregs);
> >       kprobe_busy_end();
> > +     ftrace_test_recursion_unlock(bit);
> >  }
> >
> >  static void fprobe_exit_handler(struct rethook_node *rh, void *data,
> > --
> > 2.40.1
> >
>
>
> --
> Masami Hiramatsu (Google) <mhiramat@kernel.org>
>
Ze Gao May 18, 2023, 2:49 a.m. UTC | #6
Glad to hear that, hooray! :)

Thanks
Ze

On Wed, May 17, 2023 at 10:27 PM Masami Hiramatsu <mhiramat@kernel.org> wrote:
>
> [...]
>
> > -static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
> > -                        struct ftrace_ops *ops, struct ftrace_regs *fregs)
> > +static inline void __fprobe_handler(unsigned long ip, unsigned long
> > +             parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs)
>
> OK, I picked up this series to probes/fixes. Note that I fixed this line
> because the "unsigned long parent_ip" was split into 2 lines.
>
> Thank you,
Yafang Shao June 28, 2023, 7:16 a.m. UTC | #7
On Wed, May 17, 2023 at 11:45 AM Ze Gao <zegao2021@gmail.com> wrote:
>
> [...]
>  static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
>                                   struct ftrace_ops *ops, struct ftrace_regs *fregs)
>  {
> -       struct fprobe *fp = container_of(ops, struct fprobe, ops);
> +       struct fprobe *fp;
> +       int bit;
> +
> +       fp = container_of(ops, struct fprobe, ops);
> +       if (fprobe_disabled(fp))
> +               return;
> +
> +       /* recursion detection has to go before any traceable function and
> +        * all functions called before this point should be marked as notrace
> +        */
> +       bit = ftrace_test_recursion_trylock(ip, parent_ip);
> +       if (bit < 0) {
> +               fp->nmissed++;
> +               return;
> +       }
>
>         if (unlikely(kprobe_running())) {
>                 fp->nmissed++;

I have just looked through this patchset; just out of curiosity,
shouldn't we call ftrace_test_recursion_unlock(bit) here?
We have already locked it successfully, so why should we not unlock it?

>                 return;
>         }
> +
>         kprobe_busy_begin();
> -       fprobe_handler(ip, parent_ip, ops, fregs);
> +       __fprobe_handler(ip, parent_ip, ops, fregs);
>         kprobe_busy_end();
> +       ftrace_test_recursion_unlock(bit);
>  }
Ze Gao July 3, 2023, 6:52 a.m. UTC | #8
Hi, Yafang.

You're right, it should do the unlock before the return for the sake of
sanity. (Please ignore the last misleading reply :)

Will send a new patch to fix it.
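
For illustration, the follow-up could release the lock on that early return
along these lines (a sketch only; the recursion_unlock label is made up here
and the actual patch may differ):

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0) {
		fp->nmissed++;
		return;
	}

	if (unlikely(kprobe_running())) {
		fp->nmissed++;
		goto recursion_unlock;	/* was: return, which leaked the lock */
	}

	kprobe_busy_begin();
	__fprobe_handler(ip, parent_ip, ops, fregs);
	kprobe_busy_end();

recursion_unlock:
	ftrace_test_recursion_unlock(bit);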

Thanks
Ze

On Wed, Jun 28, 2023 at 3:17 PM Yafang Shao <laoar.shao@gmail.com> wrote:
>
> On Wed, May 17, 2023 at 11:45 AM Ze Gao <zegao2021@gmail.com> wrote:
> >
> > [...]
> >
> >         if (unlikely(kprobe_running())) {
> >                 fp->nmissed++;
>
> I have just looked through this patchset; just out of curiosity,
> shouldn't we call ftrace_test_recursion_unlock(bit) here?
> We have already locked it successfully, so why should we not unlock it?
>
> >                 return;
> >         }
> > +
> >         kprobe_busy_begin();
> > -       fprobe_handler(ip, parent_ip, ops, fregs);
> > +       __fprobe_handler(ip, parent_ip, ops, fregs);
> >         kprobe_busy_end();
> > +       ftrace_test_recursion_unlock(bit);
> >  }
> >
> >  static void fprobe_exit_handler(struct rethook_node *rh, void *data,
> > --
> > 2.40.1
> >
> >
>
>
> --
> Regards
> Yafang

Patch

diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index 9abb3905bc8e..097c740799ba 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -20,30 +20,22 @@  struct fprobe_rethook_node {
 	char data[];
 };
 
-static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
-			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
+static inline void __fprobe_handler(unsigned long ip, unsigned long
+		parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
 	struct fprobe_rethook_node *fpr;
 	struct rethook_node *rh = NULL;
 	struct fprobe *fp;
 	void *entry_data = NULL;
-	int bit, ret;
+	int ret;
 
 	fp = container_of(ops, struct fprobe, ops);
-	if (fprobe_disabled(fp))
-		return;
-
-	bit = ftrace_test_recursion_trylock(ip, parent_ip);
-	if (bit < 0) {
-		fp->nmissed++;
-		return;
-	}
 
 	if (fp->exit_handler) {
 		rh = rethook_try_get(fp->rethook);
 		if (!rh) {
 			fp->nmissed++;
-			goto out;
+			return;
 		}
 		fpr = container_of(rh, struct fprobe_rethook_node, node);
 		fpr->entry_ip = ip;
@@ -61,23 +53,60 @@  static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
 		else
 			rethook_hook(rh, ftrace_get_regs(fregs), true);
 	}
-out:
+}
+
+static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
+		struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+	struct fprobe *fp;
+	int bit;
+
+	fp = container_of(ops, struct fprobe, ops);
+	if (fprobe_disabled(fp))
+		return;
+
+	/* recursion detection has to go before any traceable function and
+	 * all functions before this point should be marked as notrace
+	 */
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+	if (bit < 0) {
+		fp->nmissed++;
+		return;
+	}
+	__fprobe_handler(ip, parent_ip, ops, fregs);
 	ftrace_test_recursion_unlock(bit);
+
 }
 NOKPROBE_SYMBOL(fprobe_handler);
 
 static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
 				  struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
-	struct fprobe *fp = container_of(ops, struct fprobe, ops);
+	struct fprobe *fp;
+	int bit;
+
+	fp = container_of(ops, struct fprobe, ops);
+	if (fprobe_disabled(fp))
+		return;
+
+	/* recursion detection has to go before any traceable function and
+	 * all functions called before this point should be marked as notrace
+	 */
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+	if (bit < 0) {
+		fp->nmissed++;
+		return;
+	}
 
 	if (unlikely(kprobe_running())) {
 		fp->nmissed++;
 		return;
 	}
+
 	kprobe_busy_begin();
-	fprobe_handler(ip, parent_ip, ops, fregs);
+	__fprobe_handler(ip, parent_ip, ops, fregs);
 	kprobe_busy_end();
+	ftrace_test_recursion_unlock(bit);
 }
 
 static void fprobe_exit_handler(struct rethook_node *rh, void *data,