Message ID   | 20241008033159.22459-1-jeff.xie@linux.dev (mailing list archive)
State        | Queued
Commit       | 60b1f578b5789730d81460d1836dec7fa60510bf
Delegated to | Steven Rostedt
Series       | [v5] ftrace: Get the true parent ip for function tracer
October 8, 2024 at 11:31 AM, "Jeff Xie" <jeff.xie@linux.dev> wrote:

I'd like to know if this patch can be merged into 6.13?

> > When using both the function tracer and the function graph tracer
> > simultaneously, it is found that the function tracer sometimes captures
> > a fake parent ip (return_to_handler) instead of the true parent ip.
> >
> > This issue is easy to reproduce. Below are my reproduction steps:
> >
> > jeff-labs:~/bin # ./trace-net.sh
> > jeff-labs:~/bin # cat /sys/kernel/debug/tracing/instances/foo/trace | grep return_to_handler
> > trace-net.sh-405 [001] ...2. 31.859501: avc_has_perm+0x4/0x190 <-return_to_handler+0x0/0x40
> > trace-net.sh-405 [001] ...2. 31.859503: simple_setattr+0x4/0x70 <-return_to_handler+0x0/0x40
> > trace-net.sh-405 [001] ...2. 31.859503: truncate_pagecache+0x4/0x60 <-return_to_handler+0x0/0x40
> > trace-net.sh-405 [001] ...2. 31.859505: unmap_mapping_range+0x4/0x140 <-return_to_handler+0x0/0x40
> > trace-net.sh-405 [001] ...3. 31.859508: _raw_spin_unlock+0x4/0x30 <-return_to_handler+0x0/0x40
> > [...]
> >
> > The following is my simple trace script:
> >
> > <snip>
> > jeff-labs:~/bin # cat ./trace-net.sh
> > TRACE_PATH="/sys/kernel/debug/tracing"
> >
> > set_events() {
> >         echo 1 > $1/events/net/enable
> >         echo 1 > $1/events/tcp/enable
> >         echo 1 > $1/events/sock/enable
> >         echo 1 > $1/events/napi/enable
> >         echo 1 > $1/events/fib/enable
> >         echo 1 > $1/events/neigh/enable
> > }
> >
> > set_events ${TRACE_PATH}
> > echo 1 > ${TRACE_PATH}/options/sym-offset
> > echo 1 > ${TRACE_PATH}/options/funcgraph-tail
> > echo 1 > ${TRACE_PATH}/options/funcgraph-proc
> > echo 1 > ${TRACE_PATH}/options/funcgraph-abstime
> > echo 'tcp_orphan*' > ${TRACE_PATH}/set_ftrace_notrace
> > echo function_graph > ${TRACE_PATH}/current_tracer
> >
> > INSTANCE_FOO=${TRACE_PATH}/instances/foo
> > if [ ! -e $INSTANCE_FOO ]; then
> >         mkdir ${INSTANCE_FOO}
> > fi
> > set_events ${INSTANCE_FOO}
> > echo 1 > ${INSTANCE_FOO}/options/sym-offset
> > echo 'tcp_orphan*' > ${INSTANCE_FOO}/set_ftrace_notrace
> > echo function > ${INSTANCE_FOO}/current_tracer
> >
> > echo 1 > ${TRACE_PATH}/tracing_on
> > echo 1 > ${INSTANCE_FOO}/tracing_on
> > echo > ${TRACE_PATH}/trace
> > echo > ${INSTANCE_FOO}/trace
> > </snip>
> >
> > Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > Signed-off-by: Jeff Xie <jeff.xie@linux.dev>
> > ---
> > v5:
> > https://lore.kernel.org/linux-trace-kernel/5bc5b9b07d997e299b048005826bc6d592265c67@linux.dev/T/#me683cf5ac3858711230e54e5d5924a82e85917e6
> > - Use the fregs helper function to get the stack pointer
> >
> > v4:
> > https://lore.kernel.org/linux-trace-kernel/20241005101320.766c1100@rorschach.local.home/
> > - Fixed the crash when accessing "current" if the arch has not implemented
> >   noinstr; thanks Steve for the testing and the detailed explanation
> >
> > v3:
> > https://lore.kernel.org/linux-trace-kernel/20240910001326.87f27e6b312f1d956cf352a2@kernel.org/
> > - Fixed the build error when CONFIG_FUNCTION_GRAPH_TRACER=n, suggested by Masami
> >
> > v2:
> > https://lore.kernel.org/linux-trace-kernel/20240821095910.1888d7fa@gandalf.local.home/
> > - Added __always_inline to function_get_true_parent_ip, suggested by Steve
> >
> >  kernel/trace/trace_functions.c | 26 ++++++++++++++++++++++++++
> >  1 file changed, 26 insertions(+)
> >
> > diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
> > index 3b0cea37e029..46c79a349d8c 100644
> > --- a/kernel/trace/trace_functions.c
> > +++ b/kernel/trace/trace_functions.c
> > @@ -176,6 +176,27 @@ static void function_trace_start(struct trace_array *tr)
> >          tracing_reset_online_cpus(&tr->array_buffer);
> >  }
> >
> > +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> > +static __always_inline unsigned long
> > +function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
> > +{
> > +        unsigned long true_parent_ip;
> > +        int idx = 0;
> > +
> > +        true_parent_ip = parent_ip;
> > +        if (unlikely(parent_ip == (unsigned long)&return_to_handler) && fregs)
> > +                true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
> > +                                (unsigned long *)ftrace_regs_get_stack_pointer(fregs));
> > +        return true_parent_ip;
> > +}
> > +#else
> > +static __always_inline unsigned long
> > +function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
> > +{
> > +        return parent_ip;
> > +}
> > +#endif
> > +
> >  static void
> >  function_trace_call(unsigned long ip, unsigned long parent_ip,
> >                      struct ftrace_ops *op, struct ftrace_regs *fregs)
> > @@ -193,6 +214,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
> >          if (bit < 0)
> >                  return;
> >
> > +        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
> > +
> >          trace_ctx = tracing_gen_ctx();
> >
> >          cpu = smp_processor_id();
> > @@ -241,6 +264,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
> >           * recursive protection is performed.
> >           */
> >          local_irq_save(flags);
> > +        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
> >          cpu = raw_smp_processor_id();
> >          data = per_cpu_ptr(tr->array_buffer.data, cpu);
> >          disabled = atomic_inc_return(&data->disabled);
> > @@ -309,6 +333,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
> >          if (bit < 0)
> >                  return;
> >
> > +        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
> >          cpu = smp_processor_id();
> >          data = per_cpu_ptr(tr->array_buffer.data, cpu);
> >          if (atomic_read(&data->disabled))
> > @@ -356,6 +381,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
> >           * recursive protection is performed.
> >           */
> >          local_irq_save(flags);
> > +        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
> >          cpu = raw_smp_processor_id();
> >          data = per_cpu_ptr(tr->array_buffer.data, cpu);
> >          disabled = atomic_inc_return(&data->disabled);
> > --
> > 2.43.0
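To make the failure mode easier to picture, here is a rough, self-contained userspace sketch of the idea; it is not kernel code, and names such as shadow_entry, hijack_return and true_parent_ip are invented for illustration only. The function graph tracer replaces a function's return address on the stack with its trampoline (return_to_handler) and remembers the displaced address on a per-task shadow stack; ftrace_graph_ret_addr() plays the role of the lookup that maps the trampoline back to the saved caller, which is why the patch calls it when parent_ip turns out to be return_to_handler. Note also that the patch only does the lookup when fregs is non-NULL, since the callback may not always receive register state.

<snip>
/*
 * toy-return-hijack.c - userspace model, NOT kernel code.
 *
 * Models why the function tracer sees "return_to_handler" as parent_ip
 * when the graph tracer is active, and how a shadow-stack lookup (the
 * role ftrace_graph_ret_addr() plays in the kernel) recovers the caller.
 * All names below are invented for this sketch.
 *
 * Build: cc -o toy toy-return-hijack.c && ./toy
 */
#include <stdio.h>

#define SHADOW_DEPTH 16

/* One shadow entry: the displaced caller and the stack slot it came from. */
struct shadow_entry {
        unsigned long real_parent;
        unsigned long *retp;
};

static struct shadow_entry shadow[SHADOW_DEPTH];
static int shadow_top;

/* Stand-in for the graph tracer's return trampoline. */
static void return_to_handler(void)
{
}

/* "Hijack" a return slot: remember the caller, then patch in the trampoline. */
static void hijack_return(unsigned long *retp)
{
        if (shadow_top >= SHADOW_DEPTH)
                return;                 /* toy model: just drop it */
        shadow[shadow_top].real_parent = *retp;
        shadow[shadow_top].retp = retp;
        shadow_top++;
        *retp = (unsigned long)return_to_handler;
}

/*
 * What the patch's helper does in spirit: if parent_ip is really the
 * trampoline, look up the displaced caller recorded for this stack slot.
 */
static unsigned long true_parent_ip(unsigned long parent_ip, unsigned long *retp)
{
        if (parent_ip != (unsigned long)return_to_handler)
                return parent_ip;       /* nothing was hijacked */

        for (int i = shadow_top - 1; i >= 0; i--)
                if (shadow[i].retp == retp)
                        return shadow[i].real_parent;

        return parent_ip;               /* no record found, report as-is */
}

int main(void)
{
        unsigned long caller = 0x1234;          /* pretend caller address    */
        unsigned long ret_slot = caller;        /* pretend stack return slot */

        hijack_return(&ret_slot);

        printf("raw parent_ip : %#lx (the trampoline)\n", ret_slot);
        printf("true parent_ip: %#lx\n", true_parent_ip(ret_slot, &ret_slot));
        return 0;
}
</snip>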
On Mon, 18 Nov 2024 16:15:02 +0000 jeff.xie@linux.dev wrote:

> October 8, 2024 at 11:31 AM, "Jeff Xie" <jeff.xie@linux.dev> wrote:
>
> I'd like to know if this patch can be merged into 6.13?

Bah, I missed seeing this update; it was buried in patchwork on the 3rd page :-p

I wish you had pinged me last week.

I just sent my pull request for the ftrace topic branch, but since the
merge window just opened, I can start testing this now and then push it
to next. I'll wait a few days and then send another pull request to Linus
by the end of the week, if nothing breaks.

-- Steve
On Mon, 18 Nov 2024 11:43:20 -0500 Steven Rostedt <rostedt@goodmis.org> wrote:

> On Mon, 18 Nov 2024 16:15:02 +0000
> jeff.xie@linux.dev wrote:
>
> > October 8, 2024 at 11:31 AM, "Jeff Xie" <jeff.xie@linux.dev> wrote:
> >
> > I'd like to know if this patch can be merged into 6.13?
>
> Bah, I missed seeing this update; it was buried in patchwork on the 3rd page :-p
>
> I wish you had pinged me last week.
>
> I just sent my pull request for the ftrace topic branch, but since the
> merge window just opened, I can start testing this now and then push it
> to next. I'll wait a few days and then send another pull request to Linus
> by the end of the week, if nothing breaks.
>

This patch conflicts with a cleanup patch that I incorrectly put in my
trace topic branch (when it should have gone into the ftrace topic
branch). I haven't sent a pull request for the trace topic branch yet, so
I'll just apply this patch on top of that, fix the conflict, test it,
push it to linux-next, and then send it later this week.

-- Steve
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 3b0cea37e029..46c79a349d8c 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -176,6 +176,27 @@ static void function_trace_start(struct trace_array *tr)
         tracing_reset_online_cpus(&tr->array_buffer);
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static __always_inline unsigned long
+function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
+{
+        unsigned long true_parent_ip;
+        int idx = 0;
+
+        true_parent_ip = parent_ip;
+        if (unlikely(parent_ip == (unsigned long)&return_to_handler) && fregs)
+                true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
+                                (unsigned long *)ftrace_regs_get_stack_pointer(fregs));
+        return true_parent_ip;
+}
+#else
+static __always_inline unsigned long
+function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
+{
+        return parent_ip;
+}
+#endif
+
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
                     struct ftrace_ops *op, struct ftrace_regs *fregs)
@@ -193,6 +214,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
         if (bit < 0)
                 return;
 
+        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
+
         trace_ctx = tracing_gen_ctx();
 
         cpu = smp_processor_id();
@@ -241,6 +264,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
          * recursive protection is performed.
          */
         local_irq_save(flags);
+        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
         cpu = raw_smp_processor_id();
         data = per_cpu_ptr(tr->array_buffer.data, cpu);
         disabled = atomic_inc_return(&data->disabled);
@@ -309,6 +333,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
         if (bit < 0)
                 return;
 
+        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
         cpu = smp_processor_id();
         data = per_cpu_ptr(tr->array_buffer.data, cpu);
         if (atomic_read(&data->disabled))
@@ -356,6 +381,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
          * recursive protection is performed.
          */
         local_irq_save(flags);
+        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
         cpu = raw_smp_processor_id();
         data = per_cpu_ptr(tr->array_buffer.data, cpu);
         disabled = atomic_inc_return(&data->disabled);
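One design note on the diff above: the helper is defined twice, once for CONFIG_FUNCTION_GRAPH_TRACER and once as a trivial stub, so the four call sites never need an #ifdef of their own and the stub collapses to a plain pass-through when the graph tracer is compiled out. A minimal standalone sketch of that pattern follows; the macro HAVE_FEATURE and the function fixup_parent_ip are made-up names for illustration, not kernel identifiers.

<snip>
/*
 * stub-pattern.c - standalone illustration, not kernel code.
 *
 * Build:  cc stub-pattern.c                 (stub path)
 *    or:  cc -DHAVE_FEATURE stub-pattern.c  (real path)
 */
#include <stdio.h>

#ifdef HAVE_FEATURE
static inline unsigned long fixup_parent_ip(unsigned long parent_ip)
{
        /* Pretend we resolved the real caller here. */
        return parent_ip + 1;
}
#else
static inline unsigned long fixup_parent_ip(unsigned long parent_ip)
{
        /* Feature disabled: the compiler folds this call away entirely. */
        return parent_ip;
}
#endif

int main(void)
{
        unsigned long parent_ip = 0x1000;

        /* The call site is identical either way; no #ifdef needed here. */
        parent_ip = fixup_parent_ip(parent_ip);
        printf("parent_ip = %#lx\n", parent_ip);
        return 0;
}
</snip>

Built without -DHAVE_FEATURE the call compiles down to a no-op, which is the same effect the __always_inline stub has on the call sites in the patch.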