diff mbox

[RFC,2/3] arm64: refactor save_stack_trace()

Message ID 55A703F3.8050203@linaro.org (mailing list archive)
State New, archived
Headers show

Commit Message

AKASHI Takahiro July 16, 2015, 1:08 a.m. UTC
On 07/16/2015 09:27 AM, AKASHI Takahiro wrote:
> On 07/16/2015 01:13 AM, Steven Rostedt wrote:
>> On Wed, 15 Jul 2015 10:55:36 -0400
>> Steven Rostedt <rostedt@goodmis.org> wrote:
>>
>>
>>> I'll take a look at it and try to clean up the code.
>>
>> Does the following patch make sense for you?
>
> Looks nice. The patch greatly simplifies changes on arm64 side.

As follows:

- Takahiro AKASHI

Comments

Steven Rostedt July 16, 2015, 1:38 a.m. UTC | #1
On Thu, 16 Jul 2015 10:08:03 +0900
AKASHI Takahiro <takahiro.akashi@linaro.org> wrote:

> On 07/16/2015 09:27 AM, AKASHI Takahiro wrote:
> > On 07/16/2015 01:13 AM, Steven Rostedt wrote:
> >> On Wed, 15 Jul 2015 10:55:36 -0400
> >> Steven Rostedt <rostedt@goodmis.org> wrote:
> >>
> >>
> >>> I'll take a look at it and try to clean up the code.
> >>
> >> Does the following patch make sense for you?
> >
> > Looks nice. The patch greatly simplifies changes on arm64 side.
> 
> As follows:
> 
> - Takahiro AKASHI
> 

How should we go forward with this? Want me to make my change to my
tree, and pull this patch in with it? I can set up for a 4.3 release.

Then if I can get an Acked-by from one of the arm64 maintainers, I can
have this go through my tree?

-- Steve


> diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
> index c5534fa..868d6f1 100644
> --- a/arch/arm64/include/asm/ftrace.h
> +++ b/arch/arm64/include/asm/ftrace.h
> @@ -15,6 +15,7 @@
> 
>   #define MCOUNT_ADDR		((unsigned long)_mcount)
>   #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE
> +#define FTRACE_STACK_FRAME_OFFSET 4 /* sync it up with stacktrace.c */
> 
>   #ifndef __ASSEMBLY__
>   #include <linux/compat.h>
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 1da6029..2c1bf7d 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -260,6 +260,13 @@ static inline void ftrace_kill(void) { }
>   #endif /* CONFIG_FUNCTION_TRACER */
> 
>   #ifdef CONFIG_STACK_TRACER
> +/*
> + * the offset value to add to return address from save_stack_trace()
> + */
> +#ifndef FTRACE_STACK_FRAME_OFFSET
> +#define FTRACE_STACK_FRAME_OFFSET 0
> +#endif
> +
>   extern int stack_tracer_enabled;
>   int
>   stack_trace_sysctl(struct ctl_table *table, int write,
> diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
> index 9384647..c5b9748 100644
> --- a/kernel/trace/trace_stack.c
> +++ b/kernel/trace/trace_stack.c
> @@ -105,7 +105,7 @@ check_stack(unsigned long ip, unsigned long *stack)
> 
>   	/* Skip over the overhead of the stack tracer itself */
>   	for (i = 0; i < max_stack_trace.nr_entries; i++) {
> -		if (stack_dump_trace[i] == ip)
> +		if ((stack_dump_trace[i] + FTRACE_STACK_FRAME_OFFSET) == ip)
>   			break;
>   	}
> 
> @@ -131,7 +131,8 @@ check_stack(unsigned long ip, unsigned long *stack)
>   		p = start;
> 
>   		for (; p < top && i < max_stack_trace.nr_entries; p++) {
> -			if (*p == stack_dump_trace[i]) {
> +			if (*p == (stack_dump_trace[i]
> +					+ FTRACE_STACK_FRAME_OFFSET)) {
>   				stack_dump_trace[x] = stack_dump_trace[i++];
>   				this_size = stack_dump_index[x++] =
>   					(top - p) * sizeof(unsigned long);
Mark Rutland July 16, 2015, 2:28 p.m. UTC | #2
On Thu, Jul 16, 2015 at 02:08:03AM +0100, AKASHI Takahiro wrote:
> On 07/16/2015 09:27 AM, AKASHI Takahiro wrote:
> > On 07/16/2015 01:13 AM, Steven Rostedt wrote:
> >> On Wed, 15 Jul 2015 10:55:36 -0400
> >> Steven Rostedt <rostedt@goodmis.org> wrote:
> >>
> >>
> >>> I'll take a look at it and try to clean up the code.
> >>
> >> Does the following patch make sense for you?
> >
> > Looks nice. The patch greatly simplifies changes on arm64 side.
> 
> As follows:
> 
> - Takahiro AKASHI
> 
> diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
> index c5534fa..868d6f1 100644
> --- a/arch/arm64/include/asm/ftrace.h
> +++ b/arch/arm64/include/asm/ftrace.h
> @@ -15,6 +15,7 @@
> 
>   #define MCOUNT_ADDR		((unsigned long)_mcount)
>   #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE
> +#define FTRACE_STACK_FRAME_OFFSET 4 /* sync it up with stacktrace.c */

Is there any reason we couldn't have the arch code dump the stack depth
for each function when it walks the stack to generate the stack trace?

That means we can provide a more precise result (because we know the
layout of our own stackframes), and we only need walk the stack once to
do so.

The downside is that we need a new function per-arch to do so.

Mark.

> 
>   #ifndef __ASSEMBLY__
>   #include <linux/compat.h>
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 1da6029..2c1bf7d 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -260,6 +260,13 @@ static inline void ftrace_kill(void) { }
>   #endif /* CONFIG_FUNCTION_TRACER */
> 
>   #ifdef CONFIG_STACK_TRACER
> +/*
> + * the offset value to add to return address from save_stack_trace()
> + */
> +#ifndef FTRACE_STACK_FRAME_OFFSET
> +#define FTRACE_STACK_FRAME_OFFSET 0
> +#endif
> +
>   extern int stack_tracer_enabled;
>   int
>   stack_trace_sysctl(struct ctl_table *table, int write,
> diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
> index 9384647..c5b9748 100644
> --- a/kernel/trace/trace_stack.c
> +++ b/kernel/trace/trace_stack.c
> @@ -105,7 +105,7 @@ check_stack(unsigned long ip, unsigned long *stack)
> 
>   	/* Skip over the overhead of the stack tracer itself */
>   	for (i = 0; i < max_stack_trace.nr_entries; i++) {
> -		if (stack_dump_trace[i] == ip)
> +		if ((stack_dump_trace[i] + FTRACE_STACK_FRAME_OFFSET) == ip)
>   			break;
>   	}
> 
> @@ -131,7 +131,8 @@ check_stack(unsigned long ip, unsigned long *stack)
>   		p = start;
> 
>   		for (; p < top && i < max_stack_trace.nr_entries; p++) {
> -			if (*p == stack_dump_trace[i]) {
> +			if (*p == (stack_dump_trace[i]
> +					+ FTRACE_STACK_FRAME_OFFSET)) {
>   				stack_dump_trace[x] = stack_dump_trace[i++];
>   				this_size = stack_dump_index[x++] =
>   					(top - p) * sizeof(unsigned long);
> -- 
> 1.7.9.5
> 
> 
> 
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
>
Steven Rostedt July 16, 2015, 2:34 p.m. UTC | #3
On Thu, 16 Jul 2015 15:28:34 +0100
Mark Rutland <mark.rutland@arm.com> wrote:


> > diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
> > index c5534fa..868d6f1 100644
> > --- a/arch/arm64/include/asm/ftrace.h
> > +++ b/arch/arm64/include/asm/ftrace.h
> > @@ -15,6 +15,7 @@
> > 
> >   #define MCOUNT_ADDR		((unsigned long)_mcount)
> >   #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE
> > +#define FTRACE_STACK_FRAME_OFFSET 4 /* sync it up with stacktrace.c */
> 
> Is there any reason we couldn't have the arch code dump the stack depth
> for each function when it walks the stack to generate the stack trace?
> 
> That means we can provide a more precise result (because we know the
> layout of our own stackframes), and we only need walk the stack once to
> do so.
> 
> The downside is that we need a new function per-arch to do so.

Or we make check_stack() a weak function, and let archs override it. I
can possibly break up the code a bit to have helper functions where
things are the same.

I want x86 to be able to track irq stacks as well, but there's no way
to do that generically yet, so having arch specific functions has been
on my todo list.

-- Steve
AKASHI Takahiro July 17, 2015, 2:09 a.m. UTC | #4
On 07/16/2015 11:34 PM, Steven Rostedt wrote:
> On Thu, 16 Jul 2015 15:28:34 +0100
> Mark Rutland <mark.rutland@arm.com> wrote:
>
>
>>> diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
>>> index c5534fa..868d6f1 100644
>>> --- a/arch/arm64/include/asm/ftrace.h
>>> +++ b/arch/arm64/include/asm/ftrace.h
>>> @@ -15,6 +15,7 @@
>>>
>>>    #define MCOUNT_ADDR		((unsigned long)_mcount)
>>>    #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE
>>> +#define FTRACE_STACK_FRAME_OFFSET 4 /* sync it up with stacktrace.c */
>>
>> Is there any reason we couldn't have the arch code dump the stack depth
>> for each function when it walks the stack to generate the stack trace?
>>
>> That means we can provide a more precise result (because we know the
>> layout of our own stackframes), and we only need walk the stack once to
>> do so.
>>
>> The downside is that we need a new function per-arch to do so.
>
> Or we make check_stack() a weak function, and let archs override it. I
> can possibly break up the code a bit to have helper functions where
> things are the same.

Yeah, that is exactly what I meant in my cover letter[0/3] if the series
of patches are not acceptable.

-Takahiro AKASHI

> I want x86 to be able to track irq stacks as well, but there's no way
> to do that generically yet, so having arch specific functions has been
> on my todo list.
>
> -- Steve
>
Will Deacon July 17, 2015, 10:46 a.m. UTC | #5
Hi Steve,

On Thu, Jul 16, 2015 at 02:38:18AM +0100, Steven Rostedt wrote:
> On Thu, 16 Jul 2015 10:08:03 +0900
> AKASHI Takahiro <takahiro.akashi@linaro.org> wrote:
> 
> > On 07/16/2015 09:27 AM, AKASHI Takahiro wrote:
> > > On 07/16/2015 01:13 AM, Steven Rostedt wrote:
> > >> On Wed, 15 Jul 2015 10:55:36 -0400
> > >> Steven Rostedt <rostedt@goodmis.org> wrote:
> > >>
> > >>
> > >>> I'll take a look at it and try to clean up the code.
> > >>
> > >> Does the following patch make sense for you?
> > >
> > > Looks nice. The patch greatly simplifies changes on arm64 side.
> > 
> > As follows:
> > 
> > - Takahiro AKASHI
> > 
> 
> How should we go forward with this? Want me to make my change to my
> tree, and pull this patch in with it? I can set up for a 4.3 release.
> 
> Then if I can get an Acked-by from one of the arm64 maintainers, I can
> have this go through my tree?

To be honest, I've completely lost track of this thread!

Once you and Akashi have settled on a way forward, I'm happy to take a look
at anything touching arch/arm64/, but it seems like you're still discussing
some aspects of the series atm.

Will
diff mbox

Patch

diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index c5534fa..868d6f1 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -15,6 +15,7 @@ 

  #define MCOUNT_ADDR		((unsigned long)_mcount)
  #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE
+#define FTRACE_STACK_FRAME_OFFSET 4 /* sync it up with stacktrace.c */

  #ifndef __ASSEMBLY__
  #include <linux/compat.h>
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1da6029..2c1bf7d 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -260,6 +260,13 @@  static inline void ftrace_kill(void) { }
  #endif /* CONFIG_FUNCTION_TRACER */

  #ifdef CONFIG_STACK_TRACER
+/*
+ * the offset value to add to return address from save_stack_trace()
+ */
+#ifndef FTRACE_STACK_FRAME_OFFSET
+#define FTRACE_STACK_FRAME_OFFSET 0
+#endif
+
  extern int stack_tracer_enabled;
  int
  stack_trace_sysctl(struct ctl_table *table, int write,
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 9384647..c5b9748 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -105,7 +105,7 @@  check_stack(unsigned long ip, unsigned long *stack)

  	/* Skip over the overhead of the stack tracer itself */
  	for (i = 0; i < max_stack_trace.nr_entries; i++) {
-		if (stack_dump_trace[i] == ip)
+		if ((stack_dump_trace[i] + FTRACE_STACK_FRAME_OFFSET) == ip)
  			break;
  	}

@@ -131,7 +131,8 @@  check_stack(unsigned long ip, unsigned long *stack)
  		p = start;

  		for (; p < top && i < max_stack_trace.nr_entries; p++) {
-			if (*p == stack_dump_trace[i]) {
+			if (*p == (stack_dump_trace[i]
+					+ FTRACE_STACK_FRAME_OFFSET)) {
  				stack_dump_trace[x] = stack_dump_trace[i++];
  				this_size = stack_dump_index[x++] =
  					(top - p) * sizeof(unsigned long);