Message ID: 20200719155033.24201-3-oscar.carter@gmx.com (mailing list archive)
State: New, archived
Series: kernel/trace: Remove function callback casts
On Sun, 19 Jul 2020 17:50:33 +0200 Oscar Carter <oscar.carter@gmx.com> wrote: > In an effort to enable -Wcast-function-type in the top-level Makefile to > support Control Flow Integrity builds, there are the need to remove all > the function callback casts. > > ftrace_ops_list_func() can no longer be defined as ftrace_ops_no_ops(). > The reason for ftrace_ops_no_ops() is to use that when an architecture > calls ftrace_ops_list_func() with only two parameters (called from > assembly). And to make sure there's no C side-effects, those archs call > ftrace_ops_no_ops() which only has two parameters, as the function > ftrace_ops_list_func() has four parameters. > > This patch removes the no longer needed function ftrace_ops_no_ops() and > all the function callback casts using the previous defined ftrace_func > union and the two function helpers called ftrace_set_ufunc() and > ftrace_same_address_ufunc(). Ug, I think I prefer the linker trick better. > > Signed-off-by: Oscar Carter <oscar.carter@gmx.com> > --- > kernel/trace/ftrace.c | 48 ++++++++++++++++++++++++++----------------- > 1 file changed, 29 insertions(+), 19 deletions(-) > > diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c > index fd8fbb422860..124ccf914657 100644 > --- a/kernel/trace/ftrace.c > +++ b/kernel/trace/ftrace.c > @@ -143,9 +143,7 @@ static inline bool ftrace_same_address_ufunc(union ftrace_func *ufunc, > return (ufunc->ops == func); > } > #else > -/* See comment below, where ftrace_ops_list_func is defined */ > -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); > -#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) > +static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); > > static inline void ftrace_set_ufunc(union ftrace_func *ufunc, > ftrace_func_no_ops_t func) > @@ -198,22 +196,29 @@ static void ftrace_sync_ipi(void *data) > smp_rmb(); > } > > -static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) > +static union ftrace_func ftrace_ops_get_list_func(struct ftrace_ops *ops) > { > + union ftrace_func list_func; > + > /* > * If this is a dynamic, RCU, or per CPU ops, or we force list func, > * then it needs to call the list anyway. > */ > if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || > FTRACE_FORCE_LIST_FUNC) > - return ftrace_ops_list_func; > + ftrace_set_ufunc(&list_func, ftrace_ops_list_func); > + else > + list_func.ops = ftrace_ops_get_func(ops); > > - return ftrace_ops_get_func(ops); > + return list_func; Is this the same as returning a pointer? It makes me very nervous about returning a union. Can a compiler return something allocated on the stack? Also, don't use "ufunc" as that makes me think its a user space variable. > } > > static void update_ftrace_function(void) > { > - ftrace_func_t func; > + union ftrace_func func; > +#ifndef CONFIG_DYNAMIC_FTRACE > + union ftrace_func tmp; > +#endif > > /* > * Prepare the ftrace_ops that the arch callback will use. 
> @@ -225,7 +230,7 @@ static void update_ftrace_function(void) > > /* If there's no ftrace_ops registered, just call the stub function */ > if (set_function_trace_op == &ftrace_list_end) { > - func = ftrace_stub; > + func.ops = ftrace_stub; > > /* > * If we are at the end of the list and this ops is > @@ -239,21 +244,21 @@ static void update_ftrace_function(void) > } else { > /* Just use the default ftrace_ops */ > set_function_trace_op = &ftrace_list_end; > - func = ftrace_ops_list_func; > + ftrace_set_ufunc(&func, ftrace_ops_list_func); > } > > update_function_graph_func(); > > /* If there's no change, then do nothing more here */ > - if (ftrace_trace_function == func) > + if (ftrace_trace_function == func.ops) > return; > > /* > * If we are using the list function, it doesn't care > * about the function_trace_ops. > */ > - if (func == ftrace_ops_list_func) { > - ftrace_trace_function = func; > + if (ftrace_same_address_ufunc(&func, ftrace_ops_list_func)) { > + ftrace_trace_function = func.ops; > /* > * Don't even bother setting function_trace_ops, > * it would be racy to do so anyway. > @@ -272,7 +277,9 @@ static void update_ftrace_function(void) > * function we want, albeit indirectly, but it handles the > * ftrace_ops and doesn't depend on function_trace_op. > */ > - ftrace_trace_function = ftrace_ops_list_func; > + ftrace_set_ufunc(&tmp, ftrace_ops_list_func); > + ftrace_trace_function = tmp.ops; > + > /* > * Make sure all CPUs see this. Yes this is slow, but static > * tracing is slow and nasty to have enabled. > @@ -287,7 +294,7 @@ static void update_ftrace_function(void) > /* OK, we are all set to update the ftrace_trace_function now! */ > #endif /* !CONFIG_DYNAMIC_FTRACE */ > > - ftrace_trace_function = func; > + ftrace_trace_function = func.ops; > } This looks really intrusive for what it's trying to accomplish. The linker trick is far less intrusive, and I believe less error prone. -- Steve > > static void add_ftrace_ops(struct ftrace_ops __rcu **list, > @@ -2680,6 +2687,7 @@ void ftrace_modify_all_code(int command) > int update = command & FTRACE_UPDATE_TRACE_FUNC; > int mod_flags = 0; > int err = 0; > + union ftrace_func func; > > if (command & FTRACE_MAY_SLEEP) > mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; > @@ -2695,7 +2703,8 @@ void ftrace_modify_all_code(int command) > * traced. 
> */ > if (update) { > - err = ftrace_update_ftrace_func(ftrace_ops_list_func); > + ftrace_set_ufunc(&func, ftrace_ops_list_func); > + err = ftrace_update_ftrace_func(func.ops); > if (FTRACE_WARN_ON(err)) > return; > } > @@ -2705,7 +2714,9 @@ void ftrace_modify_all_code(int command) > else if (command & FTRACE_DISABLE_CALLS) > ftrace_replace_code(mod_flags); > > - if (update && ftrace_trace_function != ftrace_ops_list_func) { > + ftrace_set_ufunc(&func, ftrace_ops_list_func); > + > + if (update && ftrace_trace_function != func.ops) { > function_trace_op = set_function_trace_op; > smp_wmb(); > /* If irqs are disabled, we are in stop machine */ > @@ -6890,14 +6901,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, > { > __ftrace_ops_list_func(ip, parent_ip, NULL, regs); > } > -NOKPROBE_SYMBOL(ftrace_ops_list_func); > #else > -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) > +static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) > { > __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); > } > -NOKPROBE_SYMBOL(ftrace_ops_no_ops); > #endif > +NOKPROBE_SYMBOL(ftrace_ops_list_func); > > /* > * If there's only one function registered but it does not support > -- > 2.20.1
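For context, the cast this series removes is the "#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)" line visible in the diff above. A minimal, self-contained sketch of that pattern in plain user-space C (illustrative names only, not the kernel code) shows why -Wcast-function-type complains and why a CFI scheme that checks prototypes at indirect calls would reject such a call:

/* Compile with: gcc -Wcast-function-type -c cast_demo.c */
typedef void (*func4_t)(unsigned long ip, unsigned long parent_ip,
                        void *op, void *regs);

/* Two-parameter callback, standing in for ftrace_ops_no_ops() on archs
 * whose assembly stub only passes ip and parent_ip. */
static void list_func_no_ops(unsigned long ip, unsigned long parent_ip)
{
        (void)ip;
        (void)parent_ip;
}

/*
 * Forcing the two-parameter function into the four-parameter pointer type
 * needs a cast, which -Wcast-function-type flags as a cast between
 * incompatible function types. A prototype-based CFI check at an indirect
 * call through 'callback' would see the same mismatch.
 */
static func4_t callback = (func4_t)list_func_no_ops;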
Hi Steven, On Tue, Jul 21, 2020 at 02:05:45PM -0400, Steven Rostedt wrote: > On Sun, 19 Jul 2020 17:50:33 +0200 > Oscar Carter <oscar.carter@gmx.com> wrote: > > > In an effort to enable -Wcast-function-type in the top-level Makefile to > > support Control Flow Integrity builds, there are the need to remove all > > the function callback casts. > > > > ftrace_ops_list_func() can no longer be defined as ftrace_ops_no_ops(). > > The reason for ftrace_ops_no_ops() is to use that when an architecture > > calls ftrace_ops_list_func() with only two parameters (called from > > assembly). And to make sure there's no C side-effects, those archs call > > ftrace_ops_no_ops() which only has two parameters, as the function > > ftrace_ops_list_func() has four parameters. > > > > This patch removes the no longer needed function ftrace_ops_no_ops() and > > all the function callback casts using the previous defined ftrace_func > > union and the two function helpers called ftrace_set_ufunc() and > > ftrace_same_address_ufunc(). > > Ug, I think I prefer the linker trick better. > > > > > Signed-off-by: Oscar Carter <oscar.carter@gmx.com> > > --- > > kernel/trace/ftrace.c | 48 ++++++++++++++++++++++++++----------------- > > 1 file changed, 29 insertions(+), 19 deletions(-) > > > > diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c > > index fd8fbb422860..124ccf914657 100644 > > --- a/kernel/trace/ftrace.c > > +++ b/kernel/trace/ftrace.c > > @@ -143,9 +143,7 @@ static inline bool ftrace_same_address_ufunc(union ftrace_func *ufunc, > > return (ufunc->ops == func); > > } > > #else > > -/* See comment below, where ftrace_ops_list_func is defined */ > > -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); > > -#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) > > +static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); > > > > static inline void ftrace_set_ufunc(union ftrace_func *ufunc, > > ftrace_func_no_ops_t func) > > @@ -198,22 +196,29 @@ static void ftrace_sync_ipi(void *data) > > smp_rmb(); > > } > > > > -static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) > > +static union ftrace_func ftrace_ops_get_list_func(struct ftrace_ops *ops) > > { > > + union ftrace_func list_func; > > + > > /* > > * If this is a dynamic, RCU, or per CPU ops, or we force list func, > > * then it needs to call the list anyway. > > */ > > if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || > > FTRACE_FORCE_LIST_FUNC) > > - return ftrace_ops_list_func; > > + ftrace_set_ufunc(&list_func, ftrace_ops_list_func); > > + else > > + list_func.ops = ftrace_ops_get_func(ops); > > > > - return ftrace_ops_get_func(ops); > > + return list_func; > > Is this the same as returning a pointer? It makes me very nervous about > returning a union. Can a compiler return something allocated on the stack? Yes, it can. The union is returned by value (copied). So, there is no problem. If you don't like to return a union by value, I can modify the prototype of the ftrace_ops_get_list_func() to the following one: static void ftrace_ops_get_list_func(struct ftrace_ops *ops, union ftrace_func *list_func) This way the old local variable "list_func" is removed and we need to pass the address of a union to this function. Then the function modify the union. If you prefer this option I have no problem with the change. > Also, don't use "ufunc" as that makes me think its a user space variable. Ok, would "bfunc" or "jfunc" be better? 
bfunc => blend - function jfunc => join - function > > } > > > > static void update_ftrace_function(void) > > { > > - ftrace_func_t func; > > + union ftrace_func func; > > +#ifndef CONFIG_DYNAMIC_FTRACE > > + union ftrace_func tmp; > > +#endif > > > > /* > > * Prepare the ftrace_ops that the arch callback will use. > > @@ -225,7 +230,7 @@ static void update_ftrace_function(void) > > > > /* If there's no ftrace_ops registered, just call the stub function */ > > if (set_function_trace_op == &ftrace_list_end) { > > - func = ftrace_stub; > > + func.ops = ftrace_stub; > > > > /* > > * If we are at the end of the list and this ops is > > @@ -239,21 +244,21 @@ static void update_ftrace_function(void) > > } else { > > /* Just use the default ftrace_ops */ > > set_function_trace_op = &ftrace_list_end; > > - func = ftrace_ops_list_func; > > + ftrace_set_ufunc(&func, ftrace_ops_list_func); > > } > > > > update_function_graph_func(); > > > > /* If there's no change, then do nothing more here */ > > - if (ftrace_trace_function == func) > > + if (ftrace_trace_function == func.ops) > > return; > > > > /* > > * If we are using the list function, it doesn't care > > * about the function_trace_ops. > > */ > > - if (func == ftrace_ops_list_func) { > > - ftrace_trace_function = func; > > + if (ftrace_same_address_ufunc(&func, ftrace_ops_list_func)) { > > + ftrace_trace_function = func.ops; > > /* > > * Don't even bother setting function_trace_ops, > > * it would be racy to do so anyway. > > @@ -272,7 +277,9 @@ static void update_ftrace_function(void) > > * function we want, albeit indirectly, but it handles the > > * ftrace_ops and doesn't depend on function_trace_op. > > */ > > - ftrace_trace_function = ftrace_ops_list_func; > > + ftrace_set_ufunc(&tmp, ftrace_ops_list_func); > > + ftrace_trace_function = tmp.ops; > > + > > /* > > * Make sure all CPUs see this. Yes this is slow, but static > > * tracing is slow and nasty to have enabled. > > @@ -287,7 +294,7 @@ static void update_ftrace_function(void) > > /* OK, we are all set to update the ftrace_trace_function now! */ > > #endif /* !CONFIG_DYNAMIC_FTRACE */ > > > > - ftrace_trace_function = func; > > + ftrace_trace_function = func.ops; > > } > > This looks really intrusive for what it's trying to accomplish. What this patch is trying to accomplish is to remove the warning -Wcast-function-type in a way that allows the CFI build to get the necessary info about the prototypes (4 parameters or 2 parameters) of the functions used for archs that support ftrace ops and for the archs that don't support ftrace ops. This info is necessary to allow the compiler to insert a check to guarantee that an indirect call can only jump to functions that match the prototype. This way, the attack surface is reduced if an attacker can get some control over the system. > The linker trick is far less intrusive, and I believe less error prone. If we use the linker trick, the warning -Wcast-function-type dissapears, but in a way that makes impossible to the compiler to get the necessary info about function prototypes to insert the commented check. As far I know, this linker trick (redirection of a function) is hidden for the CFI build. So, in my opinion, the linker trick is not suitable if we want to protect the function pointers of the ftrace subsystem against an attack that modifiy the normal flow of the kernel. 
> -- Steve Thanks, Oscar Carter > > > > static void add_ftrace_ops(struct ftrace_ops __rcu **list, > > @@ -2680,6 +2687,7 @@ void ftrace_modify_all_code(int command) > > int update = command & FTRACE_UPDATE_TRACE_FUNC; > > int mod_flags = 0; > > int err = 0; > > + union ftrace_func func; > > > > if (command & FTRACE_MAY_SLEEP) > > mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; > > @@ -2695,7 +2703,8 @@ void ftrace_modify_all_code(int command) > > * traced. > > */ > > if (update) { > > - err = ftrace_update_ftrace_func(ftrace_ops_list_func); > > + ftrace_set_ufunc(&func, ftrace_ops_list_func); > > + err = ftrace_update_ftrace_func(func.ops); > > if (FTRACE_WARN_ON(err)) > > return; > > } > > @@ -2705,7 +2714,9 @@ void ftrace_modify_all_code(int command) > > else if (command & FTRACE_DISABLE_CALLS) > > ftrace_replace_code(mod_flags); > > > > - if (update && ftrace_trace_function != ftrace_ops_list_func) { > > + ftrace_set_ufunc(&func, ftrace_ops_list_func); > > + > > + if (update && ftrace_trace_function != func.ops) { > > function_trace_op = set_function_trace_op; > > smp_wmb(); > > /* If irqs are disabled, we are in stop machine */ > > @@ -6890,14 +6901,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, > > { > > __ftrace_ops_list_func(ip, parent_ip, NULL, regs); > > } > > -NOKPROBE_SYMBOL(ftrace_ops_list_func); > > #else > > -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) > > +static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) > > { > > __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); > > } > > -NOKPROBE_SYMBOL(ftrace_ops_no_ops); > > #endif > > +NOKPROBE_SYMBOL(ftrace_ops_list_func); > > > > /* > > * If there's only one function registered but it does not support > > -- > > 2.20.1 >
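On the return-by-value question: a small stand-alone sketch (user-space C, made-up names, not the kernel code) of the two shapes Oscar describes, returning the union as a copy versus filling a caller-provided union through a pointer. Either way, no function-pointer cast is involved:

#include <stdio.h>

typedef void (*func2_t)(unsigned long ip, unsigned long parent_ip);
typedef void (*func4_t)(unsigned long ip, unsigned long parent_ip,
                        void *op, void *regs);

union demo_func {
        func4_t ops;
        func2_t no_ops;
};

static void two_arg_cb(unsigned long ip, unsigned long parent_ip)
{
        printf("ip=%#lx parent_ip=%#lx\n", ip, parent_ip);
}

/* Shape 1: return the union by value. The caller receives a copy, just as
 * with a small struct, so nothing from the callee's stack escapes. */
static union demo_func get_func_by_value(void)
{
        union demo_func f;

        f.no_ops = two_arg_cb;          /* plain assignment, no cast */
        return f;
}

/* Shape 2: fill a union owned by the caller through a pointer. */
static void get_func_by_pointer(union demo_func *f)
{
        f->no_ops = two_arg_cb;
}

int main(void)
{
        union demo_func a = get_func_by_value();
        union demo_func b;

        get_func_by_pointer(&b);
        a.no_ops(0x10, 0x20);
        b.no_ops(0x30, 0x40);
        return 0;
}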
On Fri, 24 Jul 2020 18:19:21 +0200
Oscar Carter <oscar.carter@gmx.com> wrote:

> > The linker trick is far less intrusive, and I believe less error prone.
>
> If we use the linker trick, the warning -Wcast-function-type dissapears,
> but in a way that makes impossible to the compiler to get the necessary
> info about function prototypes to insert the commented check. As far I
> know, this linker trick (redirection of a function) is hidden for the
> CFI build.
>
> So, in my opinion, the linker trick is not suitable if we want to protect
> the function pointers of the ftrace subsystem against an attack that
> modifiy the normal flow of the kernel.

The linker trick should only affect architectures that don't implement
the needed features. I can make it so the linker trick is only applied
to those archs, and other archs that want more protection only need to
add these features to their architectures.

It's much less intrusive than this patch.

-- Steve
On Fri, Jul 24, 2020 at 12:35:28PM -0400, Steven Rostedt wrote: > On Fri, 24 Jul 2020 18:19:21 +0200 > Oscar Carter <oscar.carter@gmx.com> wrote: > > > > The linker trick is far less intrusive, and I believe less error prone. > > > > If we use the linker trick, the warning -Wcast-function-type dissapears, > > but in a way that makes impossible to the compiler to get the necessary > > info about function prototypes to insert the commented check. As far I > > know, this linker trick (redirection of a function) is hidden for the > > CFI build. > > > > So, in my opinion, the linker trick is not suitable if we want to protect > > the function pointers of the ftrace subsystem against an attack that > > modifiy the normal flow of the kernel. > > The linker trick should only affect architectures that don't implement > the needed features. I can make it so the linker trick is only applied > to those archs, and other archs that want more protection only need to > add these features to their architectures. > > It's much less intrusive than this patch. Sorry, but I don't understand your proposal. What features an arch need to add if want the CFI protection? > > -- Steve Thanks, Oscar Carter
Hi Steven, On Fri, Jul 24, 2020 at 07:14:18PM +0200, Oscar Carter wrote: > On Fri, Jul 24, 2020 at 12:35:28PM -0400, Steven Rostedt wrote: > > On Fri, 24 Jul 2020 18:19:21 +0200 > > Oscar Carter <oscar.carter@gmx.com> wrote: > > > > > > The linker trick is far less intrusive, and I believe less error prone. > > > > > > If we use the linker trick, the warning -Wcast-function-type dissapears, > > > but in a way that makes impossible to the compiler to get the necessary > > > info about function prototypes to insert the commented check. As far I > > > know, this linker trick (redirection of a function) is hidden for the > > > CFI build. > > > > > > So, in my opinion, the linker trick is not suitable if we want to protect > > > the function pointers of the ftrace subsystem against an attack that > > > modifiy the normal flow of the kernel. > > > > The linker trick should only affect architectures that don't implement > > the needed features. I can make it so the linker trick is only applied > > to those archs, and other archs that want more protection only need to > > add these features to their architectures. > > > > It's much less intrusive than this patch. > > Sorry, but I don't understand your proposal. What features an arch need to > add if want the CFI protection? Typo correction. Sorry, but I don't understand your proposal. What features does an arch need to add if want the CFI protection? > > > > > -- Steve > Thanks, Oscar Carter
On Fri, 24 Jul 2020 19:14:18 +0200
Oscar Carter <oscar.carter@gmx.com> wrote:

> > The linker trick should only affect architectures that don't implement
> > the needed features. I can make it so the linker trick is only applied
> > to those archs, and other archs that want more protection only need to
> > add these features to their architectures.
>
> > It's much less intrusive than this patch.
>
> Sorry, but I don't understand your proposal. What features an arch need to
> add if want the CFI protection?

The better question is, what features should an arch add to not need the
linker trick ;-)

That is, they need to change it so that they add the two parameters that
is expected by the ftrace core code. Once they do that, then they don't
need to use the linker trick, and no function typecast is needed.

In other-words, if they support the ftrace_ops and regs passing, they can
define ARCH_SUPPORTS_FTRACE_OPS. Note, they don't even really need to
support the regs, (can just send NULL), if they don't have
HAVE_DYNAMIC_FTRACE_WITH_REGS.

Which BTW, is supported by the following architectures:

arm
arm64
csky
parisc
powerpc
riscv
s390
x86

All of the above architectures should not even be hitting the code
that does the function cast. What architecture are you doing all this
for?

-- Steve
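To make the two-parameter versus four-parameter point concrete, here is a condensed sketch of the core-code shape being discussed, boiled down from the kernel/trace/ftrace.c hunks quoted in this thread (simplified, not the verbatim source). When the architecture's ftrace_caller/mcount stub supplies the extra arguments, it defines ARCH_SUPPORTS_FTRACE_OPS and the list function keeps its natural four-parameter prototype; otherwise only a two-parameter entry point exists, which is where the cast, or a trick to avoid it, comes in:

/* Condensed sketch, not the actual kernel source. */
struct ftrace_ops;
struct pt_regs;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs);

static void __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                   struct ftrace_ops *ignored,
                                   struct pt_regs *regs)
{
        /* In the real code: iterate the registered ftrace_ops and call them. */
}

#ifdef ARCH_SUPPORTS_FTRACE_OPS
/* The arch stub passes all four arguments (regs may be NULL when the arch
 * has no DYNAMIC_FTRACE_WITH_REGS support). */
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs)
{
        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
#else
/* The arch stub only passes ip and parent_ip, so this entry point has a
 * two-parameter prototype and cannot be stored in an ftrace_func_t
 * pointer without a cast. */
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
#endif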
On Fri, 24 Jul 2020 13:36:56 -0400
Steven Rostedt <rostedt@goodmis.org> wrote:

> Which BTW, is supported by the following architectures:
>
> arm
> arm64
> csky
> parisc
> powerpc
> riscv
> s390
> x86

And here's a list of architectures that have function tracing but need
to be updated:

ia64
microblaze
mips
nds32
sh
sparc
xtensa

>
> All of the above architectures should not even be hitting the code
> that does the function cast. What architecture are you doing all this
> for?

Which one of the above is this patch set for?

-- Steve
On Fri, 24 Jul 2020 13:40:20 -0400
Steven Rostedt <rostedt@goodmis.org> wrote:

> On Fri, 24 Jul 2020 13:36:56 -0400
> Steven Rostedt <rostedt@goodmis.org> wrote:
>
> > Which BTW, is supported by the following architectures:
> > x86

Ah, you can lose support on x86 if you don't enable DYNAMIC_FTRACE, which
is stupid to do. I only enabled disabling of DYNAMIC_FTRACE on x86 to
test it, as not all architectures have it, and I currently only test on
x86.

Without DYNAMIC_FTRACE enabled, you *always* call into the ftrace
infrastructure for *every* function. This adds something like 15 to 20%
overhead to the kernel. Did I say it was stupid to do so?

If you are going through all this work because some randconfig causes
this warning because it enables CONFIG_FUNCTION_TRACER but without
DYNAMIC_FTRACE enabled, then I strongly suggest you start spending your
time elsewhere, because it will be a big NAK on my part to add all this
intrusive code for a config used only for debugging the non
DYNAMIC_FTRACE case.

-- Steve
On Fri, Jul 24, 2020 at 01:40:20PM -0400, Steven Rostedt wrote: > On Fri, 24 Jul 2020 13:36:56 -0400 > Steven Rostedt <rostedt@goodmis.org> wrote: > > > Which BTW, is supported by the following architectures: > > > > arm > > arm64 > > csky > > parisc > > powerpc > > riscv > > s390 > > x86 > > And here's a list of architectures that have function tracing but need > to be updated: > > ia64 > microblaze > mips > nds32 > sh > sparc > xtensa > > > > > All of the above architectures should not even be hitting the code > > that does the function cast. What architecture are you doing all this > > for? > > Which one of the above is this patch set for? This patch is the result of a warning obtained with the following: make allmodconfig ARCH=powerpc make ARCH=powerpc CROSS_COMPILE=powerpc-linux-gnu- -j4 And with the -Wcast-function-type enabled in the top level makefile. > > -- Steve Thanks, Oscar Carter
On Fri, 24 Jul 2020 19:55:00 +0200 Oscar Carter <oscar.carter@gmx.com> wrote: > > Which one of the above is this patch set for? > > This patch is the result of a warning obtained with the following: > > make allmodconfig ARCH=powerpc > make ARCH=powerpc CROSS_COMPILE=powerpc-linux-gnu- -j4 > > And with the -Wcast-function-type enabled in the top level makefile. Looking into powerpc I found this: arch/powerpc/include/asm/ftrace.h: #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS #define ARCH_SUPPORTS_FTRACE_OPS 1 #endif arch/powerpc/Kconfig: select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL [..] config MPROFILE_KERNEL depends on PPC64 && CPU_LITTLE_ENDIAN && FUNCTION_TRACER def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-mprofile-kernel.sh $(CC) -I$(srctree)/include -D__KERNEL__) So, it looks like you need to be 64bit PowerPC, Little Endian, and gcc needs to support -mprofile. Otherwise, it falls back to the old way that does the type casting. If you are really concerned about this, I would recommend adding support to the architecture you care about, and then this will no longer be an issue. The funny part is, you can still add support for ftrace_ops, without adding support for DYNAMIC_FTRACE_WITH_REGS, if you only care about not having to do that typecast. My NAK still stands. I wont let an intrusive patch be added to the ftrace core code to deal with an unsupported feature in an architecture. I would be will to add that linker trick to remove the warning. Or we just use that warning as incentive to get architecture developers to implement this feature ;-) -- Steve
Hi Steven, On Fri, Jul 24, 2020 at 02:34:57PM -0400, Steven Rostedt wrote: > On Fri, 24 Jul 2020 19:55:00 +0200 > Oscar Carter <oscar.carter@gmx.com> wrote: > > > > Which one of the above is this patch set for? > > > > This patch is the result of a warning obtained with the following: > > > > make allmodconfig ARCH=powerpc > > make ARCH=powerpc CROSS_COMPILE=powerpc-linux-gnu- -j4 > > > > And with the -Wcast-function-type enabled in the top level makefile. > > Looking into powerpc I found this: > > arch/powerpc/include/asm/ftrace.h: > > #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS > #define ARCH_SUPPORTS_FTRACE_OPS 1 > #endif > > > arch/powerpc/Kconfig: > > select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL > [..] > > config MPROFILE_KERNEL > depends on PPC64 && CPU_LITTLE_ENDIAN && FUNCTION_TRACER > def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-mprofile-kernel.sh $(CC) -I$(srctree)/include -D__KERNEL__) > > So, it looks like you need to be 64bit PowerPC, Little Endian, and gcc > needs to support -mprofile. > > Otherwise, it falls back to the old way that does the type casting. > > If you are really concerned about this, I would recommend adding > support to the architecture you care about, and then this will no > longer be an issue. > > The funny part is, you can still add support for ftrace_ops, without > adding support for DYNAMIC_FTRACE_WITH_REGS, if you only care about not > having to do that typecast. I agree with you. I will try to add the support for ftrace_ops without adding support for DYNAMIC_FTRACE_WITH_REGS to the powerpc architecture. It's a great solution to allow a clean CFI build and so, protect this arch (powerpc) against attacks that try to modify the normal control flow. I take a look at the kernel documentation about port ftrace to other architectures [1] but it is out of date. If I try to do this I will need some help. Some info that point me to the right direction would be greatly appreciated. Some advice about what functions I will need to implement would be really helpfull. Or point me to the right piece of code that I can pick as base point. > My NAK still stands. I wont let an intrusive patch be added to the > ftrace core code to deal with an unsupported feature in an architecture. Don't worry. I agree with you and thanks for finding a better way to accomplish the initial purpose. > I would be will to add that linker trick to remove the warning. Or we > just use that warning as incentive to get architecture developers to > implement this feature ;-) In my opinion it would be better to leave the code as it an make the warning visible until this feature has been added. > -- Steve [1] https://www.kernel.org/doc/html/latest/trace/ftrace-design.html Thanks again, Oscar Carter
Sorry, typo correction for the last paragraph:

On Sat, Jul 25, 2020 at 05:09:14PM +0200, Oscar Carter wrote:
> > I would be will to add that linker trick to remove the warning. Or we
> > just use that warning as incentive to get architecture developers to
> > implement this feature ;-)
>
> In my opinion it would be better to leave the code as it an make the warning
> visible until this feature has been added.

In my opinion it would be better to leave the code as is and make the
warning visible until this feature has been added.

Thanks again,
Oscar Carter
Hi Steven, On Sat, Jul 25, 2020 at 05:09:14PM +0200, Oscar Carter wrote: > Hi Steven, > > On Fri, Jul 24, 2020 at 02:34:57PM -0400, Steven Rostedt wrote: > > On Fri, 24 Jul 2020 19:55:00 +0200 > > Oscar Carter <oscar.carter@gmx.com> wrote: > > > > > > Which one of the above is this patch set for? > > > > > > This patch is the result of a warning obtained with the following: > > > > > > make allmodconfig ARCH=powerpc > > > make ARCH=powerpc CROSS_COMPILE=powerpc-linux-gnu- -j4 > > > > > > And with the -Wcast-function-type enabled in the top level makefile. > > > > Looking into powerpc I found this: > > > > arch/powerpc/include/asm/ftrace.h: > > > > #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS > > #define ARCH_SUPPORTS_FTRACE_OPS 1 > > #endif > > > > > > arch/powerpc/Kconfig: > > > > select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL > > [..] > > > > config MPROFILE_KERNEL > > depends on PPC64 && CPU_LITTLE_ENDIAN && FUNCTION_TRACER > > def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-mprofile-kernel.sh $(CC) -I$(srctree)/include -D__KERNEL__) > > > > So, it looks like you need to be 64bit PowerPC, Little Endian, and gcc > > needs to support -mprofile. > > > > Otherwise, it falls back to the old way that does the type casting. > > > > If you are really concerned about this, I would recommend adding > > support to the architecture you care about, and then this will no > > longer be an issue. > > > > The funny part is, you can still add support for ftrace_ops, without > > adding support for DYNAMIC_FTRACE_WITH_REGS, if you only care about not > > having to do that typecast. > > I agree with you. I will try to add the support for ftrace_ops without > adding support for DYNAMIC_FTRACE_WITH_REGS to the powerpc architecture. > > It's a great solution to allow a clean CFI build and so, protect this > arch (powerpc) against attacks that try to modify the normal control > flow. > > I take a look at the kernel documentation about port ftrace to other > architectures [1] but it is out of date. > > If I try to do this I will need some help. Some info that point me to the > right direction would be greatly appreciated. Some advice about what > functions I will need to implement would be really helpfull. Or point me > to the right piece of code that I can pick as base point. I've been searching and reading the code as much as possible. I've found two patches that I think can be useful to guide me. One [1] adds support for ftrace_ops to the riscv architecture. The other one [2] adds support for ftrace_ops to the parisc architecture. [1] commit 71e736a7d655 ("riscv/ftrace: Add ARCH_SUPPORTS_FTRACE_OPS support") [2] commit d562aca37a54 ("parisc/ftrace: Add ARCH_SUPPORTS_FTRACE_OPS support") Due to powerpc arch calls the needed functions from assembly, I based my idea on the patch for the RISCV arch. Can something like the following work? 
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index bc76970b6ee5..1c51ff5afae1 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -61,9 +61,8 @@ struct dyn_arch_ftrace { }; #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS #define ARCH_SUPPORTS_FTRACE_OPS 1 -#endif + #endif /* CONFIG_FUNCTION_TRACER */ #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/kernel/trace/ftrace_32.S b/arch/powerpc/kernel/trace/ftrace_32.S index e023ae59c429..e69a4e945986 100644 --- a/arch/powerpc/kernel/trace/ftrace_32.S +++ b/arch/powerpc/kernel/trace/ftrace_32.S @@ -29,6 +29,10 @@ _GLOBAL(ftrace_caller) MCOUNT_SAVE_FRAME /* r3 ends up with link register */ subi r3, r3, MCOUNT_INSN_SIZE + + /* Set ftrace_ops (r5) to the global variable function_trace_op */ + /* Set pt_regs (r6) to NULL */ + .globl ftrace_call ftrace_call: bl ftrace_stub diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.S b/arch/powerpc/kernel/trace/ftrace_64_pg.S index 6708e24db0ab..a741448b1df9 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_pg.S +++ b/arch/powerpc/kernel/trace/ftrace_64_pg.S @@ -22,6 +22,10 @@ _GLOBAL_TOC(ftrace_caller) std r3, 128(r1) ld r4, 16(r11) subi r3, r3, MCOUNT_INSN_SIZE + + /* Set ftrace_ops (r5) to the global variable function_trace_op */ + /* Set pt_regs (r6) to NULL */ + .globl ftrace_call ftrace_call: bl ftrace_stub To add support for ftrace_ops to the powerpc architecture is only necessary to fill the r5 and r6 registers before the call to ftrace_stub in all the cases. The register r5 is a pointer to ftrace_ops struct and the register r6 is a pointer to pt_regs struct. These registers are the third and fourth parameters of a function with the following prototype. The first and second ones are yet set. void func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct pt_regs *regs); Am I in the right direction? or am I totally wrong? Thanks for your time and patience. Oscar Carter. > > My NAK still stands. I wont let an intrusive patch be added to the > > ftrace core code to deal with an unsupported feature in an architecture. > > Don't worry. I agree with you and thanks for finding a better way to > accomplish the initial purpose. > > > I would be will to add that linker trick to remove the warning. Or we > > just use that warning as incentive to get architecture developers to > > implement this feature ;-) > > In my opinion it would be better to leave the code as it an make the warning > visible until this feature has been added. > > > -- Steve > > [1] https://www.kernel.org/doc/html/latest/trace/ftrace-design.html > > Thanks again, > Oscar Carter
On Sun, 26 Jul 2020 17:52:42 +0200 Oscar Carter <oscar.carter@gmx.com> wrote: > > If I try to do this I will need some help. Some info that point me to the > > right direction would be greatly appreciated. Some advice about what > > functions I will need to implement would be really helpfull. Or point me > > to the right piece of code that I can pick as base point. > > I've been searching and reading the code as much as possible. I've found > two patches that I think can be useful to guide me. One [1] adds support > for ftrace_ops to the riscv architecture. The other one [2] adds support > for ftrace_ops to the parisc architecture. > > [1] commit 71e736a7d655 ("riscv/ftrace: Add ARCH_SUPPORTS_FTRACE_OPS support") > [2] commit d562aca37a54 ("parisc/ftrace: Add ARCH_SUPPORTS_FTRACE_OPS support") > > Due to powerpc arch calls the needed functions from assembly, I based my > idea on the patch for the RISCV arch. > > Can something like the following work? > > diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h > index bc76970b6ee5..1c51ff5afae1 100644 > --- a/arch/powerpc/include/asm/ftrace.h > +++ b/arch/powerpc/include/asm/ftrace.h > @@ -61,9 +61,8 @@ struct dyn_arch_ftrace { > }; > #endif /* __ASSEMBLY__ */ > > -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS > #define ARCH_SUPPORTS_FTRACE_OPS 1 > -#endif > + > #endif /* CONFIG_FUNCTION_TRACER */ > > #ifndef __ASSEMBLY__ > diff --git a/arch/powerpc/kernel/trace/ftrace_32.S b/arch/powerpc/kernel/trace/ftrace_32.S > index e023ae59c429..e69a4e945986 100644 > --- a/arch/powerpc/kernel/trace/ftrace_32.S > +++ b/arch/powerpc/kernel/trace/ftrace_32.S > @@ -29,6 +29,10 @@ _GLOBAL(ftrace_caller) > MCOUNT_SAVE_FRAME > /* r3 ends up with link register */ > subi r3, r3, MCOUNT_INSN_SIZE > + > + /* Set ftrace_ops (r5) to the global variable function_trace_op */ > + /* Set pt_regs (r6) to NULL */ > + > .globl ftrace_call > ftrace_call: > bl ftrace_stub > diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.S b/arch/powerpc/kernel/trace/ftrace_64_pg.S > index 6708e24db0ab..a741448b1df9 100644 > --- a/arch/powerpc/kernel/trace/ftrace_64_pg.S > +++ b/arch/powerpc/kernel/trace/ftrace_64_pg.S > @@ -22,6 +22,10 @@ _GLOBAL_TOC(ftrace_caller) > std r3, 128(r1) > ld r4, 16(r11) > subi r3, r3, MCOUNT_INSN_SIZE > + > + /* Set ftrace_ops (r5) to the global variable function_trace_op */ > + /* Set pt_regs (r6) to NULL */ I'm guessing you are going to do the above here. If so, this looks correct. > + > .globl ftrace_call > ftrace_call: > bl ftrace_stub > > To add support for ftrace_ops to the powerpc architecture is only necessary > to fill the r5 and r6 registers before the call to ftrace_stub in all the > cases. The register r5 is a pointer to ftrace_ops struct and the register > r6 is a pointer to pt_regs struct. These registers are the third and fourth > parameters of a function with the following prototype. The first and second > ones are yet set. I guess you mean that the first and second ones are already set. But, yeah, you are on the correct path here! > > void func(unsigned long ip, unsigned long parent_ip, > struct ftrace_ops *ops, struct pt_regs *regs); > > Am I in the right direction? or am I totally wrong? No, you don't look wrong. But the true test is to try it out :-) Don't forget to update ftrace_32.S as well. > > Thanks for your time and patience. My pleasure. Thanks for doing this. The more people that understand all this, the better! -- Steve
Hi Steven, On Mon, Jul 27, 2020 at 09:53:06AM -0400, Steven Rostedt wrote: > On Sun, 26 Jul 2020 17:52:42 +0200 > Oscar Carter <oscar.carter@gmx.com> wrote: > > > > If I try to do this I will need some help. Some info that point me to the > > > right direction would be greatly appreciated. Some advice about what > > > functions I will need to implement would be really helpfull. Or point me > > > to the right piece of code that I can pick as base point. > > > > I've been searching and reading the code as much as possible. I've found > > two patches that I think can be useful to guide me. One [1] adds support > > for ftrace_ops to the riscv architecture. The other one [2] adds support > > for ftrace_ops to the parisc architecture. > > > > [1] commit 71e736a7d655 ("riscv/ftrace: Add ARCH_SUPPORTS_FTRACE_OPS support") > > [2] commit d562aca37a54 ("parisc/ftrace: Add ARCH_SUPPORTS_FTRACE_OPS support") > > > > Due to powerpc arch calls the needed functions from assembly, I based my > > idea on the patch for the RISCV arch. > > > > Can something like the following work? > > > > diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h > > index bc76970b6ee5..1c51ff5afae1 100644 > > --- a/arch/powerpc/include/asm/ftrace.h > > +++ b/arch/powerpc/include/asm/ftrace.h > > @@ -61,9 +61,8 @@ struct dyn_arch_ftrace { > > }; > > #endif /* __ASSEMBLY__ */ > > > > -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS > > #define ARCH_SUPPORTS_FTRACE_OPS 1 > > -#endif > > + > > #endif /* CONFIG_FUNCTION_TRACER */ > > > > #ifndef __ASSEMBLY__ > > diff --git a/arch/powerpc/kernel/trace/ftrace_32.S b/arch/powerpc/kernel/trace/ftrace_32.S > > index e023ae59c429..e69a4e945986 100644 > > --- a/arch/powerpc/kernel/trace/ftrace_32.S > > +++ b/arch/powerpc/kernel/trace/ftrace_32.S > > @@ -29,6 +29,10 @@ _GLOBAL(ftrace_caller) > > MCOUNT_SAVE_FRAME > > /* r3 ends up with link register */ > > subi r3, r3, MCOUNT_INSN_SIZE > > + > > + /* Set ftrace_ops (r5) to the global variable function_trace_op */ > > + /* Set pt_regs (r6) to NULL */ > > + > > .globl ftrace_call > > ftrace_call: > > bl ftrace_stub > > diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.S b/arch/powerpc/kernel/trace/ftrace_64_pg.S > > index 6708e24db0ab..a741448b1df9 100644 > > --- a/arch/powerpc/kernel/trace/ftrace_64_pg.S > > +++ b/arch/powerpc/kernel/trace/ftrace_64_pg.S > > @@ -22,6 +22,10 @@ _GLOBAL_TOC(ftrace_caller) > > std r3, 128(r1) > > ld r4, 16(r11) > > subi r3, r3, MCOUNT_INSN_SIZE > > + > > + /* Set ftrace_ops (r5) to the global variable function_trace_op */ > > + /* Set pt_regs (r6) to NULL */ > > I'm guessing you are going to do the above here. If so, this looks correct. > > > + > > .globl ftrace_call > > ftrace_call: > > bl ftrace_stub > > > > To add support for ftrace_ops to the powerpc architecture is only necessary > > to fill the r5 and r6 registers before the call to ftrace_stub in all the > > cases. The register r5 is a pointer to ftrace_ops struct and the register > > r6 is a pointer to pt_regs struct. These registers are the third and fourth > > parameters of a function with the following prototype. The first and second > > ones are yet set. > > I guess you mean that the first and second ones are already set. But, > yeah, you are on the correct path here! > > > > > void func(unsigned long ip, unsigned long parent_ip, > > struct ftrace_ops *ops, struct pt_regs *regs); > > > > Am I in the right direction? or am I totally wrong? > > No, you don't look wrong. But the true test is to try it out :-) Of course. 
I will do the commented work, I will test it and then I will send a new patch. > Don't forget to update ftrace_32.S as well. > > > > > > Thanks for your time and patience. > > My pleasure. Thanks for doing this. The more people that understand all > this, the better! Thanks for your guidance and advices. > -- Steve Regards, Oscar Carter
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fd8fbb422860..124ccf914657 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -143,9 +143,7 @@ static inline bool ftrace_same_address_ufunc(union ftrace_func *ufunc, return (ufunc->ops == func); } #else -/* See comment below, where ftrace_ops_list_func is defined */ -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); -#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) +static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); static inline void ftrace_set_ufunc(union ftrace_func *ufunc, ftrace_func_no_ops_t func) @@ -198,22 +196,29 @@ static void ftrace_sync_ipi(void *data) smp_rmb(); } -static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) +static union ftrace_func ftrace_ops_get_list_func(struct ftrace_ops *ops) { + union ftrace_func list_func; + /* * If this is a dynamic, RCU, or per CPU ops, or we force list func, * then it needs to call the list anyway. */ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC) - return ftrace_ops_list_func; + ftrace_set_ufunc(&list_func, ftrace_ops_list_func); + else + list_func.ops = ftrace_ops_get_func(ops); - return ftrace_ops_get_func(ops); + return list_func; } static void update_ftrace_function(void) { - ftrace_func_t func; + union ftrace_func func; +#ifndef CONFIG_DYNAMIC_FTRACE + union ftrace_func tmp; +#endif /* * Prepare the ftrace_ops that the arch callback will use. @@ -225,7 +230,7 @@ static void update_ftrace_function(void) /* If there's no ftrace_ops registered, just call the stub function */ if (set_function_trace_op == &ftrace_list_end) { - func = ftrace_stub; + func.ops = ftrace_stub; /* * If we are at the end of the list and this ops is @@ -239,21 +244,21 @@ static void update_ftrace_function(void) } else { /* Just use the default ftrace_ops */ set_function_trace_op = &ftrace_list_end; - func = ftrace_ops_list_func; + ftrace_set_ufunc(&func, ftrace_ops_list_func); } update_function_graph_func(); /* If there's no change, then do nothing more here */ - if (ftrace_trace_function == func) + if (ftrace_trace_function == func.ops) return; /* * If we are using the list function, it doesn't care * about the function_trace_ops. */ - if (func == ftrace_ops_list_func) { - ftrace_trace_function = func; + if (ftrace_same_address_ufunc(&func, ftrace_ops_list_func)) { + ftrace_trace_function = func.ops; /* * Don't even bother setting function_trace_ops, * it would be racy to do so anyway. @@ -272,7 +277,9 @@ static void update_ftrace_function(void) * function we want, albeit indirectly, but it handles the * ftrace_ops and doesn't depend on function_trace_op. */ - ftrace_trace_function = ftrace_ops_list_func; + ftrace_set_ufunc(&tmp, ftrace_ops_list_func); + ftrace_trace_function = tmp.ops; + /* * Make sure all CPUs see this. Yes this is slow, but static * tracing is slow and nasty to have enabled. @@ -287,7 +294,7 @@ static void update_ftrace_function(void) /* OK, we are all set to update the ftrace_trace_function now! 
*/ #endif /* !CONFIG_DYNAMIC_FTRACE */ - ftrace_trace_function = func; + ftrace_trace_function = func.ops; } static void add_ftrace_ops(struct ftrace_ops __rcu **list, @@ -2680,6 +2687,7 @@ void ftrace_modify_all_code(int command) int update = command & FTRACE_UPDATE_TRACE_FUNC; int mod_flags = 0; int err = 0; + union ftrace_func func; if (command & FTRACE_MAY_SLEEP) mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; @@ -2695,7 +2703,8 @@ void ftrace_modify_all_code(int command) * traced. */ if (update) { - err = ftrace_update_ftrace_func(ftrace_ops_list_func); + ftrace_set_ufunc(&func, ftrace_ops_list_func); + err = ftrace_update_ftrace_func(func.ops); if (FTRACE_WARN_ON(err)) return; } @@ -2705,7 +2714,9 @@ void ftrace_modify_all_code(int command) else if (command & FTRACE_DISABLE_CALLS) ftrace_replace_code(mod_flags); - if (update && ftrace_trace_function != ftrace_ops_list_func) { + ftrace_set_ufunc(&func, ftrace_ops_list_func); + + if (update && ftrace_trace_function != func.ops) { function_trace_op = set_function_trace_op; smp_wmb(); /* If irqs are disabled, we are in stop machine */ @@ -6890,14 +6901,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, { __ftrace_ops_list_func(ip, parent_ip, NULL, regs); } -NOKPROBE_SYMBOL(ftrace_ops_list_func); #else -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) +static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) { __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); } -NOKPROBE_SYMBOL(ftrace_ops_no_ops); #endif +NOKPROBE_SYMBOL(ftrace_ops_list_func); /* * If there's only one function registered but it does not support
In an effort to enable -Wcast-function-type in the top-level Makefile to
support Control Flow Integrity builds, there is a need to remove all the
function callback casts.

ftrace_ops_list_func() can no longer be defined as ftrace_ops_no_ops().
The reason for ftrace_ops_no_ops() is to use it when an architecture calls
ftrace_ops_list_func() with only two parameters (called from assembly).
And to make sure there are no C side-effects, those archs call
ftrace_ops_no_ops(), which only has two parameters, whereas
ftrace_ops_list_func() has four parameters.

This patch removes the no longer needed function ftrace_ops_no_ops() and
all the function callback casts, using the previously defined ftrace_func
union and the two helper functions ftrace_set_ufunc() and
ftrace_same_address_ufunc().

Signed-off-by: Oscar Carter <oscar.carter@gmx.com>
---
 kernel/trace/ftrace.c | 48 ++++++++++++++++++++++++++-----------------
 1 file changed, 29 insertions(+), 19 deletions(-)

--
2.20.1
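The ftrace_func union and the ftrace_set_ufunc()/ftrace_same_address_ufunc() helpers named above are introduced by an earlier patch in this series and are not shown on this page. Purely as a reading aid, a rough guess at their shape, inferred only from how this patch uses them, is sketched below; the member name "no_ops" and the bodies of the #else-side helpers are assumptions, and only the "ufunc->ops == func" comparison appears verbatim in the quoted diff:

#include <stdbool.h>

struct ftrace_ops;
struct pt_regs;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs);
typedef void (*ftrace_func_no_ops_t)(unsigned long ip, unsigned long parent_ip);

union ftrace_func {
        ftrace_func_t ops;              /* four-parameter form */
        ftrace_func_no_ops_t no_ops;    /* two-parameter form; member name guessed */
};

#ifdef ARCH_SUPPORTS_FTRACE_OPS
static inline void ftrace_set_ufunc(union ftrace_func *ufunc, ftrace_func_t func)
{
        ufunc->ops = func;
}

static inline bool ftrace_same_address_ufunc(union ftrace_func *ufunc,
                                             ftrace_func_t func)
{
        return (ufunc->ops == func);    /* visible in the first hunk of the diff */
}
#else
static inline void ftrace_set_ufunc(union ftrace_func *ufunc,
                                    ftrace_func_no_ops_t func)
{
        ufunc->no_ops = func;           /* assumed body */
}

static inline bool ftrace_same_address_ufunc(union ftrace_func *ufunc,
                                             ftrace_func_no_ops_t func)
{
        return (ufunc->no_ops == func); /* assumed body */
}
#endif

Either way, the point of the union is that every assignment and comparison goes through a correctly typed member, so no function-pointer cast is needed.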