| Message ID | 20240930071031.3694979-1-liaochang1@huawei.com (mailing list archive) |
| --- | --- |
| State | Rejected |
| Series | [v2] function_graph: Improve fgraph LRU data initialization |
On Mon, 30 Sep 2024 07:10:31 +0000
Liao Chang <liaochang1@huawei.com> wrote:

> This patch uses [first ... last] = value to initialize fgraph_array[].
> And use fgraph_lru_next and fgraph_lru_last as the indicator of
> initialization.

The only thing this patch does is to allow the use of [first...last]
annotation for initialization. What actual benefit does that give us? In
other words, why would I want to apply this? Just so that we can use
[first...last] annotation with the added cost of having to manage setting
fgraph_lru_next and fgraph_lru_last to -1 and then comparing them?

Personally, I find the original code easier to maintain, as it's simple
and doesn't add extra management.

-- Steve

> 
> v2->v1:
> Fixup the build error reported by kernel test robot <lkp@intel.com>.
> Since some architectures use ftrace_graph_entry_stub() for the static
> ftrace scenario, then restore the definition without static keyword in
> the original patch [1]. And rebasing patch to next-20240927.
> 
> [1] https://lore.kernel.org/all/20240912111550.1752115-1-liaochang1@huawei.com
> 
> Signed-off-by: Liao Chang <liaochang1@huawei.com>
> ---
>  kernel/trace/fgraph.c | 54 +++++++++++++++++++++----------------------
>  1 file changed, 27 insertions(+), 27 deletions(-)
> 
> diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
> index d7d4fb403f6f..eb2fbc0338c7 100644
> --- a/kernel/trace/fgraph.c
> +++ b/kernel/trace/fgraph.c
> @@ -172,20 +172,41 @@ enum {
>  DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
>  int ftrace_graph_active;
>  
> -static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
> +int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
> +			    struct fgraph_ops *gops)
> +{
> +	return 0;
> +}
> +
> +static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
> +				  struct fgraph_ops *gops)
> +{
> +}
> +
> +static struct fgraph_ops fgraph_stub = {
> +	.entryfunc = ftrace_graph_entry_stub,
> +	.retfunc = ftrace_graph_ret_stub,
> +};
> +
> +static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE] = {
> +	[0 ... FGRAPH_ARRAY_SIZE - 1] = &fgraph_stub,
> +};
>  static unsigned long fgraph_array_bitmask;
>  
>  /* LRU index table for fgraph_array */
>  static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
> -static int fgraph_lru_next;
> -static int fgraph_lru_last;
> +static int fgraph_lru_next = -1;
> +static int fgraph_lru_last = -1;
>  
>  /* Initialize fgraph_lru_table with unused index */
>  static void fgraph_lru_init(void)
>  {
> -	int i;
> +	if ((fgraph_lru_next >= 0) && (fgraph_lru_last >= 0))
> +		return;
>  
> -	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
> +	fgraph_lru_next = fgraph_lru_last = 0;
> +
> +	for (int i = 0; i < FGRAPH_ARRAY_SIZE; i++)
>  		fgraph_lru_table[i] = i;
>  }
>  
> @@ -483,22 +504,6 @@ int __weak ftrace_disable_ftrace_graph_caller(void)
>  }
>  #endif
>  
> -int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
> -			    struct fgraph_ops *gops)
> -{
> -	return 0;
> -}
> -
> -static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
> -				  struct fgraph_ops *gops)
> -{
> -}
> -
> -static struct fgraph_ops fgraph_stub = {
> -	.entryfunc = ftrace_graph_entry_stub,
> -	.retfunc = ftrace_graph_ret_stub,
> -};
> -
>  static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
>  DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
>  DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
> @@ -1250,12 +1255,7 @@ int register_ftrace_graph(struct fgraph_ops *gops)
>  
>  	mutex_lock(&ftrace_lock);
>  
> -	if (!fgraph_array[0]) {
> -		/* The array must always have real data on it */
> -		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
> -			fgraph_array[i] = &fgraph_stub;
> -		fgraph_lru_init();
> -	}
> +	fgraph_lru_init();
>  
>  	i = fgraph_lru_alloc_index();
>  	if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub)) {
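For readers who have not run into the constructs being debated above, here is a small, self-contained user-space sketch (not kernel code) of the two pieces involved: the GCC range-designated initializer `[first ... last] = value` that the patch uses to pre-fill the array, and the `-1` sentinel pattern for lazily initializing the LRU table that the reply calls extra management. The names (`TABLE_SIZE`, `table`, `stub`, `lru_*`) are invented stand-ins for illustration only.

```c
#include <stdio.h>

#define TABLE_SIZE 8

/* Stand-ins for fgraph_stub / fgraph_array; not the kernel's definitions. */
static int stub;
static int *table[TABLE_SIZE] = {
	/* GCC range designator: every slot points at the stub at compile time. */
	[0 ... TABLE_SIZE - 1] = &stub,
};

/* Sentinel-based lazy init, mirroring the fgraph_lru_next/last == -1 idea. */
static int lru_next = -1;
static int lru_last = -1;
static int lru_table[TABLE_SIZE];

static void lru_init(void)
{
	if (lru_next >= 0 && lru_last >= 0)
		return;			/* already initialized */

	lru_next = lru_last = 0;
	for (int i = 0; i < TABLE_SIZE; i++)
		lru_table[i] = i;	/* fill the table with free indices */
}

int main(void)
{
	lru_init();
	printf("table[0] points at stub: %d\n", table[0] == &stub);
	printf("lru_table[%d] = %d\n", TABLE_SIZE - 1, lru_table[TABLE_SIZE - 1]);
	return 0;
}
```

Built with gcc (the range designator is a GNU extension also accepted by clang), this shows that every array slot already points at the stub before any code runs, while the LRU table still needs the runtime `lru_init()` call guarded by the two sentinels.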
```diff
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index d7d4fb403f6f..eb2fbc0338c7 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -172,20 +172,41 @@ enum {
 DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
 int ftrace_graph_active;
 
-static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
+int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
+			    struct fgraph_ops *gops)
+{
+	return 0;
+}
+
+static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
+				  struct fgraph_ops *gops)
+{
+}
+
+static struct fgraph_ops fgraph_stub = {
+	.entryfunc = ftrace_graph_entry_stub,
+	.retfunc = ftrace_graph_ret_stub,
+};
+
+static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE] = {
+	[0 ... FGRAPH_ARRAY_SIZE - 1] = &fgraph_stub,
+};
 static unsigned long fgraph_array_bitmask;
 
 /* LRU index table for fgraph_array */
 static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
-static int fgraph_lru_next;
-static int fgraph_lru_last;
+static int fgraph_lru_next = -1;
+static int fgraph_lru_last = -1;
 
 /* Initialize fgraph_lru_table with unused index */
 static void fgraph_lru_init(void)
 {
-	int i;
+	if ((fgraph_lru_next >= 0) && (fgraph_lru_last >= 0))
+		return;
 
-	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
+	fgraph_lru_next = fgraph_lru_last = 0;
+
+	for (int i = 0; i < FGRAPH_ARRAY_SIZE; i++)
 		fgraph_lru_table[i] = i;
 }
 
@@ -483,22 +504,6 @@ int __weak ftrace_disable_ftrace_graph_caller(void)
 }
 #endif
 
-int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
-			    struct fgraph_ops *gops)
-{
-	return 0;
-}
-
-static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
-				  struct fgraph_ops *gops)
-{
-}
-
-static struct fgraph_ops fgraph_stub = {
-	.entryfunc = ftrace_graph_entry_stub,
-	.retfunc = ftrace_graph_ret_stub,
-};
-
 static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
 DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
 DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
@@ -1250,12 +1255,7 @@ int register_ftrace_graph(struct fgraph_ops *gops)
 
 	mutex_lock(&ftrace_lock);
 
-	if (!fgraph_array[0]) {
-		/* The array must always have real data on it */
-		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
-			fgraph_array[i] = &fgraph_stub;
-		fgraph_lru_init();
-	}
+	fgraph_lru_init();
 
 	i = fgraph_lru_alloc_index();
 	if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub)) {
```
This patch uses [first ... last] = value to initialize fgraph_array[],
and uses fgraph_lru_next and fgraph_lru_last as the indicator of
initialization.

v2->v1:
Fix up the build error reported by the kernel test robot <lkp@intel.com>.
Since some architectures use ftrace_graph_entry_stub() for the static
ftrace scenario, restore the definition without the static keyword from
the original patch [1]. Also rebase the patch onto next-20240927.

[1] https://lore.kernel.org/all/20240912111550.1752115-1-liaochang1@huawei.com

Signed-off-by: Liao Chang <liaochang1@huawei.com>
---
 kernel/trace/fgraph.c | 54 +++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 27 deletions(-)
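For contrast with the sketch shown after the reply above, the following is an equally simplified user-space rendering of the lazy-initialization style that the current code uses and that the maintainer prefers: everything is set up on the first registration, keyed off the array contents, with no sentinel variables. The names (`NR_SLOTS`, `slots`, `register_slot_user`) are hypothetical and only illustrate the pattern, not the kernel's actual register_ftrace_graph().

```c
#define NR_SLOTS 8

/* Illustrative stand-ins; these are not the kernel's fgraph definitions. */
static int stub;
static int *slots[NR_SLOTS];	/* static storage: implicitly zeroed */

static void lru_init(void)
{
	/* Fill an LRU index table with every free slot (details elided). */
}

/*
 * Lazy-initialization style: the first caller notices the array is still
 * empty (slots[0] == NULL) and fills it, so no extra sentinel variables
 * have to be declared, reset to -1, or compared later.
 */
static int register_slot_user(void)
{
	if (!slots[0]) {
		/* The array must always have real data in it. */
		for (int i = 0; i < NR_SLOTS; i++)
			slots[i] = &stub;
		lru_init();
	}

	/* ... allocate an index and install the caller's ops here ... */
	return 0;
}

int main(void)
{
	return register_slot_user();
}
```

The trade-off the thread debates is exactly this: the designated-initializer version removes the `if (!slots[0])` block from the registration path, but pays for it with the two `-1` sentinels and the early-return check shown in the patch.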