| Message ID | 20240602033832.870736657@goodmis.org |
|---|---|
| State | Not Applicable |
| Delegated to | BPF |
| Series | function_graph: Allow multiple users for function graph tracing |

| Context | Check | Description |
|---|---|---|
| netdev/tree_selection | success | Not a local patch |
On Sat, 01 Jun 2024 23:37:55 -0400
Steven Rostedt <rostedt@goodmis.org> wrote:

[...]

> +static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
> +                                              struct ftrace_hash **orig_subhash,
> +                                              struct ftrace_hash *hash,
> +                                              int enable)
> +{
> +        struct ftrace_ops *ops = subops->managed;
> +        struct ftrace_hash **orig_hash;
> +        struct ftrace_hash *save_hash;
> +        struct ftrace_hash *new_hash;
> +        int ret;
> +
> +        /* Manager ops can not be subops (yet) */
> +        if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP))
> +                return -EINVAL;

This does return if ops->flags & FTRACE_OPS_FL_SUBOP, but --> (1)

> +
> +        /* Move the new hash over to the subops hash */
> +        save_hash = *orig_subhash;
> +        *orig_subhash = __ftrace_hash_move(hash);
> +        if (!*orig_subhash) {
> +                *orig_subhash = save_hash;
> +                return -ENOMEM;
> +        }
> +
> +        /* Create a new_hash to hold the ops new functions */
> +        if (enable) {
> +                orig_hash = &ops->func_hash->filter_hash;
> +                new_hash = append_hashes(ops);
> +        } else {
> +                orig_hash = &ops->func_hash->notrace_hash;
> +                new_hash = intersect_hashes(ops);
> +        }
> +
> +        /* Move the hash over to the new hash */
> +        ret = ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable);

This also looks a bit weird to me. Maybe we need a simple version like

`__ftrace_hash_move_and_update_ops()`

and call it from ftrace_hash_move_and_update_ops() and here?

> +
> +        free_ftrace_hash(new_hash);
> +
> +        if (ret) {
> +                /* Put back the original hash */
> +                free_ftrace_hash_rcu(*orig_subhash);
> +                *orig_subhash = save_hash;
> +        } else {
> +                free_ftrace_hash_rcu(save_hash);
> +        }
> +        return ret;
> +}
> +
> +
>  static u64              ftrace_update_time;
>  unsigned long           ftrace_update_tot_cnt;
>  unsigned long           ftrace_number_of_pages;
> @@ -4770,8 +4823,33 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
>  {
>          struct ftrace_ops_hash old_hash_ops;
>          struct ftrace_hash *old_hash;
> +        struct ftrace_ops *op;
>          int ret;
>
> +        if (ops->flags & FTRACE_OPS_FL_SUBOP)
> +                return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);

(1) This calls ftrace_hash_move_and_update_subops() if ops->flags & FTRACE_OPS_FL_SUBOP?

Thank you,

> +
> +        /*
> +         * If this ops is not enabled, it could be sharing its filters
> +         * with a subop. If that's the case, update the subop instead of
> +         * this ops. Shared filters are only allowed to have one ops set
> +         * at a time, and if we update the ops that is not enabled,
> +         * it will not affect subops that share it.
> +         */
> +        if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) {
> +                /* Check if any other manager subops maps to this hash */
> +                do_for_each_ftrace_op(op, ftrace_ops_list) {
> +                        struct ftrace_ops *subops;
> +
> +                        list_for_each_entry(subops, &op->subop_list, list) {
> +                                if ((subops->flags & FTRACE_OPS_FL_ENABLED) &&
> +                                    subops->func_hash == ops->func_hash) {
> +                                        return ftrace_hash_move_and_update_subops(subops, orig_hash, hash, enable);
> +                                }
> +                        }
> +                } while_for_each_ftrace_op(op);
> +        }
> +
>          old_hash = *orig_hash;
>          old_hash_ops.filter_hash = ops->func_hash->filter_hash;
>          old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
> --
> 2.43.0
>
>
On Mon, 3 Jun 2024 11:37:23 +0900
Masami Hiramatsu (Google) <mhiramat@kernel.org> wrote:

> On Sat, 01 Jun 2024 23:37:55 -0400
> Steven Rostedt <rostedt@goodmis.org> wrote:
>
> [...]
>
> > +static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
> > +                                              struct ftrace_hash **orig_subhash,
> > +                                              struct ftrace_hash *hash,
> > +                                              int enable)
> > +{
> > +        struct ftrace_ops *ops = subops->managed;
> > +        struct ftrace_hash **orig_hash;
> > +        struct ftrace_hash *save_hash;
> > +        struct ftrace_hash *new_hash;
> > +        int ret;
> > +
> > +        /* Manager ops can not be subops (yet) */
> > +        if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP))
> > +                return -EINVAL;
>
> This does return if ops->flags & FTRACE_OPS_FL_SUBOP, but --> (1)

Yes, because what is passed in is "subops" and "ops" is subops->managed.

>
> > +
> > +        /* Move the new hash over to the subops hash */
> > +        save_hash = *orig_subhash;
> > +        *orig_subhash = __ftrace_hash_move(hash);
> > +        if (!*orig_subhash) {
> > +                *orig_subhash = save_hash;
> > +                return -ENOMEM;
> > +        }
> > +
> > +        /* Create a new_hash to hold the ops new functions */
> > +        if (enable) {
> > +                orig_hash = &ops->func_hash->filter_hash;
> > +                new_hash = append_hashes(ops);
> > +        } else {
> > +                orig_hash = &ops->func_hash->notrace_hash;
> > +                new_hash = intersect_hashes(ops);
> > +        }
> > +
> > +        /* Move the hash over to the new hash */
> > +        ret = ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable);
>
> This also looks a bit weird to me. Maybe we need a simple version like
>
> `__ftrace_hash_move_and_update_ops()`
>
> and call it from ftrace_hash_move_and_update_ops() and here?

We could do that. I almost did due to other issues but I reworked the code
where I didn't need to.

>
> > +
> > +        free_ftrace_hash(new_hash);
> > +
> > +        if (ret) {
> > +                /* Put back the original hash */
> > +                free_ftrace_hash_rcu(*orig_subhash);
> > +                *orig_subhash = save_hash;
> > +        } else {
> > +                free_ftrace_hash_rcu(save_hash);
> > +        }
> > +        return ret;
> > +}
> > +
> > +
> >  static u64              ftrace_update_time;
> >  unsigned long           ftrace_update_tot_cnt;
> >  unsigned long           ftrace_number_of_pages;
> > @@ -4770,8 +4823,33 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
> >  {
> >          struct ftrace_ops_hash old_hash_ops;
> >          struct ftrace_hash *old_hash;
> > +        struct ftrace_ops *op;
> >          int ret;
> >
> > +        if (ops->flags & FTRACE_OPS_FL_SUBOP)
> > +                return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);
>
> (1) This calls ftrace_hash_move_and_update_subops() if ops->flags & FTRACE_OPS_FL_SUBOP?

Yes, because ops turns into subops, and the ops above it is its manager ops.

-- Steve
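To put the point above in one picture: a subops carries FTRACE_OPS_FL_SUBOP and points back at the ops that manages it through ->managed, while the manager itself never carries the SUBOP flag. A small stand-alone sketch of just that relationship (the structure and flag below are simplified stand-ins, not the real ftrace definitions):

#include <assert.h>
#include <stddef.h>

#define TOY_FL_SUBOP (1UL << 0)         /* stand-in for FTRACE_OPS_FL_SUBOP */

struct toy_ops {
        unsigned long    flags;
        struct toy_ops  *managed;       /* set only on a subops */
};

int main(void)
{
        struct toy_ops manager = { .flags = 0, .managed = NULL };
        struct toy_ops subops  = { .flags = TOY_FL_SUBOP, .managed = &manager };

        /*
         * Mirrors the check at the top of ftrace_hash_move_and_update_subops():
         * the function is handed the subops, but the WARN_ON_ONCE() is about
         * its manager, which must exist and must not itself be a subops.
         */
        struct toy_ops *ops = subops.managed;
        assert(ops && !(ops->flags & TOY_FL_SUBOP));
        return 0;
}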
On Mon, 3 Jun 2024 10:52:50 -0400
Steven Rostedt <rostedt@goodmis.org> wrote:

> On Mon, 3 Jun 2024 11:37:23 +0900
> Masami Hiramatsu (Google) <mhiramat@kernel.org> wrote:
>
> > On Sat, 01 Jun 2024 23:37:55 -0400
> > Steven Rostedt <rostedt@goodmis.org> wrote:
> >
> > [...]
> >
> > > +static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
> > > +                                              struct ftrace_hash **orig_subhash,
> > > +                                              struct ftrace_hash *hash,
> > > +                                              int enable)
> > > +{
> > > +        struct ftrace_ops *ops = subops->managed;
> > > +        struct ftrace_hash **orig_hash;
> > > +        struct ftrace_hash *save_hash;
> > > +        struct ftrace_hash *new_hash;
> > > +        int ret;
> > > +
> > > +        /* Manager ops can not be subops (yet) */
> > > +        if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP))
> > > +                return -EINVAL;
> >
> > This does return if ops->flags & FTRACE_OPS_FL_SUBOP, but --> (1)
>
> Yes, because what is passed in is "subops" and "ops" is subops->managed.

Ah, I missed that point. OK, I got it.

>
> >
> > > +
> > > +        /* Move the new hash over to the subops hash */
> > > +        save_hash = *orig_subhash;
> > > +        *orig_subhash = __ftrace_hash_move(hash);
> > > +        if (!*orig_subhash) {
> > > +                *orig_subhash = save_hash;
> > > +                return -ENOMEM;
> > > +        }
> > > +
> > > +        /* Create a new_hash to hold the ops new functions */
> > > +        if (enable) {
> > > +                orig_hash = &ops->func_hash->filter_hash;
> > > +                new_hash = append_hashes(ops);
> > > +        } else {
> > > +                orig_hash = &ops->func_hash->notrace_hash;
> > > +                new_hash = intersect_hashes(ops);
> > > +        }
> > > +
> > > +        /* Move the hash over to the new hash */
> > > +        ret = ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable);

So this `ops` is the managed ops of this subops.

> >
> > This also looks a bit weird to me. Maybe we need a simple version like
> >
> > `__ftrace_hash_move_and_update_ops()`
> >
> > and call it from ftrace_hash_move_and_update_ops() and here?
>
> We could do that. I almost did due to other issues but I reworked the code
> where I didn't need to.
>
> >
> > > +
> > > +        free_ftrace_hash(new_hash);
> > > +
> > > +        if (ret) {
> > > +                /* Put back the original hash */
> > > +                free_ftrace_hash_rcu(*orig_subhash);
> > > +                *orig_subhash = save_hash;
> > > +        } else {
> > > +                free_ftrace_hash_rcu(save_hash);
> > > +        }
> > > +        return ret;
> > > +}
> > > +
> > > +
> > >  static u64              ftrace_update_time;
> > >  unsigned long           ftrace_update_tot_cnt;
> > >  unsigned long           ftrace_number_of_pages;
> > > @@ -4770,8 +4823,33 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
> > >  {
> > >          struct ftrace_ops_hash old_hash_ops;
> > >          struct ftrace_hash *old_hash;
> > > +        struct ftrace_ops *op;
> > >          int ret;
> > >
> > > +        if (ops->flags & FTRACE_OPS_FL_SUBOP)
> > > +                return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);
> >
> > (1) This calls ftrace_hash_move_and_update_subops() if ops->flags & FTRACE_OPS_FL_SUBOP?
>
> Yes, because ops turns into subops, and the ops above it is its manager ops.

Ah, OK. This `ops` is a subops.

Thank you,

>
> -- Steve
>
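The suggestion discussed above would amount to splitting the update path into a dispatching wrapper and a core helper, so that the subops path does not have to re-enter the public function. A rough sketch of that shape, purely illustrative (the posted patch does not do this; as noted above, the code was reworked so the split was not needed):

/* Hypothetical refactor sketch, not part of the patch. */
static int __ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
                                             struct ftrace_hash **orig_hash,
                                             struct ftrace_hash *hash,
                                             int enable)
{
        /* the existing hash move/update body would live here, unchanged */
        return 0;
}

static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
                                           struct ftrace_hash **orig_hash,
                                           struct ftrace_hash *hash,
                                           int enable)
{
        /* a subops update is redirected to its manager ops */
        if (ops->flags & FTRACE_OPS_FL_SUBOP)
                return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);

        return __ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
}

With that shape, ftrace_hash_move_and_update_subops() could call __ftrace_hash_move_and_update_ops() on the manager ops directly instead of going back through the wrapper.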
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 978a1d3b270a..63238a9a9270 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -227,6 +227,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  *            ftrace_enabled.
  * DIRECT - Used by the direct ftrace_ops helper for direct functions
  *            (internal ftrace only, should not be used by others)
+ * SUBOP - Is controlled by another op in field managed.
  */
 enum {
         FTRACE_OPS_FL_ENABLED                   = BIT(0),
@@ -247,6 +248,7 @@ enum {
         FTRACE_OPS_FL_TRACE_ARRAY               = BIT(15),
         FTRACE_OPS_FL_PERMANENT                 = BIT(16),
         FTRACE_OPS_FL_DIRECT                    = BIT(17),
+        FTRACE_OPS_FL_SUBOP                     = BIT(18),
 };
 
 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
@@ -336,6 +338,7 @@ struct ftrace_ops {
         struct list_head                list;
         struct list_head                subop_list;
         ftrace_ops_func_t               ops_func;
+        struct ftrace_ops               *managed;
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
         unsigned long                   direct_call;
 #endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 38fb2a634b04..e447b04c0c9c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3424,7 +3424,8 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
         } else {
                 free_ftrace_hash(save_filter_hash);
                 free_ftrace_hash(save_notrace_hash);
-                subops->flags |= FTRACE_OPS_FL_ENABLED;
+                subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
+                subops->managed = ops;
         }
         return ret;
 }
@@ -3478,11 +3479,12 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
         ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
         free_ftrace_hash(filter_hash);
         free_ftrace_hash(notrace_hash);
-        if (ret < 0)
+        if (ret < 0) {
                 list_del(&subops->list);
-        else
-                subops->flags |= FTRACE_OPS_FL_ENABLED;
-
+        } else {
+                subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
+                subops->managed = ops;
+        }
         return ret;
 }
 
@@ -3527,6 +3529,8 @@ int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, in
                 free_ftrace_hash(ops->func_hash->notrace_hash);
                 ops->func_hash->filter_hash = EMPTY_HASH;
                 ops->func_hash->notrace_hash = EMPTY_HASH;
+                subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
+                subops->managed = NULL;
                 return 0;
         }
 
@@ -3542,16 +3546,65 @@ int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, in
         }
 
         ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
-        if (ret < 0)
-                list_add(&subops->list, &ops->subop_list);
-        else
-                subops->flags &= ~FTRACE_OPS_FL_ENABLED;
-
+        if (ret < 0) {
+                list_add(&subops->list, &ops->subop_list);
+        } else {
+                subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
+                subops->managed = NULL;
+        }
         free_ftrace_hash(filter_hash);
         free_ftrace_hash(notrace_hash);
         return ret;
 }
 
+static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
+                                              struct ftrace_hash **orig_subhash,
+                                              struct ftrace_hash *hash,
+                                              int enable)
+{
+        struct ftrace_ops *ops = subops->managed;
+        struct ftrace_hash **orig_hash;
+        struct ftrace_hash *save_hash;
+        struct ftrace_hash *new_hash;
+        int ret;
+
+        /* Manager ops can not be subops (yet) */
+        if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP))
+                return -EINVAL;
+
+        /* Move the new hash over to the subops hash */
+        save_hash = *orig_subhash;
+        *orig_subhash = __ftrace_hash_move(hash);
+        if (!*orig_subhash) {
+                *orig_subhash = save_hash;
+                return -ENOMEM;
+        }
+
+        /* Create a new_hash to hold the ops new functions */
+        if (enable) {
+                orig_hash = &ops->func_hash->filter_hash;
+                new_hash = append_hashes(ops);
+        } else {
+                orig_hash = &ops->func_hash->notrace_hash;
+                new_hash = intersect_hashes(ops);
+        }
+
+        /* Move the hash over to the new hash */
+        ret = ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable);
+
+        free_ftrace_hash(new_hash);
+
+        if (ret) {
+                /* Put back the original hash */
+                free_ftrace_hash_rcu(*orig_subhash);
+                *orig_subhash = save_hash;
+        } else {
+                free_ftrace_hash_rcu(save_hash);
+        }
+        return ret;
+}
+
+
 static u64              ftrace_update_time;
 unsigned long           ftrace_update_tot_cnt;
 unsigned long           ftrace_number_of_pages;
@@ -4770,8 +4823,33 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
 {
         struct ftrace_ops_hash old_hash_ops;
         struct ftrace_hash *old_hash;
+        struct ftrace_ops *op;
         int ret;
 
+        if (ops->flags & FTRACE_OPS_FL_SUBOP)
+                return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);
+
+        /*
+         * If this ops is not enabled, it could be sharing its filters
+         * with a subop. If that's the case, update the subop instead of
+         * this ops. Shared filters are only allowed to have one ops set
+         * at a time, and if we update the ops that is not enabled,
+         * it will not affect subops that share it.
+         */
+        if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) {
+                /* Check if any other manager subops maps to this hash */
+                do_for_each_ftrace_op(op, ftrace_ops_list) {
+                        struct ftrace_ops *subops;
+
+                        list_for_each_entry(subops, &op->subop_list, list) {
+                                if ((subops->flags & FTRACE_OPS_FL_ENABLED) &&
+                                    subops->func_hash == ops->func_hash) {
+                                        return ftrace_hash_move_and_update_subops(subops, orig_hash, hash, enable);
+                                }
+                        }
+                } while_for_each_ftrace_op(op);
+        }
+
         old_hash = *orig_hash;
         old_hash_ops.filter_hash = ops->func_hash->filter_hash;
         old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
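As an aside on why the new helper uses append_hashes() for the filter hash and intersect_hashes() for the notrace hash: a manager ops has to trace any function that at least one of its enabled subops wants (a union), and may only skip a function that every subops agrees to skip (an intersection). A toy userspace model of those two rules, assuming that is what the two helpers compute (the semantics are inferred from their names here; the helpers themselves are not part of this patch's hunks):

#include <stdio.h>

/* Each bit stands for one traceable function. */
struct toy_sub {
        unsigned long filter;        /* functions this subops wants traced  */
        unsigned long notrace;       /* functions this subops wants skipped */
};

int main(void)
{
        struct toy_sub subs[] = {
                { .filter = 0x3, .notrace = 0x4 },
                { .filter = 0x6, .notrace = 0xc },
        };
        unsigned long filter = 0;
        unsigned long notrace = ~0UL;

        for (int i = 0; i < 2; i++) {
                filter |= subs[i].filter;       /* union: trace what any subops wants   */
                notrace &= subs[i].notrace;     /* intersection: skip only if all agree */
        }

        printf("manager filter:  %#lx\n", filter);      /* prints 0x7 */
        printf("manager notrace: %#lx\n", notrace);     /* prints 0x4 */
        return 0;
}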