Message ID | 6798d1469dd31609e76d13f5ac6249f7af48456a.1574116163.git.leonard.crestez@nxp.com |
---|---|
State | Not Applicable, archived |
Series | [v2] interconnect: Add interconnect_graph file to debugfs |
Hi Leonard,

On 19.11.19 г. 0:34 ч., Leonard Crestez wrote:
> The interconnect graphs can be difficult to understand and the current
> "interconnect_summary" file doesn't even display links in any way.
>
> Add a new "interconnect_graph" file to debugfs in the graphviz "dot"
> format which describes interconnect providers, nodes and links.
>
> The file is human-readable and can be visualized by piping through
> graphviz. Example:
>
> ssh $TARGET cat /sys/kernel/debug/interconnect/interconnect_graph \
>     | dot -Tsvg > interconnect_graph.svg
>
> Signed-off-by: Leonard Crestez <leonard.crestez@nxp.com>
> Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
> Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
> ---
>  Documentation/driver-api/interconnect.rst | 23 ++++++++
>  drivers/interconnect/core.c               | 66 +++++++++++++++++++++++
>  2 files changed, 89 insertions(+)
>
> Example output as a github gist:
> https://gist.github.com/cdleonard/2f74a7efe74587e3d4b57cf7983b46a8
>
> The qcs404 driver was hacked to probe on imx, the links to "0" seem to
> from incorrect trailing 0s on DEFINE_QNODE. Possibly fallout from
> switching to ARRAY_SIZE(__VA_ARGS__)?
>
> This makes it easier to understand the interconnect graph than just
> staring at registration code.
>
> Changes since RFC v1:
> * Document under driver-api/interconnect.rst
> * Collect reviews
> Link to v1: https://patchwork.kernel.org/patch/11242921/
>
> diff --git a/Documentation/driver-api/interconnect.rst b/Documentation/driver-api/interconnect.rst
> index cdeb5825f314..77a85aad8d2f 100644
> --- a/Documentation/driver-api/interconnect.rst
> +++ b/Documentation/driver-api/interconnect.rst
> @@ -89,5 +89,28 @@ Interconnect consumers
>  
>  Interconnect consumers are the clients which use the interconnect APIs to
>  get paths between endpoints and set their bandwidth/latency/QoS requirements
>  for these interconnect paths. These interfaces are not currently
>  documented.
> +
> +Interconnect debugfs interfaces
> +-------------------------------
> +
> +Like several other subsystems interconnect will create some files for debugging
> +and introspection. Files in debugfs are not considered ABI so application
> +software shouldn't rely on format details
> +change between kernel versions.
> +
> +``/sys/kernel/debug/interconnect/interconnect_summary``:
> +
> +Show all interconnect nodes in the system with their aggregated bandwith

s/bandwith/bandwidth/

> +request. Indented under each node show bandwith requests from each device.

s/bandwith/bandwidth/

> +
> +``/sys/kernel/debug/interconnect/interconnect_graph``:
> +
> +Show the interconnect graph in the graphviz dot format. It shows all
> +interconnect nodes and links in the system and groups together nodes from the
> +same provider as subgraphs. The format is human-readable and can also be piped
> +through dot to generate diagrams in many graphical formats::
> +
> +    $ cat /sys/kernel/debug/interconnect/interconnect_graph | \
> +        dot -Tsvg > interconnect_graph.svg
> diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
> index c498796adc07..07e91288c7f4 100644
> --- a/drivers/interconnect/core.c
> +++ b/drivers/interconnect/core.c
> @@ -92,10 +92,74 @@ static int icc_summary_show(struct seq_file *s, void *data)
>  
>  	return 0;
>  }
>  DEFINE_SHOW_ATTRIBUTE(icc_summary);
>  
> +static void icc_graph_show_link(struct seq_file *s, int level,
> +				struct icc_node *n, struct icc_node *m)
> +{
> +	seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
> +		level == 2 ? "\t\t" : "\t",
> +		n->id, n->name, m->id, m->name);

Nit: Should be aligned to the open parenthesis.

> +}
> +
> +static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
> +{
> +	seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
> +		   n->id, n->name, n->id, n->name);
> +	seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
> +	seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
> +	seq_puts(s, "\"]\n");
> +}
> +
> +static int icc_graph_show(struct seq_file *s, void *data)
> +{
> +	struct icc_provider *provider;
> +	struct icc_node *n;
> +	int cluster_index = 0;
> +	int i;
> +
> +	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
> +	mutex_lock(&icc_lock);
> +
> +	/* draw providers as cluster subgraphs */
> +	cluster_index = 0;
> +	list_for_each_entry(provider, &icc_providers, provider_list) {
> +		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
> +		if (provider->dev)
> +			seq_printf(s, "\t\tlabel = \"%s\"\n",
> +				   dev_name(provider->dev));
> +
> +		/* draw nodes */
> +		list_for_each_entry(n, &provider->nodes, node_list)
> +			icc_graph_show_node(s, n);
> +
> +		/* draw internal links */
> +		list_for_each_entry(n, &provider->nodes, node_list)
> +			for (i = 0; i < n->num_links; ++i)
> +				if (n->provider == n->links[i]->provider)
> +					icc_graph_show_link(s, 2, n,
> +							    n->links[i]);
> +
> +		seq_puts(s, "\t}\n");
> +	}
> +
> +	/* draw external links */
> +	list_for_each_entry(provider, &icc_providers, provider_list)
> +		list_for_each_entry(n, &provider->nodes, node_list)
> +			for (i = 0; i < n->num_links; ++i)
> +				if (n->provider != n->links[i]->provider)
> +					icc_graph_show_link(s, 1, n,
> +							    n->links[i]);
> +
> +	mutex_unlock(&icc_lock);
> +	seq_puts(s, "}");
> +
> +	return 0;
> +}
> +DEFINE_SHOW_ATTRIBUTE(icc_graph);
> +
>  static struct icc_node *node_find(const int id)
>  {
>  	return idr_find(&icc_idr, id);
>  }
>  
> @@ -800,10 +864,12 @@ EXPORT_SYMBOL_GPL(icc_provider_del);
>  static int __init icc_init(void)
>  {
>  	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
>  	debugfs_create_file("interconnect_summary", 0444,
>  			    icc_debugfs_dir, NULL, &icc_summary_fops);
> +	debugfs_create_file("interconnect_graph", 0444,
> +			    icc_debugfs_dir, NULL, &icc_graph_fops);
>  	return 0;
>  }
>  
>  static void __exit icc_exit(void)
>  {

This is good stuff! Thank you! I will fix up the typos while applying, no
need to resend it.

BR,
Georgi