Message ID | 20230314030532.9238-2-kerneljasonxing@gmail.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Netdev Maintainers |
Series | add some detailed data when reading softnet_stat |
On Tue, Mar 14, 2023 at 11:05:31AM +0800, Jason Xing wrote:
> From: Jason Xing <kernelxing@tencent.com>
>
> Sometimes we need to know which one of backlog queue can be exactly
> long enough to cause some latency when debugging this part is needed.
> Thus, we can then separate the display of both.
>
> Signed-off-by: Jason Xing <kernelxing@tencent.com>
> ---
> v2: keep the total len of backlog queues untouched as Eric said
> Link: https://lore.kernel.org/lkml/20230311151756.83302-1-kerneljasonxing@gmail.com/
> ---
>  net/core/net-procfs.c | 20 ++++++++++++++++----
>  1 file changed, 16 insertions(+), 4 deletions(-)
>
> diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
> index 1ec23bf8b05c..2809b663e78d 100644
> --- a/net/core/net-procfs.c
> +++ b/net/core/net-procfs.c
> @@ -115,10 +115,19 @@ static int dev_seq_show(struct seq_file *seq, void *v)
>  	return 0;
>  }
>
> +static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
> +{
> +	return skb_queue_len_lockless(&sd->input_pkt_queue);
> +}
> +
> +static u32 softnet_process_queue_len(struct softnet_data *sd)
> +{
> +	return skb_queue_len_lockless(&sd->process_queue);
> +}
> +
>  static u32 softnet_backlog_len(struct softnet_data *sd)
>  {
> -	return skb_queue_len_lockless(&sd->input_pkt_queue) +
> -	       skb_queue_len_lockless(&sd->process_queue);
> +	return softnet_input_pkt_queue_len(sd) + softnet_process_queue_len(sd);
>  }
>
>  static struct softnet_data *softnet_get_online(loff_t *pos)
> @@ -169,12 +178,15 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
>  	 * mapping the data a specific CPU
>  	 */
>  	seq_printf(seq,
> -		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
> +		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
> +		   "%08x %08x\n",
>  		   sd->processed, sd->dropped, sd->time_squeeze, 0,
>  		   0, 0, 0, 0, /* was fastroute */
>  		   0, /* was cpu_collision */
>  		   sd->received_rps, flow_limit_count,
> -		   softnet_backlog_len(sd), (int)seq->index);
> +		   softnet_backlog_len(sd), /* keep it untouched */

I'm not sure the comment on the line above buys us much
outside of the context of development of this patch.
Likewise in patch 2/2.

That not withstanding, this looks good to me.

Reviewed-by: Simon Horman <simon.horman@corigine.com>

> +		   (int)seq->index,
> +		   softnet_input_pkt_queue_len(sd), softnet_process_queue_len(sd));
>  	return 0;
>  }
>
> --
> 2.37.3
>
On Mon, Mar 13, 2023 at 8:06 PM Jason Xing <kerneljasonxing@gmail.com> wrote:
>
> From: Jason Xing <kernelxing@tencent.com>
>
> Sometimes we need to know which one of backlog queue can be exactly
> long enough to cause some latency when debugging this part is needed.
> Thus, we can then separate the display of both.
>
> Signed-off-by: Jason Xing <kernelxing@tencent.com>
> ---
> v2: keep the total len of backlog queues untouched as Eric said
> Link: https://lore.kernel.org/lkml/20230311151756.83302-1-kerneljasonxing@gmail.com/
> ---
>  net/core/net-procfs.c | 20 ++++++++++++++++----
>  1 file changed, 16 insertions(+), 4 deletions(-)
>
> diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
> index 1ec23bf8b05c..2809b663e78d 100644
> --- a/net/core/net-procfs.c
> +++ b/net/core/net-procfs.c
> @@ -115,10 +115,19 @@ static int dev_seq_show(struct seq_file *seq, void *v)
>  	return 0;
>  }
>
> +static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
> +{
> +	return skb_queue_len_lockless(&sd->input_pkt_queue);
> +}
> +
> +static u32 softnet_process_queue_len(struct softnet_data *sd)
> +{
> +	return skb_queue_len_lockless(&sd->process_queue);
> +}
> +
>  static u32 softnet_backlog_len(struct softnet_data *sd)
>  {
> -	return skb_queue_len_lockless(&sd->input_pkt_queue) +
> -	       skb_queue_len_lockless(&sd->process_queue);
> +	return softnet_input_pkt_queue_len(sd) + softnet_process_queue_len(sd);

Reading these variables twice might lead to inconsistency that can
easily be avoided.

I would suggest you cache the values,

u32 len1 = softnet_input_pkt_queue_len(sd);
u32 len2 = softnet_process_queue_len(sd);

>  }
>
>  static struct softnet_data *softnet_get_online(loff_t *pos)
> @@ -169,12 +178,15 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
>  	 * mapping the data a specific CPU
>  	 */
>  	seq_printf(seq,
> -		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
> +		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
> +		   "%08x %08x\n",
>  		   sd->processed, sd->dropped, sd->time_squeeze, 0,
>  		   0, 0, 0, 0, /* was fastroute */
>  		   0, /* was cpu_collision */
>  		   sd->received_rps, flow_limit_count,
> -		   softnet_backlog_len(sd), (int)seq->index);
> +		   softnet_backlog_len(sd), /* keep it untouched */

len1 + len2.

> +		   (int)seq->index,
> +		   softnet_input_pkt_queue_len(sd), softnet_process_queue_len(sd));

len1, len2);

>  	return 0;
>  }
>
> --
> 2.37.3
>
On Tue, Mar 14, 2023 at 10:59 PM Eric Dumazet <edumazet@google.com> wrote:
>
> On Mon, Mar 13, 2023 at 8:06 PM Jason Xing <kerneljasonxing@gmail.com> wrote:
> >
> > From: Jason Xing <kernelxing@tencent.com>
> >
> > Sometimes we need to know which one of backlog queue can be exactly
> > long enough to cause some latency when debugging this part is needed.
> > Thus, we can then separate the display of both.
> >
> > Signed-off-by: Jason Xing <kernelxing@tencent.com>
> > ---
> > v2: keep the total len of backlog queues untouched as Eric said
> > Link: https://lore.kernel.org/lkml/20230311151756.83302-1-kerneljasonxing@gmail.com/
> > ---
> >  net/core/net-procfs.c | 20 ++++++++++++++++----
> >  1 file changed, 16 insertions(+), 4 deletions(-)
> >
> > diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
> > index 1ec23bf8b05c..2809b663e78d 100644
> > --- a/net/core/net-procfs.c
> > +++ b/net/core/net-procfs.c
> > @@ -115,10 +115,19 @@ static int dev_seq_show(struct seq_file *seq, void *v)
> >  	return 0;
> >  }
> >
> > +static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
> > +{
> > +	return skb_queue_len_lockless(&sd->input_pkt_queue);
> > +}
> > +
> > +static u32 softnet_process_queue_len(struct softnet_data *sd)
> > +{
> > +	return skb_queue_len_lockless(&sd->process_queue);
> > +}
> > +
> >  static u32 softnet_backlog_len(struct softnet_data *sd)
> >  {
> > -	return skb_queue_len_lockless(&sd->input_pkt_queue) +
> > -	       skb_queue_len_lockless(&sd->process_queue);
> > +	return softnet_input_pkt_queue_len(sd) + softnet_process_queue_len(sd);
> [...]
> Reading these variables twice might lead to inconsistency that can
> easily be avoided.
>
> I would suggest you cache the values,
>
> u32 len1 = softnet_input_pkt_queue_len(sd);
> u32 len2 = softnet_process_queue_len(sd);

Agreed. Thank you, Eric. I should have realized that.

Also, the 2/2 patch which is all about the time_/budget_squeeze should
avoid such inconsistency, I think.

Jason

>
> >  }
> >
> >  static struct softnet_data *softnet_get_online(loff_t *pos)
> > @@ -169,12 +178,15 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
> >  	 * mapping the data a specific CPU
> >  	 */
> >  	seq_printf(seq,
> > -		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
> > +		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
> > +		   "%08x %08x\n",
> >  		   sd->processed, sd->dropped, sd->time_squeeze, 0,
> >  		   0, 0, 0, 0, /* was fastroute */
> >  		   0, /* was cpu_collision */
> >  		   sd->received_rps, flow_limit_count,
> > -		   softnet_backlog_len(sd), (int)seq->index);
> > +		   softnet_backlog_len(sd), /* keep it untouched */
> len1 + len2.
>
> > +		   (int)seq->index,
> > +		   softnet_input_pkt_queue_len(sd), softnet_process_queue_len(sd));
> len1, len2);
>
> >  	return 0;
> >  }
> >
> > --
> > 2.37.3
> >
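To make Eric's suggestion concrete, here is a rough sketch (not the actual v3 submission) of how the tail of softnet_seq_show() could look once each queue length is read once and reused; the input_qlen/process_qlen local names are only illustrative:

	/* Sketch only: read each lockless queue length once so the printed
	 * total and the two per-queue columns cannot disagree with each other.
	 */
	u32 input_qlen = softnet_input_pkt_queue_len(sd);
	u32 process_qlen = softnet_process_queue_len(sd);

	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
		   "%08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0, /* was cpu_collision */
		   sd->received_rps, flow_limit_count,
		   input_qlen + process_qlen, /* combined backlog, format unchanged */
		   (int)seq->index,
		   input_qlen, process_qlen);

Each lockless read is still individually racy, but caching guarantees the sum column equals the two new columns within a single output line, which is the inconsistency Eric wants to avoid.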
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 1ec23bf8b05c..2809b663e78d 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -115,10 +115,19 @@ static int dev_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
+static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
+{
+	return skb_queue_len_lockless(&sd->input_pkt_queue);
+}
+
+static u32 softnet_process_queue_len(struct softnet_data *sd)
+{
+	return skb_queue_len_lockless(&sd->process_queue);
+}
+
 static u32 softnet_backlog_len(struct softnet_data *sd)
 {
-	return skb_queue_len_lockless(&sd->input_pkt_queue) +
-	       skb_queue_len_lockless(&sd->process_queue);
+	return softnet_input_pkt_queue_len(sd) + softnet_process_queue_len(sd);
 }
 
 static struct softnet_data *softnet_get_online(loff_t *pos)
@@ -169,12 +178,15 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
 	 * mapping the data a specific CPU
 	 */
 	seq_printf(seq,
-		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
+		   "%08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0, /* was cpu_collision */
		   sd->received_rps, flow_limit_count,
-		   softnet_backlog_len(sd), (int)seq->index);
+		   softnet_backlog_len(sd), /* keep it untouched */
+		   (int)seq->index,
+		   softnet_input_pkt_queue_len(sd), softnet_process_queue_len(sd));
 	return 0;
 }
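For context, a small hypothetical user-space reader (not part of the patch) showing how the two extra columns would be consumed, assuming the 15-column format above: field 12 is the combined backlog, field 13 is the (int)seq->index row index, and fields 14 and 15 are the new input_pkt_queue and process_queue lengths.

#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/proc/net/softnet_stat", "r");
	unsigned int f[15];
	int rows = 0;

	if (!fp) {
		perror("fopen");
		return 1;
	}

	/* One line per online CPU; every column is printed as %08x. */
	while (fscanf(fp, "%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x",
		      &f[0], &f[1], &f[2], &f[3], &f[4], &f[5], &f[6], &f[7],
		      &f[8], &f[9], &f[10], &f[11], &f[12], &f[13], &f[14]) == 15) {
		/* f[11] is the combined backlog, f[12] the seq index,
		 * f[13]/f[14] the new per-queue lengths.
		 */
		printf("row %u: backlog=%u input_pkt_queue=%u process_queue=%u\n",
		       f[12], f[11], f[13], f[14]);
		rows++;
	}

	fclose(fp);
	return rows ? 0 : 1;
}

On an unpatched kernel the lines only carry 13 columns, so this sketch would misparse; a robust tool would count the columns per line before interpreting them.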