
[net-next] net: stats: Read the statistics in ___gnet_stats_copy_basic() instead of adding.

Message ID 20211021095919.bi3szpt3c2kcoiso@linutronix.de (mailing list archive)
State Accepted
Commit c5c6e589a8c811c7f20024cc22209ae984fc2b98
Delegated to: Netdev Maintainers
Series [net-next] net: stats: Read the statistics in ___gnet_stats_copy_basic() instead of adding.

Checks

Context Check Description
netdev/cover_letter success Single patches do not need cover letters
netdev/fixes_present success Fixes tag not required for -next series
netdev/patch_count success Link
netdev/tree_selection success Clearly marked for net-next
netdev/subject_prefix success Link
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 7 this patch: 7
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success Fixes tag looks correct
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns
netdev/build_allmodconfig_warn success Errors and warnings before: 7 this patch: 7
netdev/header_inline success No static functions without inline keyword in header files

Commit Message

Sebastian Andrzej Siewior Oct. 21, 2021, 9:59 a.m. UTC
Since the rework, the statistics code always adds up the byte and packet
value(s). On 32bit architectures a seqcount_t is used in
gnet_stats_basic_sync to ensure that the 64bit values are not modified
during the read since two 32bit loads are required. The usage of a
seqcount_t requires a lock to ensure that only one writer is active at a
time. This lock leads to disabled preemption during the update.
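For context, a minimal sketch of the u64_stats pattern described above (not part of the patch; b stands for some struct gnet_stats_basic_sync and the surrounding locals are illustrative only). On 32bit the writer wraps the two 32bit stores in a seqcount section and must be serialized by a lock, which is where preemption ends up disabled; the reader retries until it observes a stable sequence:

	unsigned int start;
	u64 bytes, packets;

	/* Writer: callers must be serialized by a lock (e.g. the qdisc
	 * lock); holding it is what disables preemption during the update.
	 */
	u64_stats_update_begin(&b->syncp);
	u64_stats_add(&b->bytes, len);
	u64_stats_inc(&b->packets);
	u64_stats_update_end(&b->syncp);

	/* Reader: retry until both 32bit halves of each counter were read
	 * without a concurrent update.
	 */
	do {
		start = u64_stats_fetch_begin_irq(&b->syncp);
		bytes = u64_stats_read(&b->bytes);
		packets = u64_stats_read(&b->packets);
	} while (u64_stats_fetch_retry_irq(&b->syncp, start));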

The lack of disabled preemption now triggers a warning, as reported by
Naresh, since the query done by gnet_stats_copy_basic() runs in
preemptible context.

For ___gnet_stats_copy_basic() there is no need to disable preemption
since the update is performed on the stack and can't be modified by
another writer. To avoid the warning, instead of disabling preemption,
simply add a read function that just reads the values and returns them
as u64.
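The resulting call pattern, as a short sketch (the helper matches the one added by the patch below; the locals are illustrative only):

	u64 bytes, packets;

	/* No on-stack gnet_stats_basic_sync and no writer-side seqcount
	 * anymore: the values are read (and, for per-CPU stats, summed)
	 * straight into plain u64s, so nothing requires preemption to be
	 * disabled.
	 */
	gnet_stats_read_basic(&bytes, &packets, cpu, b, running);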

Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Fixes: 67c9e6270f301 ("net: sched: Protect Qdisc::bstats with u64_stats")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 net/core/gen_stats.c | 43 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 37 insertions(+), 6 deletions(-)

Comments

patchwork-bot+netdevbpf@kernel.org Oct. 21, 2021, 11:50 a.m. UTC | #1
Hello:

This patch was applied to netdev/net-next.git (master)
by David S. Miller <davem@davemloft.net>:

On Thu, 21 Oct 2021 11:59:19 +0200 you wrote:
> Since the rework, the statistics code always adds up the byte and packet
> value(s). On 32bit architectures a seqcount_t is used in
> gnet_stats_basic_sync to ensure that the 64bit values are not modified
> during the read since two 32bit loads are required. The usage of a
> seqcount_t requires a lock to ensure that only one writer is active at a
> time. This lock leads to disabled preemption during the update.
> 
> [...]

Here is the summary with links:
  - [net-next] net: stats: Read the statistics in ___gnet_stats_copy_basic() instead of adding.
    https://git.kernel.org/netdev/net-next/c/c5c6e589a8c8

You are awesome, thank you!

Patch

diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 15c270e22c5ef..a10335b4ba2d0 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -171,20 +171,51 @@  void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
 }
 EXPORT_SYMBOL(gnet_stats_add_basic);
 
+static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
+				  struct gnet_stats_basic_sync __percpu *cpu,
+				  struct gnet_stats_basic_sync *b, bool running)
+{
+	unsigned int start;
+
+	if (cpu) {
+		u64 t_bytes = 0, t_packets = 0;
+		int i;
+
+		for_each_possible_cpu(i) {
+			struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
+			unsigned int start;
+			u64 bytes, packets;
+
+			do {
+				start = u64_stats_fetch_begin_irq(&bcpu->syncp);
+				bytes = u64_stats_read(&bcpu->bytes);
+				packets = u64_stats_read(&bcpu->packets);
+			} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
+
+			t_bytes += bytes;
+			t_packets += packets;
+		}
+		*ret_bytes = t_bytes;
+		*ret_packets = t_packets;
+		return;
+	}
+	do {
+		if (running)
+			start = u64_stats_fetch_begin_irq(&b->syncp);
+		*ret_bytes = u64_stats_read(&b->bytes);
+		*ret_packets = u64_stats_read(&b->packets);
+	} while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
+}
+
 static int
 ___gnet_stats_copy_basic(struct gnet_dump *d,
 			 struct gnet_stats_basic_sync __percpu *cpu,
 			 struct gnet_stats_basic_sync *b,
 			 int type, bool running)
 {
-	struct gnet_stats_basic_sync bstats;
 	u64 bstats_bytes, bstats_packets;
 
-	gnet_stats_basic_sync_init(&bstats);
-	gnet_stats_add_basic(&bstats, cpu, b, running);
-
-	bstats_bytes = u64_stats_read(&bstats.bytes);
-	bstats_packets = u64_stats_read(&bstats.packets);
+	gnet_stats_read_basic(&bstats_bytes, &bstats_packets, cpu, b, running);
 
 	if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
 		d->tc_stats.bytes = bstats_bytes;