
[v2,net-next,07/26] mvneta: add .ndo_get_xdp_stats() callback

Message ID 20211123163955.154512-8-alexandr.lobakin@intel.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series net: introduce and use generic XDP stats

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count fail Series longer than 15 patches (and no cover letter)
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 2 this patch: 2
netdev/cc_maintainers success CCed 14 of 14 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2 this patch: 2
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 66 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Alexander Lobakin Nov. 23, 2021, 4:39 p.m. UTC
The mvneta driver maintains 7 XDP counters, all of them per-cpu, which
means we can only provide them as a global sum across CPUs.
Implement a callback for querying them using the generic XDP stats
infra.

Signed-off-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
---
 drivers/net/ethernet/marvell/mvneta.c | 54 +++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

--
2.33.1

Comments

Russell King (Oracle) Nov. 24, 2021, 11:39 a.m. UTC | #1
On Tue, Nov 23, 2021 at 05:39:36PM +0100, Alexander Lobakin wrote:
> +	for_each_possible_cpu(cpu) {
> +		const struct mvneta_pcpu_stats *stats;
> +		const struct mvneta_stats *ps;
> +		u64 xdp_xmit_err;
> +		u64 xdp_redirect;
> +		u64 xdp_tx_err;
> +		u64 xdp_pass;
> +		u64 xdp_drop;
> +		u64 xdp_xmit;
> +		u64 xdp_tx;
> +		u32 start;
> +
> +		stats = per_cpu_ptr(pp->stats, cpu);
> +		ps = &stats->es.ps;
> +
> +		do {
> +			start = u64_stats_fetch_begin_irq(&stats->syncp);
> +
> +			xdp_drop = ps->xdp_drop;
> +			xdp_pass = ps->xdp_pass;
> +			xdp_redirect = ps->xdp_redirect;
> +			xdp_tx = ps->xdp_tx;
> +			xdp_tx_err = ps->xdp_tx_err;
> +			xdp_xmit = ps->xdp_xmit;
> +			xdp_xmit_err = ps->xdp_xmit_err;
> +		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
> +
> +		xdp_stats->drop += xdp_drop;
> +		xdp_stats->pass += xdp_pass;
> +		xdp_stats->redirect += xdp_redirect;
> +		xdp_stats->tx += xdp_tx;
> +		xdp_stats->tx_errors += xdp_tx_err;
> +		xdp_stats->xmit_packets += xdp_xmit;
> +		xdp_stats->xmit_errors += xdp_xmit_err;

Same comment as for mvpp2 - this could share a lot of code from
mvneta_ethtool_update_pcpu_stats() (although it means we end up
calculating a little more for the alloc error and refill error
that this API doesn't need) but I think sharing that code would be
a good idea.
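
A minimal sketch of what such a shared helper could look like, based only on the accumulation loop in this patch; the helper name, the sum struct and where it would live are illustrative assumptions rather than anything proposed in the series:

/* Hypothetical shared helper: snapshot one CPU's XDP counters under the
 * u64_stats seqcount and accumulate them into a plain sum struct. Both
 * mvneta_ethtool_update_pcpu_stats() and mvneta_get_xdp_stats() could
 * then call it from their for_each_possible_cpu() loops.
 */
struct mvneta_xdp_sum {
	u64 drop, pass, redirect, tx, tx_err, xmit, xmit_err;
};

static void mvneta_xdp_add_pcpu_stats(const struct mvneta_pcpu_stats *stats,
				      struct mvneta_xdp_sum *acc)
{
	const struct mvneta_stats *ps = &stats->es.ps;
	struct mvneta_xdp_sum snap;
	u32 start;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);

		snap.drop     = ps->xdp_drop;
		snap.pass     = ps->xdp_pass;
		snap.redirect = ps->xdp_redirect;
		snap.tx       = ps->xdp_tx;
		snap.tx_err   = ps->xdp_tx_err;
		snap.xmit     = ps->xdp_xmit;
		snap.xmit_err = ps->xdp_xmit_err;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

	acc->drop     += snap.drop;
	acc->pass     += snap.pass;
	acc->redirect += snap.redirect;
	acc->tx       += snap.tx;
	acc->tx_err   += snap.tx_err;
	acc->xmit     += snap.xmit;
	acc->xmit_err += snap.xmit_err;
}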
Alexander Lobakin Nov. 25, 2021, 5:16 p.m. UTC | #2
From: Russell King (Oracle) <linux@armlinux.org.uk>
Date: Wed, 24 Nov 2021 11:39:05 +0000

> On Tue, Nov 23, 2021 at 05:39:36PM +0100, Alexander Lobakin wrote:
> > +	for_each_possible_cpu(cpu) {
> > +		const struct mvneta_pcpu_stats *stats;
> > +		const struct mvneta_stats *ps;
> > +		u64 xdp_xmit_err;
> > +		u64 xdp_redirect;
> > +		u64 xdp_tx_err;
> > +		u64 xdp_pass;
> > +		u64 xdp_drop;
> > +		u64 xdp_xmit;
> > +		u64 xdp_tx;
> > +		u32 start;
> > +
> > +		stats = per_cpu_ptr(pp->stats, cpu);
> > +		ps = &stats->es.ps;
> > +
> > +		do {
> > +			start = u64_stats_fetch_begin_irq(&stats->syncp);
> > +
> > +			xdp_drop = ps->xdp_drop;
> > +			xdp_pass = ps->xdp_pass;
> > +			xdp_redirect = ps->xdp_redirect;
> > +			xdp_tx = ps->xdp_tx;
> > +			xdp_tx_err = ps->xdp_tx_err;
> > +			xdp_xmit = ps->xdp_xmit;
> > +			xdp_xmit_err = ps->xdp_xmit_err;
> > +		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
> > +
> > +		xdp_stats->drop += xdp_drop;
> > +		xdp_stats->pass += xdp_pass;
> > +		xdp_stats->redirect += xdp_redirect;
> > +		xdp_stats->tx += xdp_tx;
> > +		xdp_stats->tx_errors += xdp_tx_err;
> > +		xdp_stats->xmit_packets += xdp_xmit;
> > +		xdp_stats->xmit_errors += xdp_xmit_err;
> 
> Same comment as for mvpp2 - this could share a lot of code from
> mvneta_ethtool_update_pcpu_stats() (although it means we end up
> calculating a little more for the alloc error and refill error
> that this API doesn't need) but I think sharing that code would be
> a good idea.

Ah, I didn't do that because in my first series I removed the Ethtool
counters entirely. In this one, I left them as-is because some folks
hinted that such counters (not specifically in mvpp2 or mvneta, but
say in virtio-net or so) may already be in use in admin scripts
somewhere in the world (with a TODO to figure out which drivers they
could safely be removed from and then do that).
It would be great if you could tell me whether those XDP-related
Ethtool counters can be removed from the Marvell drivers or not.
If so, I'll wipe them; otherwise I'll just factor out the common
parts to avoid code duplication.

> -- 
> RMK's Patch system: https://www.armlinux.org.uk/developer/patches/
> FTTP is here! 40Mbps down 10Mbps up. Decent connectivity at last!

Thanks,
Al

Patch

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7c30417a0464..5bb0bbfa1ee6 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -802,6 +802,59 @@  mvneta_get_stats64(struct net_device *dev,
 	stats->tx_dropped	= dev->stats.tx_dropped;
 }

+static int mvneta_get_xdp_stats(const struct net_device *dev, u32 attr_id,
+				void *attr_data)
+{
+	const struct mvneta_port *pp = netdev_priv(dev);
+	struct ifla_xdp_stats *xdp_stats = attr_data;
+	u32 cpu;
+
+	switch (attr_id) {
+	case IFLA_XDP_XSTATS_TYPE_XDP:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	for_each_possible_cpu(cpu) {
+		const struct mvneta_pcpu_stats *stats;
+		const struct mvneta_stats *ps;
+		u64 xdp_xmit_err;
+		u64 xdp_redirect;
+		u64 xdp_tx_err;
+		u64 xdp_pass;
+		u64 xdp_drop;
+		u64 xdp_xmit;
+		u64 xdp_tx;
+		u32 start;
+
+		stats = per_cpu_ptr(pp->stats, cpu);
+		ps = &stats->es.ps;
+
+		do {
+			start = u64_stats_fetch_begin_irq(&stats->syncp);
+
+			xdp_drop = ps->xdp_drop;
+			xdp_pass = ps->xdp_pass;
+			xdp_redirect = ps->xdp_redirect;
+			xdp_tx = ps->xdp_tx;
+			xdp_tx_err = ps->xdp_tx_err;
+			xdp_xmit = ps->xdp_xmit;
+			xdp_xmit_err = ps->xdp_xmit_err;
+		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+		xdp_stats->drop += xdp_drop;
+		xdp_stats->pass += xdp_pass;
+		xdp_stats->redirect += xdp_redirect;
+		xdp_stats->tx += xdp_tx;
+		xdp_stats->tx_errors += xdp_tx_err;
+		xdp_stats->xmit_packets += xdp_xmit;
+		xdp_stats->xmit_errors += xdp_xmit_err;
+	}
+
+	return 0;
+}
+
 /* Rx descriptors helper methods */

 /* Checks whether the RX descriptor having this status is both the first
@@ -4957,6 +5010,7 @@  static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_change_mtu		= mvneta_change_mtu,
 	.ndo_fix_features	= mvneta_fix_features,
 	.ndo_get_stats64	= mvneta_get_stats64,
+	.ndo_get_xdp_stats	= mvneta_get_xdp_stats,
 	.ndo_eth_ioctl		= mvneta_ioctl,
 	.ndo_bpf		= mvneta_xdp,
 	.ndo_xdp_xmit		= mvneta_xdp_xmit,
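
For reference, the xdp_stats fields filled by mvneta_get_xdp_stats() come from the generic XDP stats infrastructure introduced earlier in the series. Below is a rough sketch of the subset of struct ifla_xdp_stats used above, reconstructed purely from the field accesses in this patch; the real definition lives in the core patches of the series and may carry further members:

/* Reconstructed subset of struct ifla_xdp_stats, as used by this driver.
 * Field semantics follow the driver counters they are filled from.
 */
struct ifla_xdp_stats {
	__u64 drop;		/* frames dropped by the XDP program (xdp_drop) */
	__u64 pass;		/* frames passed up the stack (xdp_pass) */
	__u64 redirect;		/* frames redirected to another device (xdp_redirect) */
	__u64 tx;		/* frames sent back out via XDP_TX (xdp_tx) */
	__u64 tx_errors;	/* XDP_TX failures (xdp_tx_err) */
	__u64 xmit_packets;	/* frames sent via .ndo_xdp_xmit() (xdp_xmit) */
	__u64 xmit_errors;	/* .ndo_xdp_xmit() failures (xdp_xmit_err) */
};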