[net-next,v2,3/3] eth: bnxt: support per-queue statistics

Message ID 20240229010221.2408413-4-kuba@kernel.org (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series: netdev: add per-queue statistics

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; GEN HAS DIFF 2 files changed, 190 insertions(+);
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 940 this patch: 940
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang success Errors and warnings before: 957 this patch: 957
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 959 this patch: 959
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 75 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Jakub Kicinski Feb. 29, 2024, 1:02 a.m. UTC
Support per-queue statistics API in bnxt.

$ ethtool -S eth0
NIC statistics:
     [0]: rx_ucast_packets: 1418
     [0]: rx_mcast_packets: 178
     [0]: rx_bcast_packets: 0
     [0]: rx_discards: 0
     [0]: rx_errors: 0
     [0]: rx_ucast_bytes: 1141815
     [0]: rx_mcast_bytes: 16766
     [0]: rx_bcast_bytes: 0
     [0]: tx_ucast_packets: 1734
...

$ ./cli.py --spec netlink/specs/netdev.yaml \
   --dump qstats-get --json '{"scope": "queue"}'
[{'ifindex': 2,
  'queue-id': 0,
  'queue-type': 'rx',
  'rx-alloc-fail': 0,
  'rx-bytes': 1164931,
  'rx-packets': 1641},
...
 {'ifindex': 2,
  'queue-id': 0,
  'queue-type': 'tx',
  'tx-bytes': 631494,
  'tx-packets': 1771},
...

Reset the per-queue counters:
$ ethtool -L eth0 combined 4

Inspect again:

$ ./cli.py --spec netlink/specs/netdev.yaml \
   --dump qstats-get --json '{"scope": "queue"}'
[{'ifindex': 2,
  'queue-id': 0,
  'queue-type': 'rx',
  'rx-alloc-fail': 0,
  'rx-bytes': 32397,
  'rx-packets': 145},
...
 {'ifindex': 2,
  'queue-id': 0,
  'queue-type': 'tx',
  'tx-bytes': 37481,
  'tx-packets': 196},
...

$ ethtool -S eth0 | head
NIC statistics:
     [0]: rx_ucast_packets: 174
     [0]: rx_mcast_packets: 3
     [0]: rx_bcast_packets: 0
     [0]: rx_discards: 0
     [0]: rx_errors: 0
     [0]: rx_ucast_bytes: 37151
     [0]: rx_mcast_bytes: 267
     [0]: rx_bcast_bytes: 0
     [0]: tx_ucast_packets: 267
...

Totals are still correct:

$ ./cli.py --spec netlink/specs/netdev.yaml --dump qstats-get
[{'ifindex': 2,
  'rx-alloc-fail': 0,
  'rx-bytes': 281949995,
  'rx-packets': 216524,
  'tx-bytes': 52694905,
  'tx-packets': 75546}]
$ ip -s link show dev eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
    link/ether 14:23:f2:61:05:40 brd ff:ff:ff:ff:ff:ff
    RX:  bytes packets errors dropped  missed   mcast
     282519546  218100      0       0       0     516
    TX:  bytes packets errors dropped carrier collsns
      53323054   77674      0       0       0       0
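
The totals stay correct because bnxt_get_base_stats() reports the
counters accumulated before the last ring reconfiguration
(net_stats_prev), and the qstats core adds the live per-queue counters
on top. Conceptually, the aggregation looks like the sketch below --
this is illustrative only, not the actual net/core code, and the
totals_from_qstats name is made up:

static void totals_from_qstats(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx)
{
	const struct netdev_stat_ops *ops = dev->stat_ops;
	int i;

	/* Base stats cover everything the per-queue callbacks no longer
	 * see, e.g. counters from rings removed by ethtool -L.
	 */
	ops->get_base_stats(dev, rx, tx);

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct netdev_queue_stats_rx qrx = {};

		ops->get_queue_stats_rx(dev, i, &qrx);
		rx->packets += qrx.packets;
		rx->bytes += qrx.bytes;
		rx->alloc_fail += qrx.alloc_fail;
	}

	for (i = 0; i < dev->real_num_tx_queues; i++) {
		struct netdev_queue_stats_tx qtx = {};

		ops->get_queue_stats_tx(dev, i, &qtx);
		tx->packets += qtx.packets;
		tx->bytes += qtx.bytes;
	}
}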

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 63 +++++++++++++++++++++++
 1 file changed, 63 insertions(+)

Comments

Michael Chan Feb. 29, 2024, 3:40 a.m. UTC | #1
On Wed, Feb 28, 2024 at 5:02 PM Jakub Kicinski <kuba@kernel.org> wrote:
>
> Support per-queue statistics API in bnxt.
>
> $ ethtool -S eth0
> NIC statistics:
>      [0]: rx_ucast_packets: 1418
>      [0]: rx_mcast_packets: 178
>      [0]: rx_bcast_packets: 0
>      [0]: rx_discards: 0
>      [0]: rx_errors: 0
>      [0]: rx_ucast_bytes: 1141815
>      [0]: rx_mcast_bytes: 16766
>      [0]: rx_bcast_bytes: 0
>      [0]: tx_ucast_packets: 1734
> ...
> +
> +static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
> +                                   struct netdev_queue_stats_tx *stats)
> +{
> +       struct bnxt *bp = netdev_priv(dev);
> +       u64 *sw;
> +
> +       sw = bp->bnapi[i]->cp_ring.stats.sw_stats;

Sorry I missed this earlier.  When we are in XDP mode, the first set
of TX rings is generally hidden from the user.  The standard TX rings
don't start from index 0.  They start from bp->tx_nr_rings_xdp.
Should we adjust for that?

> +
> +       stats->packets = 0;
> +       stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
> +       stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
> +       stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
> +
> +       stats->bytes = 0;
> +       stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
> +       stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
> +       stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
> +}
> +
Jakub Kicinski Feb. 29, 2024, 3:52 a.m. UTC | #2
On Wed, 28 Feb 2024 19:40:01 -0800 Michael Chan wrote:
> > +static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
> > +                                   struct netdev_queue_stats_tx *stats)
> > +{
> > +       struct bnxt *bp = netdev_priv(dev);
> > +       u64 *sw;
> > +
> > +       sw = bp->bnapi[i]->cp_ring.stats.sw_stats;  
> 
> Sorry I missed this earlier.  When we are in XDP mode, the first set
> of TX rings is generally hidden from the user.  The standard TX rings
> don't start from index 0.  They start from bp->tx_nr_rings_xdp.
> Should we adjust for that?

I feel like I made this mistake before already... Will fix, thanks!
Jakub Kicinski March 6, 2024, 2:56 p.m. UTC | #3
On Wed, 28 Feb 2024 19:40:01 -0800 Michael Chan wrote:
> > +static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
> > +                                   struct netdev_queue_stats_tx *stats)
> > +{
> > +       struct bnxt *bp = netdev_priv(dev);
> > +       u64 *sw;
> > +
> > +       sw = bp->bnapi[i]->cp_ring.stats.sw_stats;  
> 
> Sorry I missed this earlier.  When we are in XDP mode, the first set
> of TX rings is generally hidden from the user.  The standard TX rings
> don't start from index 0.  They start from bp->tx_nr_rings_xdp.
> Should we adjust for that?

Hi Michael! Sorry for the delay; I was waiting for some related netlink
bits to get merged to simplify the core parts.

Not sure what the most idiomatic way would be to translate the indexes.

Do you prefer:

	bnapi = &bp->tx_ring[bp->tx_ring_map[i]].bnapi;
	sw = bnapi->cp_ring.stats.sw_stats;

or simply:

	sw = bp->bnapi[i - bp->tx_nr_rings_xdp]->cp_ring.stats.sw_stats;

or something else?
Michael Chan March 6, 2024, 6:10 p.m. UTC | #4
On Wed, Mar 6, 2024 at 6:56 AM Jakub Kicinski <kuba@kernel.org> wrote:
>
> On Wed, 28 Feb 2024 19:40:01 -0800 Michael Chan wrote:
> > Sorry I missed this earlier.  When we are in XDP mode, the first set
> > of TX rings is generally hidden from the user.  The standard TX rings
> > don't start from index 0.  They start from bp->tx_nr_rings_xdp.
> > Should we adjust for that?
>
> Hi Michael! Sorry for the delay, I was waiting for some related netlink
> bits to get merged to simplify the core parts.
>
> Not sure what the most idiomatic way would be to translate the indexes.
>
> Do you prefer:
>
>         bnapi = &bp->tx_ring[bp->tx_ring_map[i]].bnapi;
>         sw = bnapi->cp_ring.stats.sw_stats;

The two methods are interchangeable, but I think bp->tx_ring_map[] is
preferred: it makes it easier to change the underlying mapping scheme
in the future.

>
> or simply:
>
>         sw = bp->bnapi[i - bp->tx_nr_rings_xdp]->cp_ring.stats.sw_stats;
>
> or something else?
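
For reference, a sketch of bnxt_get_queue_stats_tx() with the
bp->tx_ring_map[] translation Michael prefers, combining the first
variant above with the body from the patch below. Illustrative only --
the superseding revision may differ. Note that bnxt_tx_ring_info's
bnapi member is already a pointer, so no & is needed:

static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
				    struct netdev_queue_stats_tx *stats)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_napi *bnapi;
	u64 *sw;

	/* Map the stack-visible queue index to the driver ring so the
	 * XDP TX rings (hidden from the user) are skipped.
	 */
	bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
	sw = bnapi->cp_ring.stats.sw_stats;

	stats->packets = 0;
	stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
	stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
	stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);

	stats->bytes = 0;
	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
}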

Patch

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a15e6d31fc22..97abde27d5fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -14523,6 +14523,68 @@  static const struct net_device_ops bnxt_netdev_ops = {
 	.ndo_bridge_setlink	= bnxt_bridge_setlink,
 };
 
+static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
+				    struct netdev_queue_stats_rx *stats)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_cp_ring_info *cpr;
+	u64 *sw;
+
+	cpr = &bp->bnapi[i]->cp_ring;
+	sw = cpr->stats.sw_stats;
+
+	stats->packets = 0;
+	stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+	stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+	stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
+
+	stats->bytes = 0;
+	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
+
+	stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
+}
+
+static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
+				    struct netdev_queue_stats_tx *stats)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	u64 *sw;
+
+	sw = bp->bnapi[i]->cp_ring.stats.sw_stats;
+
+	stats->packets = 0;
+	stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+	stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+	stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
+
+	stats->bytes = 0;
+	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
+}
+
+static void bnxt_get_base_stats(struct net_device *dev,
+				struct netdev_queue_stats_rx *rx,
+				struct netdev_queue_stats_tx *tx)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	rx->packets = bp->net_stats_prev.rx_packets;
+	rx->bytes = bp->net_stats_prev.rx_bytes;
+	rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
+
+	tx->packets = bp->net_stats_prev.tx_packets;
+	tx->bytes = bp->net_stats_prev.tx_bytes;
+}
+
+static const struct netdev_stat_ops bnxt_stat_ops = {
+	.get_queue_stats_rx	= bnxt_get_queue_stats_rx,
+	.get_queue_stats_tx	= bnxt_get_queue_stats_tx,
+	.get_base_stats		= bnxt_get_base_stats,
+};
+
 static void bnxt_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -14970,6 +15032,7 @@  static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto init_err_free;
 
 	dev->netdev_ops = &bnxt_netdev_ops;
+	dev->stat_ops = &bnxt_stat_ops;
 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
 	dev->ethtool_ops = &bnxt_ethtool_ops;
 	pci_set_drvdata(pdev, dev);