diff mbox series

[net-next,2/2] virtio_net: Add TX stopped and wake counters

Message ID 20240509163216.108665-3-danielj@nvidia.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series Add TX stop/wake counters | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl fail Tree is dirty after regen; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 926 this patch: 926
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 8 of 8 maintainers
netdev/build_clang success Errors and warnings before: 937 this patch: 937
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 937 this patch: 937
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 72 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Daniel Jurgens May 9, 2024, 4:32 p.m. UTC
Add tx queue stop and wake counters; they are useful for debugging.

$ ./tools/net/ynl/cli.py --spec netlink/specs/netdev.yaml \
--dump qstats-get --json '{"scope": "queue"}'
...
 {'ifindex': 13,
  'queue-id': 0,
  'queue-type': 'tx',
  'tx-bytes': 14756682850,
  'tx-packets': 226465,
  'tx-stop': 113208,
  'tx-wake': 113208},
 {'ifindex': 13,
  'queue-id': 1,
  'queue-type': 'tx',
  'tx-bytes': 18167675008,
  'tx-packets': 278660,
  'tx-stop': 8632,
  'tx-wake': 8632}]

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
 drivers/net/virtio_net.c | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

Comments

Xuan Zhuo May 10, 2024, 1:21 a.m. UTC | #1
On Thu, 9 May 2024 11:32:16 -0500, Daniel Jurgens <danielj@nvidia.com> wrote:
> Add a tx queue stop and wake counters, they are useful for debugging.
>
> $ ./tools/net/ynl/cli.py --spec netlink/specs/netdev.yaml \
> --dump qstats-get --json '{"scope": "queue"}'
> ...
>  {'ifindex': 13,
>   'queue-id': 0,
>   'queue-type': 'tx',
>   'tx-bytes': 14756682850,
>   'tx-packets': 226465,
>   'tx-stop': 113208,
>   'tx-wake': 113208},
>  {'ifindex': 13,
>   'queue-id': 1,
>   'queue-type': 'tx',
>   'tx-bytes': 18167675008,
>   'tx-packets': 278660,
>   'tx-stop': 8632,
>   'tx-wake': 8632}]
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> ---
>  drivers/net/virtio_net.c | 28 ++++++++++++++++++++++++++--
>  1 file changed, 26 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 218a446c4c27..df6121c38a1b 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -95,6 +95,8 @@ struct virtnet_sq_stats {
>  	u64_stats_t xdp_tx_drops;
>  	u64_stats_t kicks;
>  	u64_stats_t tx_timeouts;
> +	u64_stats_t stop;
> +	u64_stats_t wake;
>  };
>
>  struct virtnet_rq_stats {
> @@ -145,6 +147,8 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
>  static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
>  	VIRTNET_SQ_STAT_QSTAT("packets", packets),
>  	VIRTNET_SQ_STAT_QSTAT("bytes",   bytes),
> +	VIRTNET_SQ_STAT_QSTAT("stop",	 stop),
> +	VIRTNET_SQ_STAT_QSTAT("wake",	 wake),
>  };
>
>  static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
> @@ -1014,6 +1018,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
>  	 */
>  	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
>  		netif_stop_subqueue(dev, qnum);
> +		u64_stats_update_begin(&sq->stats.syncp);
> +		u64_stats_inc(&sq->stats.stop);
> +		u64_stats_update_end(&sq->stats.syncp);

How about introducing two helpers to wrap
netif_tx_queue_stopped and netif_start_subqueue?

>  		if (use_napi) {
>  			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
>  				virtqueue_napi_schedule(&sq->napi, sq->vq);
> @@ -1022,6 +1029,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
>  			free_old_xmit(sq, false);
>  			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
>  				netif_start_subqueue(dev, qnum);
> +				u64_stats_update_begin(&sq->stats.syncp);
> +				u64_stats_inc(&sq->stats.wake);
> +				u64_stats_update_end(&sq->stats.syncp);

If we start the queue immediately, should we update the counter?

Thanks.

>  				virtqueue_disable_cb(sq->vq);
>  			}
>  		}
> @@ -2322,8 +2332,14 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
>  			free_old_xmit(sq, true);
>  		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
>
> -		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> +		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
> +			if (netif_tx_queue_stopped(txq)) {
> +				u64_stats_update_begin(&sq->stats.syncp);
> +				u64_stats_inc(&sq->stats.wake);
> +				u64_stats_update_end(&sq->stats.syncp);
> +			}
>  			netif_tx_wake_queue(txq);
> +		}
>
>  		__netif_tx_unlock(txq);
>  	}
> @@ -2473,8 +2489,14 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
>  	virtqueue_disable_cb(sq->vq);
>  	free_old_xmit(sq, true);
>
> -	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> +	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
> +		if (netif_tx_queue_stopped(txq)) {
> +			u64_stats_update_begin(&sq->stats.syncp);
> +			u64_stats_inc(&sq->stats.wake);
> +			u64_stats_update_end(&sq->stats.syncp);
> +		}
>  		netif_tx_wake_queue(txq);
> +	}
>
>  	opaque = virtqueue_enable_cb_prepare(sq->vq);
>
> @@ -4790,6 +4812,8 @@ static void virtnet_get_base_stats(struct net_device *dev,
>
>  	tx->bytes = 0;
>  	tx->packets = 0;
> +	tx->stop = 0;
> +	tx->wake = 0;
>
>  	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
>  		tx->hw_drops = 0;
> --
> 2.44.0
>
Daniel Jurgens May 10, 2024, 3:35 a.m. UTC | #2
> From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Sent: Thursday, May 9, 2024 8:22 PM
> To: Dan Jurgens <danielj@nvidia.com>
> Cc: mst@redhat.com; jasowang@redhat.com; xuanzhuo@linux.alibaba.com;
> virtualization@lists.linux.dev; davem@davemloft.net;
> edumazet@google.com; kuba@kernel.org; pabeni@redhat.com; Jiri Pirko
> <jiri@nvidia.com>; Dan Jurgens <danielj@nvidia.com>;
> netdev@vger.kernel.org
> Subject: Re: [PATCH net-next 2/2] virtio_net: Add TX stopped and wake
> counters
> 
> On Thu, 9 May 2024 11:32:16 -0500, Daniel Jurgens <danielj@nvidia.com>
> wrote:
> > Add a tx queue stop and wake counters, they are useful for debugging.
> >
> > $ ./tools/net/ynl/cli.py --spec netlink/specs/netdev.yaml \ --dump
> > qstats-get --json '{"scope": "queue"}'
> > ...
> >  {'ifindex': 13,
> >   'queue-id': 0,
> >   'queue-type': 'tx',
> >   'tx-bytes': 14756682850,
> >   'tx-packets': 226465,
> >   'tx-stop': 113208,
> >   'tx-wake': 113208},
> >  {'ifindex': 13,
> >   'queue-id': 1,
> >   'queue-type': 'tx',
> >   'tx-bytes': 18167675008,
> >   'tx-packets': 278660,
> >   'tx-stop': 8632,
> >   'tx-wake': 8632}]
> >
> > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> > Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> > ---
> >  drivers/net/virtio_net.c | 28 ++++++++++++++++++++++++++--
> >  1 file changed, 26 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index
> > 218a446c4c27..df6121c38a1b 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -95,6 +95,8 @@ struct virtnet_sq_stats {
> >  	u64_stats_t xdp_tx_drops;
> >  	u64_stats_t kicks;
> >  	u64_stats_t tx_timeouts;
> > +	u64_stats_t stop;
> > +	u64_stats_t wake;
> >  };
> >
> >  struct virtnet_rq_stats {
> > @@ -145,6 +147,8 @@ static const struct virtnet_stat_desc
> > virtnet_rq_stats_desc[] = {  static const struct virtnet_stat_desc
> virtnet_sq_stats_desc_qstat[] = {
> >  	VIRTNET_SQ_STAT_QSTAT("packets", packets),
> >  	VIRTNET_SQ_STAT_QSTAT("bytes",   bytes),
> > +	VIRTNET_SQ_STAT_QSTAT("stop",	 stop),
> > +	VIRTNET_SQ_STAT_QSTAT("wake",	 wake),
> >  };
> >
> >  static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] =
> > { @@ -1014,6 +1018,9 @@ static void check_sq_full_and_disable(struct
> virtnet_info *vi,
> >  	 */
> >  	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
> >  		netif_stop_subqueue(dev, qnum);
> > +		u64_stats_update_begin(&sq->stats.syncp);
> > +		u64_stats_inc(&sq->stats.stop);
> > +		u64_stats_update_end(&sq->stats.syncp);
> 
> How about introduce two helpers to wrap
> netif_tx_queue_stopped and netif_start_subqueue?
> 
> >  		if (use_napi) {
> >  			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
> >  				virtqueue_napi_schedule(&sq->napi, sq-
> >vq); @@ -1022,6 +1029,9 @@
> > static void check_sq_full_and_disable(struct virtnet_info *vi,
> >  			free_old_xmit(sq, false);
> >  			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> >  				netif_start_subqueue(dev, qnum);
> > +				u64_stats_update_begin(&sq->stats.syncp);
> > +				u64_stats_inc(&sq->stats.wake);
> > +				u64_stats_update_end(&sq->stats.syncp);
> 
> If we start the queue immediately, should we update the counter?

I intentionally only counted the wakes on restarts after stopping the queue.
I don't think counting the initial wake adds any value since it always happens.

> 
> Thanks.
> 
> >  				virtqueue_disable_cb(sq->vq);
> >  			}
> >  		}
> > @@ -2322,8 +2332,14 @@ static void virtnet_poll_cleantx(struct
> receive_queue *rq)
> >  			free_old_xmit(sq, true);
> >  		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
> >
> > -		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> > +		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
> > +			if (netif_tx_queue_stopped(txq)) {
> > +				u64_stats_update_begin(&sq->stats.syncp);
> > +				u64_stats_inc(&sq->stats.wake);
> > +				u64_stats_update_end(&sq->stats.syncp);
> > +			}
> >  			netif_tx_wake_queue(txq);
> > +		}
> >
> >  		__netif_tx_unlock(txq);
> >  	}
> > @@ -2473,8 +2489,14 @@ static int virtnet_poll_tx(struct napi_struct
> *napi, int budget)
> >  	virtqueue_disable_cb(sq->vq);
> >  	free_old_xmit(sq, true);
> >
> > -	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> > +	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
> > +		if (netif_tx_queue_stopped(txq)) {
> > +			u64_stats_update_begin(&sq->stats.syncp);
> > +			u64_stats_inc(&sq->stats.wake);
> > +			u64_stats_update_end(&sq->stats.syncp);
> > +		}
> >  		netif_tx_wake_queue(txq);
> > +	}
> >
> >  	opaque = virtqueue_enable_cb_prepare(sq->vq);
> >
> > @@ -4790,6 +4812,8 @@ static void virtnet_get_base_stats(struct
> > net_device *dev,
> >
> >  	tx->bytes = 0;
> >  	tx->packets = 0;
> > +	tx->stop = 0;
> > +	tx->wake = 0;
> >
> >  	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
> >  		tx->hw_drops = 0;
> > --
> > 2.44.0
> >
Xuan Zhuo May 10, 2024, 6:48 a.m. UTC | #3
On Fri, 10 May 2024 03:35:51 +0000, Dan Jurgens <danielj@nvidia.com> wrote:
> > From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > Sent: Thursday, May 9, 2024 8:22 PM
> > To: Dan Jurgens <danielj@nvidia.com>
> > Cc: mst@redhat.com; jasowang@redhat.com; xuanzhuo@linux.alibaba.com;
> > virtualization@lists.linux.dev; davem@davemloft.net;
> > edumazet@google.com; kuba@kernel.org; pabeni@redhat.com; Jiri Pirko
> > <jiri@nvidia.com>; Dan Jurgens <danielj@nvidia.com>;
> > netdev@vger.kernel.org
> > Subject: Re: [PATCH net-next 2/2] virtio_net: Add TX stopped and wake
> > counters
> >
> > On Thu, 9 May 2024 11:32:16 -0500, Daniel Jurgens <danielj@nvidia.com>
> > wrote:
> > > Add a tx queue stop and wake counters, they are useful for debugging.
> > >
> > > $ ./tools/net/ynl/cli.py --spec netlink/specs/netdev.yaml \ --dump
> > > qstats-get --json '{"scope": "queue"}'
> > > ...
> > >  {'ifindex': 13,
> > >   'queue-id': 0,
> > >   'queue-type': 'tx',
> > >   'tx-bytes': 14756682850,
> > >   'tx-packets': 226465,
> > >   'tx-stop': 113208,
> > >   'tx-wake': 113208},
> > >  {'ifindex': 13,
> > >   'queue-id': 1,
> > >   'queue-type': 'tx',
> > >   'tx-bytes': 18167675008,
> > >   'tx-packets': 278660,
> > >   'tx-stop': 8632,
> > >   'tx-wake': 8632}]
> > >
> > > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> > > Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> > > ---
> > >  drivers/net/virtio_net.c | 28 ++++++++++++++++++++++++++--
> > >  1 file changed, 26 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index
> > > 218a446c4c27..df6121c38a1b 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -95,6 +95,8 @@ struct virtnet_sq_stats {
> > >  	u64_stats_t xdp_tx_drops;
> > >  	u64_stats_t kicks;
> > >  	u64_stats_t tx_timeouts;
> > > +	u64_stats_t stop;
> > > +	u64_stats_t wake;
> > >  };
> > >
> > >  struct virtnet_rq_stats {
> > > @@ -145,6 +147,8 @@ static const struct virtnet_stat_desc
> > > virtnet_rq_stats_desc[] = {  static const struct virtnet_stat_desc
> > virtnet_sq_stats_desc_qstat[] = {
> > >  	VIRTNET_SQ_STAT_QSTAT("packets", packets),
> > >  	VIRTNET_SQ_STAT_QSTAT("bytes",   bytes),
> > > +	VIRTNET_SQ_STAT_QSTAT("stop",	 stop),
> > > +	VIRTNET_SQ_STAT_QSTAT("wake",	 wake),
> > >  };
> > >
> > >  static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] =
> > > { @@ -1014,6 +1018,9 @@ static void check_sq_full_and_disable(struct
> > virtnet_info *vi,
> > >  	 */
> > >  	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
> > >  		netif_stop_subqueue(dev, qnum);
> > > +		u64_stats_update_begin(&sq->stats.syncp);
> > > +		u64_stats_inc(&sq->stats.stop);
> > > +		u64_stats_update_end(&sq->stats.syncp);
> >
> > How about introduce two helpers to wrap
> > netif_tx_queue_stopped and netif_start_subqueue?
> >
> > >  		if (use_napi) {
> > >  			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
> > >  				virtqueue_napi_schedule(&sq->napi, sq-
> > >vq); @@ -1022,6 +1029,9 @@
> > > static void check_sq_full_and_disable(struct virtnet_info *vi,
> > >  			free_old_xmit(sq, false);
> > >  			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> > >  				netif_start_subqueue(dev, qnum);
> > > +				u64_stats_update_begin(&sq->stats.syncp);
> > > +				u64_stats_inc(&sq->stats.wake);
> > > +				u64_stats_update_end(&sq->stats.syncp);
> >
> > If we start the queue immediately, should we update the counter?
>
> I intentionally only counted the wakes on restarts after stopping the queue.
> I don't think counting the initial wake adds any value since it always happens.

Here, we start the queue immediately after the queue is stopped.
So from the upper layer's point of view, the queue "has not" changed
status; I think we do not need to update the wake counter.

Thanks.


>
> >
> > Thanks.
> >
> > >  				virtqueue_disable_cb(sq->vq);
> > >  			}
> > >  		}
> > > @@ -2322,8 +2332,14 @@ static void virtnet_poll_cleantx(struct
> > receive_queue *rq)
> > >  			free_old_xmit(sq, true);
> > >  		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
> > >
> > > -		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> > > +		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
> > > +			if (netif_tx_queue_stopped(txq)) {
> > > +				u64_stats_update_begin(&sq->stats.syncp);
> > > +				u64_stats_inc(&sq->stats.wake);
> > > +				u64_stats_update_end(&sq->stats.syncp);
> > > +			}
> > >  			netif_tx_wake_queue(txq);
> > > +		}
> > >
> > >  		__netif_tx_unlock(txq);
> > >  	}
> > > @@ -2473,8 +2489,14 @@ static int virtnet_poll_tx(struct napi_struct
> > *napi, int budget)
> > >  	virtqueue_disable_cb(sq->vq);
> > >  	free_old_xmit(sq, true);
> > >
> > > -	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> > > +	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
> > > +		if (netif_tx_queue_stopped(txq)) {
> > > +			u64_stats_update_begin(&sq->stats.syncp);
> > > +			u64_stats_inc(&sq->stats.wake);
> > > +			u64_stats_update_end(&sq->stats.syncp);
> > > +		}
> > >  		netif_tx_wake_queue(txq);
> > > +	}
> > >
> > >  	opaque = virtqueue_enable_cb_prepare(sq->vq);
> > >
> > > @@ -4790,6 +4812,8 @@ static void virtnet_get_base_stats(struct
> > > net_device *dev,
> > >
> > >  	tx->bytes = 0;
> > >  	tx->packets = 0;
> > > +	tx->stop = 0;
> > > +	tx->wake = 0;
> > >
> > >  	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
> > >  		tx->hw_drops = 0;
> > > --
> > > 2.44.0
> > >
>
Daniel Jurgens May 10, 2024, 4:56 p.m. UTC | #4
> From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Sent: Friday, May 10, 2024 1:48 AM
> To: Dan Jurgens <danielj@nvidia.com>
> Cc: mst@redhat.com; jasowang@redhat.com; virtualization@lists.linux.dev;
> davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; Jiri Pirko <jiri@nvidia.com>; netdev@vger.kernel.org
> Subject: Re: RE: [PATCH net-next 2/2] virtio_net: Add TX stopped and wake
> counters
> 
> On Fri, 10 May 2024 03:35:51 +0000, Dan Jurgens <danielj@nvidia.com>
> wrote:
> > > From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > Sent: Thursday, May 9, 2024 8:22 PM
> > > To: Dan Jurgens <danielj@nvidia.com>
> > > Cc: mst@redhat.com; jasowang@redhat.com;
> xuanzhuo@linux.alibaba.com;
> > > virtualization@lists.linux.dev; davem@davemloft.net;
> > > edumazet@google.com; kuba@kernel.org; pabeni@redhat.com; Jiri
> Pirko
> > > <jiri@nvidia.com>; Dan Jurgens <danielj@nvidia.com>;
> > > netdev@vger.kernel.org
> > > Subject: Re: [PATCH net-next 2/2] virtio_net: Add TX stopped and
> > > wake counters
> > >
> > > On Thu, 9 May 2024 11:32:16 -0500, Daniel Jurgens
> > > <danielj@nvidia.com>
> > > wrote:
> > > > Add a tx queue stop and wake counters, they are useful for debugging.
> > > >
> > > > $ ./tools/net/ynl/cli.py --spec netlink/specs/netdev.yaml \ --dump
> > > > qstats-get --json '{"scope": "queue"}'
> > > > ...
> > > >  {'ifindex': 13,
> > > >   'queue-id': 0,
> > > >   'queue-type': 'tx',
> > > >   'tx-bytes': 14756682850,
> > > >   'tx-packets': 226465,
> > > >   'tx-stop': 113208,
> > > >   'tx-wake': 113208},
> > > >  {'ifindex': 13,
> > > >   'queue-id': 1,
> > > >   'queue-type': 'tx',
> > > >   'tx-bytes': 18167675008,
> > > >   'tx-packets': 278660,
> > > >   'tx-stop': 8632,
> > > >   'tx-wake': 8632}]
> > > >
> > > > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> > > > Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> > > > ---
> > > >  drivers/net/virtio_net.c | 28 ++++++++++++++++++++++++++--
> > > >  1 file changed, 26 insertions(+), 2 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index 218a446c4c27..df6121c38a1b 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -95,6 +95,8 @@ struct virtnet_sq_stats {
> > > >  	u64_stats_t xdp_tx_drops;
> > > >  	u64_stats_t kicks;
> > > >  	u64_stats_t tx_timeouts;
> > > > +	u64_stats_t stop;
> > > > +	u64_stats_t wake;
> > > >  };
> > > >
> > > >  struct virtnet_rq_stats {
> > > > @@ -145,6 +147,8 @@ static const struct virtnet_stat_desc
> > > > virtnet_rq_stats_desc[] = {  static const struct virtnet_stat_desc
> > > virtnet_sq_stats_desc_qstat[] = {
> > > >  	VIRTNET_SQ_STAT_QSTAT("packets", packets),
> > > >  	VIRTNET_SQ_STAT_QSTAT("bytes",   bytes),
> > > > +	VIRTNET_SQ_STAT_QSTAT("stop",	 stop),
> > > > +	VIRTNET_SQ_STAT_QSTAT("wake",	 wake),
> > > >  };
> > > >
> > > >  static const struct virtnet_stat_desc
> > > > virtnet_rq_stats_desc_qstat[] = { @@ -1014,6 +1018,9 @@ static
> > > > void check_sq_full_and_disable(struct
> > > virtnet_info *vi,
> > > >  	 */
> > > >  	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
> > > >  		netif_stop_subqueue(dev, qnum);
> > > > +		u64_stats_update_begin(&sq->stats.syncp);
> > > > +		u64_stats_inc(&sq->stats.stop);
> > > > +		u64_stats_update_end(&sq->stats.syncp);
> > >
> > > How about introduce two helpers to wrap netif_tx_queue_stopped and
> > > netif_start_subqueue?
> > >
> > > >  		if (use_napi) {
> > > >  			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
> > > >  				virtqueue_napi_schedule(&sq->napi, sq- vq);
> @@ -1022,6
> > > >+1029,9 @@  static void check_sq_full_and_disable(struct
> > > >virtnet_info *vi,
> > > >  			free_old_xmit(sq, false);
> > > >  			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> > > >  				netif_start_subqueue(dev, qnum);
> > > > +				u64_stats_update_begin(&sq->stats.syncp);
> > > > +				u64_stats_inc(&sq->stats.wake);
> > > > +				u64_stats_update_end(&sq->stats.syncp);
> > >
> > > If we start the queue immediately, should we update the counter?
> >
> > I intentionally only counted the wakes on restarts after stopping the
> queue.
> > I don't think counting the initial wake adds any value since it always
> happens.
> 
> Here, we start the queue immediately after the queue is stopped.
> So for the upper layer, the queue "has not" changed the status, I think we do
> not need to update the wake counter.

I think we should. We incremented the stop counter. If they get out of sync, the wake counter loses any functionality.

> 
> Thanks.
> 
> 
> >
> > >
> > > Thanks.
> > >
> > > >  				virtqueue_disable_cb(sq->vq);
> > > >  			}
> > > >  		}
> > > > @@ -2322,8 +2332,14 @@ static void virtnet_poll_cleantx(struct
> > > receive_queue *rq)
> > > >  			free_old_xmit(sq, true);
> > > >  		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
> > > >
> > > > -		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> > > > +		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
> > > > +			if (netif_tx_queue_stopped(txq)) {
> > > > +				u64_stats_update_begin(&sq->stats.syncp);
> > > > +				u64_stats_inc(&sq->stats.wake);
> > > > +				u64_stats_update_end(&sq->stats.syncp);
> > > > +			}
> > > >  			netif_tx_wake_queue(txq);
> > > > +		}
> > > >
> > > >  		__netif_tx_unlock(txq);
> > > >  	}
> > > > @@ -2473,8 +2489,14 @@ static int virtnet_poll_tx(struct
> > > > napi_struct
> > > *napi, int budget)
> > > >  	virtqueue_disable_cb(sq->vq);
> > > >  	free_old_xmit(sq, true);
> > > >
> > > > -	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> > > > +	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
> > > > +		if (netif_tx_queue_stopped(txq)) {
> > > > +			u64_stats_update_begin(&sq->stats.syncp);
> > > > +			u64_stats_inc(&sq->stats.wake);
> > > > +			u64_stats_update_end(&sq->stats.syncp);
> > > > +		}
> > > >  		netif_tx_wake_queue(txq);
> > > > +	}
> > > >
> > > >  	opaque = virtqueue_enable_cb_prepare(sq->vq);
> > > >
> > > > @@ -4790,6 +4812,8 @@ static void virtnet_get_base_stats(struct
> > > > net_device *dev,
> > > >
> > > >  	tx->bytes = 0;
> > > >  	tx->packets = 0;
> > > > +	tx->stop = 0;
> > > > +	tx->wake = 0;
> > > >
> > > >  	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
> > > >  		tx->hw_drops = 0;
> > > > --
> > > > 2.44.0
> > > >
> >
diff mbox series

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 218a446c4c27..df6121c38a1b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -95,6 +95,8 @@  struct virtnet_sq_stats {
 	u64_stats_t xdp_tx_drops;
 	u64_stats_t kicks;
 	u64_stats_t tx_timeouts;
+	u64_stats_t stop;
+	u64_stats_t wake;
 };
 
 struct virtnet_rq_stats {
@@ -145,6 +147,8 @@  static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
 	VIRTNET_SQ_STAT_QSTAT("packets", packets),
 	VIRTNET_SQ_STAT_QSTAT("bytes",   bytes),
+	VIRTNET_SQ_STAT_QSTAT("stop",	 stop),
+	VIRTNET_SQ_STAT_QSTAT("wake",	 wake),
 };
 
 static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
@@ -1014,6 +1018,9 @@  static void check_sq_full_and_disable(struct virtnet_info *vi,
 	 */
 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
 		netif_stop_subqueue(dev, qnum);
+		u64_stats_update_begin(&sq->stats.syncp);
+		u64_stats_inc(&sq->stats.stop);
+		u64_stats_update_end(&sq->stats.syncp);
 		if (use_napi) {
 			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
 				virtqueue_napi_schedule(&sq->napi, sq->vq);
@@ -1022,6 +1029,9 @@  static void check_sq_full_and_disable(struct virtnet_info *vi,
 			free_old_xmit(sq, false);
 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 				netif_start_subqueue(dev, qnum);
+				u64_stats_update_begin(&sq->stats.syncp);
+				u64_stats_inc(&sq->stats.wake);
+				u64_stats_update_end(&sq->stats.syncp);
 				virtqueue_disable_cb(sq->vq);
 			}
 		}
@@ -2322,8 +2332,14 @@  static void virtnet_poll_cleantx(struct receive_queue *rq)
 			free_old_xmit(sq, true);
 		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
 
-		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+			if (netif_tx_queue_stopped(txq)) {
+				u64_stats_update_begin(&sq->stats.syncp);
+				u64_stats_inc(&sq->stats.wake);
+				u64_stats_update_end(&sq->stats.syncp);
+			}
 			netif_tx_wake_queue(txq);
+		}
 
 		__netif_tx_unlock(txq);
 	}
@@ -2473,8 +2489,14 @@  static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	virtqueue_disable_cb(sq->vq);
 	free_old_xmit(sq, true);
 
-	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+		if (netif_tx_queue_stopped(txq)) {
+			u64_stats_update_begin(&sq->stats.syncp);
+			u64_stats_inc(&sq->stats.wake);
+			u64_stats_update_end(&sq->stats.syncp);
+		}
 		netif_tx_wake_queue(txq);
+	}
 
 	opaque = virtqueue_enable_cb_prepare(sq->vq);
 
@@ -4790,6 +4812,8 @@  static void virtnet_get_base_stats(struct net_device *dev,
 
 	tx->bytes = 0;
 	tx->packets = 0;
+	tx->stop = 0;
+	tx->wake = 0;
 
 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
 		tx->hw_drops = 0;