
[net-next,3/3] virtio_net: Map NAPIs to queues

Message ID: 20250110202605.429475-4-jdamato@fastly.com (mailing list archive)
State: New
Delegated to: Netdev Maintainers
Series: virtio_net: Link queues to NAPIs

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1 this patch: 1
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 10 of 10 maintainers
netdev/build_clang success Errors and warnings before: 2 this patch: 2
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 1 this patch: 1
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 69 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest pending net-next-2025-01-11--00-00 (tests: 0)

Commit Message

Joe Damato Jan. 10, 2025, 8:26 p.m. UTC
Use netif_queue_set_napi to map NAPIs to queue IDs so that the mapping
can be accessed by user apps.

$ ethtool -i ens4 | grep driver
driver: virtio_net

$ sudo ethtool -L ens4 combined 4

$ ./tools/net/ynl/pyynl/cli.py \
       --spec Documentation/netlink/specs/netdev.yaml \
       --dump queue-get --json='{"ifindex": 2}'
[{'id': 0, 'ifindex': 2, 'napi-id': 8289, 'type': 'rx'},
 {'id': 1, 'ifindex': 2, 'napi-id': 8290, 'type': 'rx'},
 {'id': 2, 'ifindex': 2, 'napi-id': 8291, 'type': 'rx'},
 {'id': 3, 'ifindex': 2, 'napi-id': 8292, 'type': 'rx'},
 {'id': 0, 'ifindex': 2, 'type': 'tx'},
 {'id': 1, 'ifindex': 2, 'type': 'tx'},
 {'id': 2, 'ifindex': 2, 'type': 'tx'},
 {'id': 3, 'ifindex': 2, 'type': 'tx'}]

Note that virtio_net has TX-only NAPIs which do not have NAPI IDs, so
the lack of 'napi-id' in the above output is expected.

Signed-off-by: Joe Damato <jdamato@fastly.com>
---
 drivers/net/virtio_net.c | 29 ++++++++++++++++++++++++++---
 1 file changed, 26 insertions(+), 3 deletions(-)
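
For reference, the same queue-to-NAPI mapping can be read programmatically
with the in-tree YNL Python library instead of the CLI. A minimal sketch,
assuming it runs from the kernel source tree root with tools/net/ynl/lib on
the Python path (the import path and ifindex 2 are illustrative, not part of
the patch):

import sys
sys.path.append('tools/net/ynl/lib')

from ynl import YnlFamily  # in-tree YNL helper library

ynl = YnlFamily('Documentation/netlink/specs/netdev.yaml')

# Equivalent of: cli.py --dump queue-get --json='{"ifindex": 2}'
for q in ynl.dump('queue-get', {'ifindex': 2}):
    # TX-only NAPIs have no NAPI ID, so 'napi-id' may be absent.
    print(q['type'], q['id'], q.get('napi-id'))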

Comments

Gerhard Engleder Jan. 10, 2025, 10:25 p.m. UTC | #1
On 10.01.25 21:26, Joe Damato wrote:
> Use netif_queue_set_napi to map NAPIs to queue IDs so that the mapping
> can be accessed by user apps.
> 
> [...]

Reviewed-by: Gerhard Engleder <gerhard@engleder-embedded.com>

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4e88d352d3eb..8f0f26cc5a94 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2804,14 +2804,28 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
 }
 
 static void virtnet_napi_enable_lock(struct virtqueue *vq,
-				     struct napi_struct *napi)
+				     struct napi_struct *napi,
+				     bool need_rtnl)
 {
+	struct virtnet_info *vi = vq->vdev->priv;
+	int q = vq2rxq(vq);
+
 	virtnet_napi_do_enable(vq, napi);
+
+	if (q < vi->curr_queue_pairs) {
+		if (need_rtnl)
+			rtnl_lock();
+
+		netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, napi);
+
+		if (need_rtnl)
+			rtnl_unlock();
+	}
 }
 
 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
 {
-	virtnet_napi_enable_lock(vq, napi);
+	virtnet_napi_enable_lock(vq, napi, false);
 }
 
 static void virtnet_napi_tx_enable(struct virtnet_info *vi,
@@ -2848,9 +2862,13 @@ static void refill_work(struct work_struct *work)
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
 
+		rtnl_lock();
+		netif_queue_set_napi(vi->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+		rtnl_unlock();
 		napi_disable(&rq->napi);
+
 		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
-		virtnet_napi_enable_lock(rq->vq, &rq->napi);
+		virtnet_napi_enable_lock(rq->vq, &rq->napi, true);
 
 		/* In theory, this can happen: if we don't get any buffers in
 		 * we will *never* try to fill again.
@@ -3048,6 +3066,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
 {
 	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
+	netif_queue_set_napi(vi->dev, qp_index, NETDEV_QUEUE_TYPE_RX, NULL);
 	napi_disable(&vi->rq[qp_index].napi);
 	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
 }
@@ -3317,8 +3336,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
 {
 	bool running = netif_running(vi->dev);
+	int q = vq2rxq(rq->vq);
 
 	if (running) {
+		netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, NULL);
 		napi_disable(&rq->napi);
 		virtnet_cancel_dim(vi, &rq->dim);
 	}
@@ -5943,6 +5964,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	/* Make sure NAPI is not using any XDP TX queues for RX. */
 	if (netif_running(dev)) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
+			netif_queue_set_napi(vi->dev, i, NETDEV_QUEUE_TYPE_RX,
+					     NULL);
 			napi_disable(&vi->rq[i].napi);
 			virtnet_napi_tx_disable(&vi->sq[i].napi);
 		}