[net,v2,2/2] virtio-net: suppress bad irq warning for tx napi

Message ID 20210220014436.3556492-3-weiwan@google.com (mailing list archive)
State Deferred
Delegated to: Netdev Maintainers
Series virtio-net: suppress bad irq warning for tx napi

Checks

Context Check Description
netdev/cover_letter success Link
netdev/fixes_present success Link
netdev/patch_count success Link
netdev/tree_selection success Clearly marked for net
netdev/subject_prefix success Link
netdev/cc_maintainers success CCed 7 of 7 maintainers
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Link
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success Link
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 48 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/header_inline success Link
netdev/stable success Stable not CCed

Commit Message

Wei Wang Feb. 20, 2021, 1:44 a.m. UTC
With the implementation of napi-tx in the virtio driver, we clean tx
descriptors from the rx napi handler in order to reduce the number of
tx complete interrupts. But this can introduce a race where a tx
complete interrupt has been raised, yet the handler finds no work to do
because the work was already done in a previous rx interrupt handler.
This can lead to the following warning message:
[ 3588.010778] irq 38: nobody cared (try booting with the
"irqpoll" option)
[ 3588.017938] CPU: 4 PID: 0 Comm: swapper/4 Not tainted
5.3.0-19-generic #20~18.04.2-Ubuntu
[ 3588.017940] Call Trace:
[ 3588.017942]  <IRQ>
[ 3588.017951]  dump_stack+0x63/0x85
[ 3588.017953]  __report_bad_irq+0x35/0xc0
[ 3588.017955]  note_interrupt+0x24b/0x2a0
[ 3588.017956]  handle_irq_event_percpu+0x54/0x80
[ 3588.017957]  handle_irq_event+0x3b/0x60
[ 3588.017958]  handle_edge_irq+0x83/0x1a0
[ 3588.017961]  handle_irq+0x20/0x30
[ 3588.017964]  do_IRQ+0x50/0xe0
[ 3588.017966]  common_interrupt+0xf/0xf
[ 3588.017966]  </IRQ>
[ 3588.017989] handlers:
[ 3588.020374] [<000000001b9f1da8>] vring_interrupt
[ 3588.025099] Disabling IRQ #38

This patch sets no_interrupt_check in the tx vring_virtqueue when
napi-tx is enabled, to suppress the warning in that case.
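
To illustrate the mechanism, the following is a minimal standalone
sketch, not the actual virtio_ring code: all types and field names
below are made up for illustration except no_interrupt_check, which
this series introduces. It shows how a tx interrupt handler that finds
no used buffers can still report the interrupt as handled while the
flag is set, so the spurious-IRQ accounting in note_interrupt() (which
disables the line once roughly 99,900 of the last 100,000 interrupts
went unhandled) never trips.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's irqreturn_t values. */
enum irqreturn { IRQ_NONE, IRQ_HANDLED };

/* Simplified model of a tx virtqueue; the real state lives in struct
 * vring_virtqueue in drivers/virtio/virtio_ring.c. Field names are
 * illustrative except for no_interrupt_check.
 */
struct tx_vq_model {
	bool no_interrupt_check;  /* set while rx napi may clean tx descriptors */
	int pending_used_bufs;    /* tx completions not yet reclaimed */
};

/* Model of the tx complete interrupt handler. */
static enum irqreturn tx_irq_handler(struct tx_vq_model *vq)
{
	if (vq->pending_used_bufs == 0) {
		/* rx napi already freed the tx descriptors, so there is
		 * nothing left to do. Returning IRQ_NONE here is what
		 * eventually triggers "irq NN: nobody cared"; with the
		 * flag set, the interrupt is reported as handled instead.
		 */
		return vq->no_interrupt_check ? IRQ_HANDLED : IRQ_NONE;
	}
	vq->pending_used_bufs = 0;	/* reclaim completed buffers */
	return IRQ_HANDLED;
}

int main(void)
{
	struct tx_vq_model vq = { .no_interrupt_check = true,
				  .pending_used_bufs = 0 };

	printf("handler returned %s\n",
	       tx_irq_handler(&vq) == IRQ_HANDLED ? "IRQ_HANDLED" : "IRQ_NONE");
	return 0;
}

Note that the patch only sets the flag while napi-tx is enabled and
clears it again in virtnet_napi_tx_disable(), so genuinely spurious
interrupts are still detected when tx napi is off.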

Fixes: 7b0411ef4aa6 ("virtio-net: clean tx descriptors from rx napi")
Reported-by: Rick Jones <jonesrick@google.com>
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Willem de Bruijn <willemb@google.com>
---
 drivers/net/virtio_net.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 508408fbe78f..18b14739d63e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1303,13 +1303,22 @@  static void virtnet_napi_tx_enable(struct virtnet_info *vi,
 		return;
 	}
 
+	/* With napi_tx enabled, free_old_xmit_skbs() could be called from
+	 * rx napi handler. Set no_interrupt_check to suppress bad irq warning
+	 * for IRQ_NONE case from tx complete interrupt handler.
+	 */
+	virtqueue_set_no_interrupt_check(vq, true);
+
 	return virtnet_napi_enable(vq, napi);
 }
 
-static void virtnet_napi_tx_disable(struct napi_struct *napi)
+static void virtnet_napi_tx_disable(struct virtqueue *vq,
+				    struct napi_struct *napi)
 {
-	if (napi->weight)
+	if (napi->weight) {
 		napi_disable(napi);
+		virtqueue_set_no_interrupt_check(vq, false);
+	}
 }
 
 static void refill_work(struct work_struct *work)
@@ -1835,7 +1844,7 @@  static int virtnet_close(struct net_device *dev)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
 		napi_disable(&vi->rq[i].napi);
-		virtnet_napi_tx_disable(&vi->sq[i].napi);
+		virtnet_napi_tx_disable(vi->sq[i].vq, &vi->sq[i].napi);
 	}
 
 	return 0;
@@ -2315,7 +2324,7 @@  static void virtnet_freeze_down(struct virtio_device *vdev)
 	if (netif_running(vi->dev)) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
 			napi_disable(&vi->rq[i].napi);
-			virtnet_napi_tx_disable(&vi->sq[i].napi);
+			virtnet_napi_tx_disable(vi->sq[i].vq, &vi->sq[i].napi);
 		}
 	}
 }
@@ -2440,7 +2449,7 @@  static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	if (netif_running(dev)) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
 			napi_disable(&vi->rq[i].napi);
-			virtnet_napi_tx_disable(&vi->sq[i].napi);
+			virtnet_napi_tx_disable(vi->sq[i].vq, &vi->sq[i].napi);
 		}
 	}