
[RFC,v2,3/4] virtio_net: move tx vq operation under tx queue lock

Message ID 20210413054733.36363-4-mst@redhat.com (mailing list archive)
State RFC
Delegated to: Netdev Maintainers
Series: virtio net: spurious interrupt related fixes

Checks

Context                         Check    Description
netdev/cover_letter             success
netdev/fixes_present            success
netdev/patch_count              success
netdev/tree_selection           success  Guessed tree name to be net-next
netdev/subject_prefix           success
netdev/cc_maintainers           success  CCed 6 of 6 maintainers
netdev/source_inline            success  Was 0 now: 0
netdev/verify_signedoff         success
netdev/module_param             success  Was 0 now: 0
netdev/build_32bit              success  Errors and warnings before: 0 this patch: 0
netdev/kdoc                     success  Errors and warnings before: 0 this patch: 0
netdev/verify_fixes             success
netdev/checkpatch               success  total: 0 errors, 0 warnings, 0 checks, 37 lines checked
netdev/build_allmodconfig_warn  success  Errors and warnings before: 0 this patch: 0
netdev/header_inline            success

Commit Message

Michael S. Tsirkin April 13, 2021, 5:47 a.m. UTC
It's unsafe to operate a vq from multiple threads.
Unfortunately, this is exactly what we do when invoking
clean tx poll from rx napi.
As a fix, move everything that deals with the vq under the tx lock.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 drivers/net/virtio_net.c | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)
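
For reference while reading the diff and the thread below: virtqueue_napi_complete(), the helper this patch open-codes, looks roughly like the sketch that follows (paraphrased from drivers/net/virtio_net.c of this era; shown only for context, not part of the patch). Before this change, virtnet_poll_tx() called it after dropping the tx lock.

	/* paraphrased sketch of the existing helper */
	static void virtqueue_napi_complete(struct napi_struct *napi,
					    struct virtqueue *vq, int processed)
	{
		int opaque;

		opaque = virtqueue_enable_cb_prepare(vq);
		if (napi_complete_done(napi, processed)) {
			/* a buffer may have been used while callbacks were
			 * being re-enabled; if so, reschedule napi
			 */
			if (unlikely(virtqueue_poll(vq, opaque)))
				virtqueue_napi_schedule(napi, vq);
		} else {
			virtqueue_disable_cb(vq);
		}
	}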

Comments

Jason Wang April 13, 2021, 8:54 a.m. UTC | #1
On 2021/4/13 1:47 PM, Michael S. Tsirkin wrote:
> It's unsafe to operate a vq from multiple threads.
> Unfortunately this is exactly what we do when invoking
> clean tx poll from rx napi.
> As a fix move everything that deals with the vq to under tx lock.
>
> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> ---
>   drivers/net/virtio_net.c | 22 +++++++++++++++++++++-
>   1 file changed, 21 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 16d5abed582c..460ccdbb840e 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -1505,6 +1505,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
>   	struct virtnet_info *vi = sq->vq->vdev->priv;
>   	unsigned int index = vq2txq(sq->vq);
>   	struct netdev_queue *txq;
> +	int opaque;
> +	bool done;
>   
>   	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
>   		/* We don't need to enable cb for XDP */
> @@ -1514,10 +1516,28 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
>   
>   	txq = netdev_get_tx_queue(vi->dev, index);
>   	__netif_tx_lock(txq, raw_smp_processor_id());
> +	virtqueue_disable_cb(sq->vq);
>   	free_old_xmit_skbs(sq, true);
> +
> +	opaque = virtqueue_enable_cb_prepare(sq->vq);
> +
> +	done = napi_complete_done(napi, 0);
> +
> +	if (!done)
> +		virtqueue_disable_cb(sq->vq);
> +
>   	__netif_tx_unlock(txq);
>   
> -	virtqueue_napi_complete(napi, sq->vq, 0);


So I wonder why not simply move __netif_tx_unlock() after 
virtqueue_napi_complete()?

Thanks


> +	if (done) {
> +		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
> +			if (napi_schedule_prep(napi)) {
> +				__netif_tx_lock(txq, raw_smp_processor_id());
> +				virtqueue_disable_cb(sq->vq);
> +				__netif_tx_unlock(txq);
> +				__napi_schedule(napi);
> +			}
> +		}
> +	}
>   
>   	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
>   		netif_tx_wake_queue(txq);
Michael S. Tsirkin April 13, 2021, 2:02 p.m. UTC | #2
On Tue, Apr 13, 2021 at 04:54:42PM +0800, Jason Wang wrote:
> 
> On 2021/4/13 1:47 PM, Michael S. Tsirkin wrote:
> > It's unsafe to operate a vq from multiple threads.
> > Unfortunately this is exactly what we do when invoking
> > clean tx poll from rx napi.
> > As a fix move everything that deals with the vq to under tx lock.
> > 
> > Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> > ---
> >   drivers/net/virtio_net.c | 22 +++++++++++++++++++++-
> >   1 file changed, 21 insertions(+), 1 deletion(-)
> > 
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 16d5abed582c..460ccdbb840e 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -1505,6 +1505,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> >   	struct virtnet_info *vi = sq->vq->vdev->priv;
> >   	unsigned int index = vq2txq(sq->vq);
> >   	struct netdev_queue *txq;
> > +	int opaque;
> > +	bool done;
> >   	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
> >   		/* We don't need to enable cb for XDP */
> > @@ -1514,10 +1516,28 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> >   	txq = netdev_get_tx_queue(vi->dev, index);
> >   	__netif_tx_lock(txq, raw_smp_processor_id());
> > +	virtqueue_disable_cb(sq->vq);
> >   	free_old_xmit_skbs(sq, true);
> > +
> > +	opaque = virtqueue_enable_cb_prepare(sq->vq);
> > +
> > +	done = napi_complete_done(napi, 0);
> > +
> > +	if (!done)
> > +		virtqueue_disable_cb(sq->vq);
> > +
> >   	__netif_tx_unlock(txq);
> > -	virtqueue_napi_complete(napi, sq->vq, 0);
> 
> 
> So I wonder why not simply move __netif_tx_unlock() after
> virtqueue_napi_complete()?
> 
> Thanks
> 


Because that calls tx poll which also takes tx lock internally ...


> > +	if (done) {
> > +		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
> > +			if (napi_schedule_prep(napi)) {
> > +				__netif_tx_lock(txq, raw_smp_processor_id());
> > +				virtqueue_disable_cb(sq->vq);
> > +				__netif_tx_unlock(txq);
> > +				__napi_schedule(napi);
> > +			}
> > +		}
> > +	}
> >   	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> >   		netif_tx_wake_queue(txq);
Willem de Bruijn April 13, 2021, 2:20 p.m. UTC | #3
On Tue, Apr 13, 2021 at 10:03 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Tue, Apr 13, 2021 at 04:54:42PM +0800, Jason Wang wrote:
> >
> > On 2021/4/13 1:47 PM, Michael S. Tsirkin wrote:
> > > It's unsafe to operate a vq from multiple threads.
> > > Unfortunately this is exactly what we do when invoking
> > > clean tx poll from rx napi.

Actually, the issue goes back to the napi-tx even without the
opportunistic cleaning from the receive interrupt, I think? That races
with processing the vq in start_xmit.

> > > As a fix move everything that deals with the vq to under tx lock.
> > >

If the above is correct:

Fixes: b92f1e6751a6 ("virtio-net: transmit napi")
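
For context on the race described above, the two paths that can operate on the same tx vq look roughly like this (heavily abridged from pre-patch virtio_net.c; only the vq-touching calls are kept):

	/* Path A: transmit path, entered with the netdev tx queue lock held */
	static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		...
		free_old_xmit_skbs(sq, false);	/* virtqueue_get_buf() on sq->vq */
		err = xmit_skb(sq, skb);	/* virtqueue_add_outbuf() on sq->vq */
		...
	}

	/* Path B: tx napi poll; before this patch only part of it held that lock */
	static int virtnet_poll_tx(struct napi_struct *napi, int budget)
	{
		...
		__netif_tx_lock(txq, raw_smp_processor_id());
		free_old_xmit_skbs(sq, true);
		__netif_tx_unlock(txq);

		virtqueue_napi_complete(napi, sq->vq, 0); /* vq ops outside the lock */
		...
	}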

> > > Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> > > ---
> > >   drivers/net/virtio_net.c | 22 +++++++++++++++++++++-
> > >   1 file changed, 21 insertions(+), 1 deletion(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index 16d5abed582c..460ccdbb840e 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -1505,6 +1505,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> > >     struct virtnet_info *vi = sq->vq->vdev->priv;
> > >     unsigned int index = vq2txq(sq->vq);
> > >     struct netdev_queue *txq;
> > > +   int opaque;

nit: virtqueue_napi_complete also stores as int opaque, but
virtqueue_enable_cb_prepare actually returns, and virtqueue_poll
expects, an unsigned int. In the end, conversion works correctly. But
cleaner to use the real type.
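
A minimal illustration of the suggestion (hypothetical, not from the posted patch):

	unsigned int opaque;	/* matches virtqueue_enable_cb_prepare()'s return
				 * type and virtqueue_poll()'s parameter type
				 */

	opaque = virtqueue_enable_cb_prepare(sq->vq);
	...
	if (unlikely(virtqueue_poll(sq->vq, opaque)))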

> > > +   bool done;
> > >     if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
> > >             /* We don't need to enable cb for XDP */
> > > @@ -1514,10 +1516,28 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> > >     txq = netdev_get_tx_queue(vi->dev, index);
> > >     __netif_tx_lock(txq, raw_smp_processor_id());
> > > +   virtqueue_disable_cb(sq->vq);
> > >     free_old_xmit_skbs(sq, true);
> > > +
> > > +   opaque = virtqueue_enable_cb_prepare(sq->vq);
> > > +
> > > +   done = napi_complete_done(napi, 0);
> > > +
> > > +   if (!done)
> > > +           virtqueue_disable_cb(sq->vq);
> > > +
> > >     __netif_tx_unlock(txq);
> > > -   virtqueue_napi_complete(napi, sq->vq, 0);
> >
> >
> > So I wonder why not simply move __netif_tx_unlock() after
> > virtqueue_napi_complete()?
> >
> > Thanks
> >
>
>
> Because that calls tx poll which also takes tx lock internally ...

which tx poll?
Michael S. Tsirkin April 13, 2021, 7:38 p.m. UTC | #4
On Tue, Apr 13, 2021 at 10:20:39AM -0400, Willem de Bruijn wrote:
> On Tue, Apr 13, 2021 at 10:03 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Tue, Apr 13, 2021 at 04:54:42PM +0800, Jason Wang wrote:
> > >
> > > On 2021/4/13 1:47 PM, Michael S. Tsirkin wrote:
> > > > It's unsafe to operate a vq from multiple threads.
> > > > Unfortunately this is exactly what we do when invoking
> > > > clean tx poll from rx napi.
> 
> Actually, the issue goes back to the napi-tx even without the
> opportunistic cleaning from the receive interrupt, I think? That races
> with processing the vq in start_xmit.
> 
> > > > As a fix move everything that deals with the vq to under tx lock.
> > > >
> 
> If the above is correct:
> 
> Fixes: b92f1e6751a6 ("virtio-net: transmit napi")
> 
> > > > Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> > > > ---
> > > >   drivers/net/virtio_net.c | 22 +++++++++++++++++++++-
> > > >   1 file changed, 21 insertions(+), 1 deletion(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index 16d5abed582c..460ccdbb840e 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -1505,6 +1505,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> > > >     struct virtnet_info *vi = sq->vq->vdev->priv;
> > > >     unsigned int index = vq2txq(sq->vq);
> > > >     struct netdev_queue *txq;
> > > > +   int opaque;
> 
> nit: virtqueue_napi_complete also stores as int opaque, but
> virtqueue_enable_cb_prepare actually returns, and virtqueue_poll
> expects, an unsigned int. In the end, conversion works correctly. But
> cleaner to use the real type.
> 
> > > > +   bool done;
> > > >     if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
> > > >             /* We don't need to enable cb for XDP */
> > > > @@ -1514,10 +1516,28 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> > > >     txq = netdev_get_tx_queue(vi->dev, index);
> > > >     __netif_tx_lock(txq, raw_smp_processor_id());
> > > > +   virtqueue_disable_cb(sq->vq);
> > > >     free_old_xmit_skbs(sq, true);
> > > > +
> > > > +   opaque = virtqueue_enable_cb_prepare(sq->vq);
> > > > +
> > > > +   done = napi_complete_done(napi, 0);
> > > > +
> > > > +   if (!done)
> > > > +           virtqueue_disable_cb(sq->vq);
> > > > +
> > > >     __netif_tx_unlock(txq);
> > > > -   virtqueue_napi_complete(napi, sq->vq, 0);
> > >
> > >
> > > So I wonder why not simply move __netif_tx_unlock() after
> > > virtqueue_napi_complete()?
> > >
> > > Thanks
> > >
> >
> >
> > Because that calls tx poll which also takes tx lock internally ...
> 
> which tx poll?

Oh. It's virtqueue_poll actually. I confused it with
virtnet_poll_tx. Right. We can put it back the way it was.
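
For comparison, the simpler ordering discussed above — keeping virtqueue_napi_complete() inside the tx lock rather than open-coding it — would look roughly like this (sketch only, assuming the helper stays as it is today; whether a later revision actually takes this form is not shown here):

	txq = netdev_get_tx_queue(vi->dev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	virtqueue_disable_cb(sq->vq);
	free_old_xmit_skbs(sq, true);

	/* complete napi and re-enable callbacks while still holding the
	 * tx lock, so every vq operation stays serialized with start_xmit
	 */
	virtqueue_napi_complete(napi, sq->vq, 0);

	__netif_tx_unlock(txq);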

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 16d5abed582c..460ccdbb840e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1505,6 +1505,8 @@  static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	unsigned int index = vq2txq(sq->vq);
 	struct netdev_queue *txq;
+	int opaque;
+	bool done;
 
 	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
 		/* We don't need to enable cb for XDP */
@@ -1514,10 +1516,28 @@  static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
+	virtqueue_disable_cb(sq->vq);
 	free_old_xmit_skbs(sq, true);
+
+	opaque = virtqueue_enable_cb_prepare(sq->vq);
+
+	done = napi_complete_done(napi, 0);
+
+	if (!done)
+		virtqueue_disable_cb(sq->vq);
+
 	__netif_tx_unlock(txq);
 
-	virtqueue_napi_complete(napi, sq->vq, 0);
+	if (done) {
+		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
+			if (napi_schedule_prep(napi)) {
+				__netif_tx_lock(txq, raw_smp_processor_id());
+				virtqueue_disable_cb(sq->vq);
+				__netif_tx_unlock(txq);
+				__napi_schedule(napi);
+			}
+		}
+	}
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
 		netif_tx_wake_queue(txq);