diff mbox series

[v1] virtio_net: fix missing dma unmap for resize

Message ID 20231212081141.39757-1-xuanzhuo@linux.alibaba.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Headers show
Series [v1] virtio_net: fix missing dma unmap for resize | expand

Checks

Context Check Description
netdev/series_format warning Single patches do not need cover letters; Target tree name not specified in the subject
netdev/tree_selection success Guessed tree name to be net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1115 this patch: 1115
netdev/cc_maintainers warning 1 maintainers not CCed: virtualization@lists.linux.dev
netdev/build_clang success Errors and warnings before: 1142 this patch: 1142
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 1144 this patch: 1144
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 109 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Xuan Zhuo Dec. 12, 2023, 8:11 a.m. UTC
For rq, we have three cases getting buffers from virtio core:

1. virtqueue_get_buf{,_ctx}
2. virtqueue_detach_unused_buf
3. callback for virtqueue_resize

But in commit 295525e29a5b ("virtio_net: merge dma operations when
filling mergeable buffers"), I missed the dma unmap for the #3 case.

That will leak some memory, because I did not release the pages referred
to by the unused buffers.

If we run the following script, we will make the system OOM.

    while true
    do
            ethtool -G ens4 rx 128
            ethtool -G ens4 rx 256
            free -m
    done

Fixes: 295525e29a5b ("virtio_net: merge dma operations when filling mergeable buffers")
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---

v1: rename to virtnet_rq_free_buf_check_dma()

 drivers/net/virtio_net.c | 60 ++++++++++++++++++++--------------------
 1 file changed, 30 insertions(+), 30 deletions(-)

--
2.32.0.3.g01195cf9f

Comments

Michael S. Tsirkin Dec. 12, 2023, 8:26 a.m. UTC | #1
On Tue, Dec 12, 2023 at 04:11:41PM +0800, Xuan Zhuo wrote:
> For rq, we have three cases getting buffers from virtio core:
> 
> 1. virtqueue_get_buf{,_ctx}
> 2. virtqueue_detach_unused_buf
> 3. callback for virtqueue_resize
> 
> But in commit 295525e29a5b("virtio_net: merge dma operations when
> filling mergeable buffers"), I missed the dma unmap for the #3 case.
> 
> That will leak some memory, because I did not release the pages referred
> by the unused buffers.
> 
> If we do such script, we will make the system OOM.
> 
>     while true
>     do
>             ethtool -G ens4 rx 128
>             ethtool -G ens4 rx 256
>             free -m
>     done
> 
> Fixes: 295525e29a5b ("virtio_net: merge dma operations when filling mergeable buffers")
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
> 
> v1: rename to virtnet_rq_free_buf_check_dma()

The fact that we check does not matter; what matters is
that we unmap. I'd change the name to reflect that.


> 
>  drivers/net/virtio_net.c | 60 ++++++++++++++++++++--------------------
>  1 file changed, 30 insertions(+), 30 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index d16f592c2061..58ebbffeb952 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -334,7 +334,6 @@ struct virtio_net_common_hdr {
>  	};
>  };
> 
> -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
>  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> 
>  static bool is_xdp_frame(void *ptr)
> @@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
>  	return p;
>  }
> 
> +static void virtnet_rq_free_buf(struct virtnet_info *vi,
> +				struct receive_queue *rq, void *buf)
> +{
> +	if (vi->mergeable_rx_bufs)
> +		put_page(virt_to_head_page(buf));
> +	else if (vi->big_packets)
> +		give_pages(rq, buf);
> +	else
> +		put_page(virt_to_head_page(buf));
> +}
> +
>  static void enable_delayed_refill(struct virtnet_info *vi)
>  {
>  	spin_lock_bh(&vi->refill_lock);
> @@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
>  	return buf;
>  }
> 
> -static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
> -{
> -	void *buf;
> -
> -	buf = virtqueue_detach_unused_buf(rq->vq);
> -	if (buf && rq->do_dma)
> -		virtnet_rq_unmap(rq, buf, 0);
> -
> -	return buf;
> -}
> -
>  static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
>  {
>  	struct virtnet_rq_dma *dma;
> @@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
>  	}
>  }
> 
> +static void virtnet_rq_free_buf_check_dma(struct virtqueue *vq, void *buf)
> +{
> +	struct virtnet_info *vi = vq->vdev->priv;
> +	struct receive_queue *rq;
> +	int i = vq2rxq(vq);
> +
> +	rq = &vi->rq[i];
> +
> +	if (rq->do_dma)
> +		virtnet_rq_unmap(rq, buf, 0);
> +
> +	virtnet_rq_free_buf(vi, rq, buf);
> +}
> +
>  static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
>  {
>  	unsigned int len;
> @@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
>  	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
>  		pr_debug("%s: short packet %i\n", dev->name, len);
>  		DEV_STATS_INC(dev, rx_length_errors);
> -		virtnet_rq_free_unused_buf(rq->vq, buf);
> +		virtnet_rq_free_buf(vi, rq, buf);
>  		return;
>  	}
> 
> @@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
>  	if (running)
>  		napi_disable(&rq->napi);
> 
> -	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
> +	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_buf_check_dma);
>  	if (err)
>  		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
> 
> @@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
>  		xdp_return_frame(ptr_to_xdp(buf));
>  }
> 
> -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
> -{
> -	struct virtnet_info *vi = vq->vdev->priv;
> -	int i = vq2rxq(vq);
> -
> -	if (vi->mergeable_rx_bufs)
> -		put_page(virt_to_head_page(buf));
> -	else if (vi->big_packets)
> -		give_pages(&vi->rq[i], buf);
> -	else
> -		put_page(virt_to_head_page(buf));
> -}
> -
>  static void free_unused_bufs(struct virtnet_info *vi)
>  {
>  	void *buf;
> @@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
>  	}
> 
>  	for (i = 0; i < vi->max_queue_pairs; i++) {
> -		struct receive_queue *rq = &vi->rq[i];
> +		struct virtqueue *vq = vi->rq[i].vq;
> 
> -		while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
> -			virtnet_rq_free_unused_buf(rq->vq, buf);
> +		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
> +			virtnet_rq_free_buf_check_dma(vq, buf);
>  		cond_resched();
>  	}
>  }
> --
> 2.32.0.3.g01195cf9f
Xuan Zhuo Dec. 26, 2023, 5:57 a.m. UTC | #2
On Tue, 12 Dec 2023 03:26:41 -0500, "Michael S. Tsirkin" <mst@redhat.com> wrote:
> On Tue, Dec 12, 2023 at 04:11:41PM +0800, Xuan Zhuo wrote:
> > For rq, we have three cases getting buffers from virtio core:
> >
> > 1. virtqueue_get_buf{,_ctx}
> > 2. virtqueue_detach_unused_buf
> > 3. callback for virtqueue_resize
> >
> > But in commit 295525e29a5b("virtio_net: merge dma operations when
> > filling mergeable buffers"), I missed the dma unmap for the #3 case.
> >
> > That will leak some memory, because I did not release the pages referred
> > by the unused buffers.
> >
> > If we do such script, we will make the system OOM.
> >
> >     while true
> >     do
> >             ethtool -G ens4 rx 128
> >             ethtool -G ens4 rx 256
> >             free -m
> >     done
> >
> > Fixes: 295525e29a5b ("virtio_net: merge dma operations when filling mergeable buffers")
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >
> > v1: rename to virtnet_rq_free_buf_check_dma()
>
> The fact that we check does not matter what matters is
> that we unmap. I'd change the name to reflect that.


Hi Michael:

I see one "[GIT PULL] virtio: bugfixes". But this is not in the list.

So I hope this is your list.

Thanks.


>
>
> >
> >  drivers/net/virtio_net.c | 60 ++++++++++++++++++++--------------------
> >  1 file changed, 30 insertions(+), 30 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index d16f592c2061..58ebbffeb952 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -334,7 +334,6 @@ struct virtio_net_common_hdr {
> >  	};
> >  };
> >
> > -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> >
> >  static bool is_xdp_frame(void *ptr)
> > @@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
> >  	return p;
> >  }
> >
> > +static void virtnet_rq_free_buf(struct virtnet_info *vi,
> > +				struct receive_queue *rq, void *buf)
> > +{
> > +	if (vi->mergeable_rx_bufs)
> > +		put_page(virt_to_head_page(buf));
> > +	else if (vi->big_packets)
> > +		give_pages(rq, buf);
> > +	else
> > +		put_page(virt_to_head_page(buf));
> > +}
> > +
> >  static void enable_delayed_refill(struct virtnet_info *vi)
> >  {
> >  	spin_lock_bh(&vi->refill_lock);
> > @@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
> >  	return buf;
> >  }
> >
> > -static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
> > -{
> > -	void *buf;
> > -
> > -	buf = virtqueue_detach_unused_buf(rq->vq);
> > -	if (buf && rq->do_dma)
> > -		virtnet_rq_unmap(rq, buf, 0);
> > -
> > -	return buf;
> > -}
> > -
> >  static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
> >  {
> >  	struct virtnet_rq_dma *dma;
> > @@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
> >  	}
> >  }
> >
> > +static void virtnet_rq_free_buf_check_dma(struct virtqueue *vq, void *buf)
> > +{
> > +	struct virtnet_info *vi = vq->vdev->priv;
> > +	struct receive_queue *rq;
> > +	int i = vq2rxq(vq);
> > +
> > +	rq = &vi->rq[i];
> > +
> > +	if (rq->do_dma)
> > +		virtnet_rq_unmap(rq, buf, 0);
> > +
> > +	virtnet_rq_free_buf(vi, rq, buf);
> > +}
> > +
> >  static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
> >  {
> >  	unsigned int len;
> > @@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
> >  	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
> >  		pr_debug("%s: short packet %i\n", dev->name, len);
> >  		DEV_STATS_INC(dev, rx_length_errors);
> > -		virtnet_rq_free_unused_buf(rq->vq, buf);
> > +		virtnet_rq_free_buf(vi, rq, buf);
> >  		return;
> >  	}
> >
> > @@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
> >  	if (running)
> >  		napi_disable(&rq->napi);
> >
> > -	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
> > +	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_buf_check_dma);
> >  	if (err)
> >  		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
> >
> > @@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
> >  		xdp_return_frame(ptr_to_xdp(buf));
> >  }
> >
> > -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
> > -{
> > -	struct virtnet_info *vi = vq->vdev->priv;
> > -	int i = vq2rxq(vq);
> > -
> > -	if (vi->mergeable_rx_bufs)
> > -		put_page(virt_to_head_page(buf));
> > -	else if (vi->big_packets)
> > -		give_pages(&vi->rq[i], buf);
> > -	else
> > -		put_page(virt_to_head_page(buf));
> > -}
> > -
> >  static void free_unused_bufs(struct virtnet_info *vi)
> >  {
> >  	void *buf;
> > @@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
> >  	}
> >
> >  	for (i = 0; i < vi->max_queue_pairs; i++) {
> > -		struct receive_queue *rq = &vi->rq[i];
> > +		struct virtqueue *vq = vi->rq[i].vq;
> >
> > -		while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
> > -			virtnet_rq_free_unused_buf(rq->vq, buf);
> > +		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
> > +			virtnet_rq_free_buf_check_dma(vq, buf);
> >  		cond_resched();
> >  	}
> >  }
> > --
> > 2.32.0.3.g01195cf9f
>
Michael S. Tsirkin Dec. 26, 2023, 8:57 a.m. UTC | #3
On Tue, Dec 26, 2023 at 01:57:09PM +0800, Xuan Zhuo wrote:
> On Tue, 12 Dec 2023 03:26:41 -0500, "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > On Tue, Dec 12, 2023 at 04:11:41PM +0800, Xuan Zhuo wrote:
> > > For rq, we have three cases getting buffers from virtio core:
> > >
> > > 1. virtqueue_get_buf{,_ctx}
> > > 2. virtqueue_detach_unused_buf
> > > 3. callback for virtqueue_resize
> > >
> > > But in commit 295525e29a5b("virtio_net: merge dma operations when
> > > filling mergeable buffers"), I missed the dma unmap for the #3 case.
> > >
> > > That will leak some memory, because I did not release the pages referred
> > > by the unused buffers.
> > >
> > > If we do such script, we will make the system OOM.
> > >
> > >     while true
> > >     do
> > >             ethtool -G ens4 rx 128
> > >             ethtool -G ens4 rx 256
> > >             free -m
> > >     done
> > >
> > > Fixes: 295525e29a5b ("virtio_net: merge dma operations when filling mergeable buffers")
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > ---
> > >
> > > v1: rename to virtnet_rq_free_buf_check_dma()
> >
> > The fact that we check does not matter what matters is
> > that we unmap. I'd change the name to reflect that.
> 
> 
> Hi Michael:
> 
> I see one "[GIT PULL] virtio: bugfixes". But this is not in the list.
> 
> So I hope this is your list.
> 
> Thanks.

No - I'm still waiting for the comment to be addressed. Sorry about
the back and forth. It does unmap then free. So maybe virtnet_rq_unmap_free_buf?


> 
> >
> >
> > >
> > >  drivers/net/virtio_net.c | 60 ++++++++++++++++++++--------------------
> > >  1 file changed, 30 insertions(+), 30 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index d16f592c2061..58ebbffeb952 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -334,7 +334,6 @@ struct virtio_net_common_hdr {
> > >  	};
> > >  };
> > >
> > > -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > >
> > >  static bool is_xdp_frame(void *ptr)
> > > @@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
> > >  	return p;
> > >  }
> > >
> > > +static void virtnet_rq_free_buf(struct virtnet_info *vi,
> > > +				struct receive_queue *rq, void *buf)
> > > +{
> > > +	if (vi->mergeable_rx_bufs)
> > > +		put_page(virt_to_head_page(buf));
> > > +	else if (vi->big_packets)
> > > +		give_pages(rq, buf);
> > > +	else
> > > +		put_page(virt_to_head_page(buf));
> > > +}
> > > +
> > >  static void enable_delayed_refill(struct virtnet_info *vi)
> > >  {
> > >  	spin_lock_bh(&vi->refill_lock);
> > > @@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
> > >  	return buf;
> > >  }
> > >
> > > -static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
> > > -{
> > > -	void *buf;
> > > -
> > > -	buf = virtqueue_detach_unused_buf(rq->vq);
> > > -	if (buf && rq->do_dma)
> > > -		virtnet_rq_unmap(rq, buf, 0);
> > > -
> > > -	return buf;
> > > -}
> > > -
> > >  static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
> > >  {
> > >  	struct virtnet_rq_dma *dma;
> > > @@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
> > >  	}
> > >  }
> > >
> > > +static void virtnet_rq_free_buf_check_dma(struct virtqueue *vq, void *buf)
> > > +{
> > > +	struct virtnet_info *vi = vq->vdev->priv;
> > > +	struct receive_queue *rq;
> > > +	int i = vq2rxq(vq);
> > > +
> > > +	rq = &vi->rq[i];
> > > +
> > > +	if (rq->do_dma)
> > > +		virtnet_rq_unmap(rq, buf, 0);
> > > +
> > > +	virtnet_rq_free_buf(vi, rq, buf);
> > > +}
> > > +
> > >  static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
> > >  {
> > >  	unsigned int len;
> > > @@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
> > >  	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
> > >  		pr_debug("%s: short packet %i\n", dev->name, len);
> > >  		DEV_STATS_INC(dev, rx_length_errors);
> > > -		virtnet_rq_free_unused_buf(rq->vq, buf);
> > > +		virtnet_rq_free_buf(vi, rq, buf);
> > >  		return;
> > >  	}
> > >
> > > @@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
> > >  	if (running)
> > >  		napi_disable(&rq->napi);
> > >
> > > -	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
> > > +	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_buf_check_dma);
> > >  	if (err)
> > >  		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
> > >
> > > @@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
> > >  		xdp_return_frame(ptr_to_xdp(buf));
> > >  }
> > >
> > > -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
> > > -{
> > > -	struct virtnet_info *vi = vq->vdev->priv;
> > > -	int i = vq2rxq(vq);
> > > -
> > > -	if (vi->mergeable_rx_bufs)
> > > -		put_page(virt_to_head_page(buf));
> > > -	else if (vi->big_packets)
> > > -		give_pages(&vi->rq[i], buf);
> > > -	else
> > > -		put_page(virt_to_head_page(buf));
> > > -}
> > > -
> > >  static void free_unused_bufs(struct virtnet_info *vi)
> > >  {
> > >  	void *buf;
> > > @@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
> > >  	}
> > >
> > >  	for (i = 0; i < vi->max_queue_pairs; i++) {
> > > -		struct receive_queue *rq = &vi->rq[i];
> > > +		struct virtqueue *vq = vi->rq[i].vq;
> > >
> > > -		while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
> > > -			virtnet_rq_free_unused_buf(rq->vq, buf);
> > > +		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
> > > +			virtnet_rq_free_buf_check_dma(vq, buf);
> > >  		cond_resched();
> > >  	}
> > >  }
> > > --
> > > 2.32.0.3.g01195cf9f
> >
Xuan Zhuo Dec. 26, 2023, 8:59 a.m. UTC | #4
On Tue, 26 Dec 2023 03:57:27 -0500, "Michael S. Tsirkin" <mst@redhat.com> wrote:
> On Tue, Dec 26, 2023 at 01:57:09PM +0800, Xuan Zhuo wrote:
> > On Tue, 12 Dec 2023 03:26:41 -0500, "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > > On Tue, Dec 12, 2023 at 04:11:41PM +0800, Xuan Zhuo wrote:
> > > > For rq, we have three cases getting buffers from virtio core:
> > > >
> > > > 1. virtqueue_get_buf{,_ctx}
> > > > 2. virtqueue_detach_unused_buf
> > > > 3. callback for virtqueue_resize
> > > >
> > > > But in commit 295525e29a5b("virtio_net: merge dma operations when
> > > > filling mergeable buffers"), I missed the dma unmap for the #3 case.
> > > >
> > > > That will leak some memory, because I did not release the pages referred
> > > > by the unused buffers.
> > > >
> > > > If we do such script, we will make the system OOM.
> > > >
> > > >     while true
> > > >     do
> > > >             ethtool -G ens4 rx 128
> > > >             ethtool -G ens4 rx 256
> > > >             free -m
> > > >     done
> > > >
> > > > Fixes: 295525e29a5b ("virtio_net: merge dma operations when filling mergeable buffers")
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > ---
> > > >
> > > > v1: rename to virtnet_rq_free_buf_check_dma()
> > >
> > > The fact that we check does not matter what matters is
> > > that we unmap. I'd change the name to reflect that.
> >
> >
> > Hi Michael:
> >
> > I see one "[GIT PULL] virtio: bugfixes". But this is not in the list.
> >
> > So I hope this is your list.
> >
> > Thanks.
>
> No - I'm still waiting for the comment to be addressed. sorry about
> the back and forth. It does unmap then free. So maybe virtnet_rq_unmap_free_buf?

OK.

I will post v2 soon.

Thanks.



>
>
> >
> > >
> > >
> > > >
> > > >  drivers/net/virtio_net.c | 60 ++++++++++++++++++++--------------------
> > > >  1 file changed, 30 insertions(+), 30 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index d16f592c2061..58ebbffeb952 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -334,7 +334,6 @@ struct virtio_net_common_hdr {
> > > >  	};
> > > >  };
> > > >
> > > > -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > >
> > > >  static bool is_xdp_frame(void *ptr)
> > > > @@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
> > > >  	return p;
> > > >  }
> > > >
> > > > +static void virtnet_rq_free_buf(struct virtnet_info *vi,
> > > > +				struct receive_queue *rq, void *buf)
> > > > +{
> > > > +	if (vi->mergeable_rx_bufs)
> > > > +		put_page(virt_to_head_page(buf));
> > > > +	else if (vi->big_packets)
> > > > +		give_pages(rq, buf);
> > > > +	else
> > > > +		put_page(virt_to_head_page(buf));
> > > > +}
> > > > +
> > > >  static void enable_delayed_refill(struct virtnet_info *vi)
> > > >  {
> > > >  	spin_lock_bh(&vi->refill_lock);
> > > > @@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
> > > >  	return buf;
> > > >  }
> > > >
> > > > -static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
> > > > -{
> > > > -	void *buf;
> > > > -
> > > > -	buf = virtqueue_detach_unused_buf(rq->vq);
> > > > -	if (buf && rq->do_dma)
> > > > -		virtnet_rq_unmap(rq, buf, 0);
> > > > -
> > > > -	return buf;
> > > > -}
> > > > -
> > > >  static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
> > > >  {
> > > >  	struct virtnet_rq_dma *dma;
> > > > @@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
> > > >  	}
> > > >  }
> > > >
> > > > +static void virtnet_rq_free_buf_check_dma(struct virtqueue *vq, void *buf)
> > > > +{
> > > > +	struct virtnet_info *vi = vq->vdev->priv;
> > > > +	struct receive_queue *rq;
> > > > +	int i = vq2rxq(vq);
> > > > +
> > > > +	rq = &vi->rq[i];
> > > > +
> > > > +	if (rq->do_dma)
> > > > +		virtnet_rq_unmap(rq, buf, 0);
> > > > +
> > > > +	virtnet_rq_free_buf(vi, rq, buf);
> > > > +}
> > > > +
> > > >  static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
> > > >  {
> > > >  	unsigned int len;
> > > > @@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
> > > >  	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
> > > >  		pr_debug("%s: short packet %i\n", dev->name, len);
> > > >  		DEV_STATS_INC(dev, rx_length_errors);
> > > > -		virtnet_rq_free_unused_buf(rq->vq, buf);
> > > > +		virtnet_rq_free_buf(vi, rq, buf);
> > > >  		return;
> > > >  	}
> > > >
> > > > @@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
> > > >  	if (running)
> > > >  		napi_disable(&rq->napi);
> > > >
> > > > -	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
> > > > +	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_buf_check_dma);
> > > >  	if (err)
> > > >  		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
> > > >
> > > > @@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
> > > >  		xdp_return_frame(ptr_to_xdp(buf));
> > > >  }
> > > >
> > > > -static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
> > > > -{
> > > > -	struct virtnet_info *vi = vq->vdev->priv;
> > > > -	int i = vq2rxq(vq);
> > > > -
> > > > -	if (vi->mergeable_rx_bufs)
> > > > -		put_page(virt_to_head_page(buf));
> > > > -	else if (vi->big_packets)
> > > > -		give_pages(&vi->rq[i], buf);
> > > > -	else
> > > > -		put_page(virt_to_head_page(buf));
> > > > -}
> > > > -
> > > >  static void free_unused_bufs(struct virtnet_info *vi)
> > > >  {
> > > >  	void *buf;
> > > > @@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
> > > >  	}
> > > >
> > > >  	for (i = 0; i < vi->max_queue_pairs; i++) {
> > > > -		struct receive_queue *rq = &vi->rq[i];
> > > > +		struct virtqueue *vq = vi->rq[i].vq;
> > > >
> > > > -		while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
> > > > -			virtnet_rq_free_unused_buf(rq->vq, buf);
> > > > +		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
> > > > +			virtnet_rq_free_buf_check_dma(vq, buf);
> > > >  		cond_resched();
> > > >  	}
> > > >  }
> > > > --
> > > > 2.32.0.3.g01195cf9f
> > >
>
diff mbox series

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d16f592c2061..58ebbffeb952 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -334,7 +334,6 @@  struct virtio_net_common_hdr {
 	};
 };

-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

 static bool is_xdp_frame(void *ptr)
@@ -408,6 +407,17 @@  static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
 	return p;
 }

+static void virtnet_rq_free_buf(struct virtnet_info *vi,
+				struct receive_queue *rq, void *buf)
+{
+	if (vi->mergeable_rx_bufs)
+		put_page(virt_to_head_page(buf));
+	else if (vi->big_packets)
+		give_pages(rq, buf);
+	else
+		put_page(virt_to_head_page(buf));
+}
+
 static void enable_delayed_refill(struct virtnet_info *vi)
 {
 	spin_lock_bh(&vi->refill_lock);
@@ -634,17 +644,6 @@  static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
 	return buf;
 }

-static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
-{
-	void *buf;
-
-	buf = virtqueue_detach_unused_buf(rq->vq);
-	if (buf && rq->do_dma)
-		virtnet_rq_unmap(rq, buf, 0);
-
-	return buf;
-}
-
 static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 {
 	struct virtnet_rq_dma *dma;
@@ -744,6 +743,20 @@  static void virtnet_rq_set_premapped(struct virtnet_info *vi)
 	}
 }

+static void virtnet_rq_free_buf_check_dma(struct virtqueue *vq, void *buf)
+{
+	struct virtnet_info *vi = vq->vdev->priv;
+	struct receive_queue *rq;
+	int i = vq2rxq(vq);
+
+	rq = &vi->rq[i];
+
+	if (rq->do_dma)
+		virtnet_rq_unmap(rq, buf, 0);
+
+	virtnet_rq_free_buf(vi, rq, buf);
+}
+
 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
 	unsigned int len;
@@ -1764,7 +1777,7 @@  static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		DEV_STATS_INC(dev, rx_length_errors);
-		virtnet_rq_free_unused_buf(rq->vq, buf);
+		virtnet_rq_free_buf(vi, rq, buf);
 		return;
 	}

@@ -2392,7 +2405,7 @@  static int virtnet_rx_resize(struct virtnet_info *vi,
 	if (running)
 		napi_disable(&rq->napi);

-	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
+	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_buf_check_dma);
 	if (err)
 		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);

@@ -4031,19 +4044,6 @@  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
 		xdp_return_frame(ptr_to_xdp(buf));
 }

-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
-{
-	struct virtnet_info *vi = vq->vdev->priv;
-	int i = vq2rxq(vq);
-
-	if (vi->mergeable_rx_bufs)
-		put_page(virt_to_head_page(buf));
-	else if (vi->big_packets)
-		give_pages(&vi->rq[i], buf);
-	else
-		put_page(virt_to_head_page(buf));
-}
-
 static void free_unused_bufs(struct virtnet_info *vi)
 {
 	void *buf;
@@ -4057,10 +4057,10 @@  static void free_unused_bufs(struct virtnet_info *vi)
 	}

 	for (i = 0; i < vi->max_queue_pairs; i++) {
-		struct receive_queue *rq = &vi->rq[i];
+		struct virtqueue *vq = vi->rq[i].vq;

-		while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
-			virtnet_rq_free_unused_buf(rq->vq, buf);
+		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+			virtnet_rq_free_buf_check_dma(vq, buf);
 		cond_resched();
 	}
 }