
[net-next,v6,07/10] virtio_net: xsk: rx: support fill with xsk buffer

Message ID 20240618075643.24867-8-xuanzhuo@linux.alibaba.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series virtio-net: support AF_XDP zero copy

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 845 this patch: 845
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 14 of 14 maintainers
netdev/build_clang success Errors and warnings before: 849 this patch: 849
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 849 this patch: 849
netdev/checkpatch warning WARNING: line length of 82 exceeds 80 columns; WARNING: line length of 83 exceeds 80 columns; WARNING: line length of 85 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Xuan Zhuo June 18, 2024, 7:56 a.m. UTC
Implement the logic of filling rq with XSK buffers.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 68 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 66 insertions(+), 2 deletions(-)

Comments

Paolo Abeni June 20, 2024, 10:20 a.m. UTC | #1
Hi,

On Tue, 2024-06-18 at 15:56 +0800, Xuan Zhuo wrote:
> @@ -1032,6 +1034,53 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
>  	}
>  }
>  
> +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> +{
> +	sg->dma_address = addr;
> +	sg->length = len;
> +}
> +
> +static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> +				   struct xsk_buff_pool *pool, gfp_t gfp)
> +{
> +	struct xdp_buff **xsk_buffs;
> +	dma_addr_t addr;
> +	u32 len, i;
> +	int err = 0;

Minor nit: the reverse xmas tree order is based on the full line length;
it should be:
	int err = 0;
	u32 len, i;

[...]
> @@ -2226,6 +2281,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
>  		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
>  	}
>  
> +	oom = err == -ENOMEM;
>  	return !oom;

Minor nit: 'oom' is used only in the two lines above. You could drop
that variable and just do:
	return err != -ENOMEM;

Please _do not_ repost just for the above, but please include such
changes if you should repost for other reasons.

Also try to include a detailed changelog in each patch after the tag
area and a '---' separator; it will simplify the review process.
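
For example (only an illustration of the usual layout, not the actual
changelog of this series):

	Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
	---
	v6:
	 - ...
	v5:
	 - ...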

Thanks,

Paolo
Xuan Zhuo June 20, 2024, 10:37 a.m. UTC | #2
On Thu, 20 Jun 2024 12:20:44 +0200, Paolo Abeni <pabeni@redhat.com> wrote:
> Hi,
>
> On Tue, 2024-06-18 at 15:56 +0800, Xuan Zhuo wrote:
> > @@ -1032,6 +1034,53 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
> >  	}
> >  }
> >
> > +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > +{
> > +	sg->dma_address = addr;
> > +	sg->length = len;
> > +}
> > +
> > +static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> > +				   struct xsk_buff_pool *pool, gfp_t gfp)
> > +{
> > +	struct xdp_buff **xsk_buffs;
> > +	dma_addr_t addr;
> > +	u32 len, i;
> > +	int err = 0;
>
> Minor nit: the reverse xmas tree order is based on the full line len,
> should be:
> 	int err = 0;
> 	u32 len, i;

Will fix.

>
> [...]
> > @@ -2226,6 +2281,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
> >  		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
> >  	}
> >
> > +	oom = err == -ENOMEM;
> >  	return !oom;
>
> Minor nit: 'oom' is used only in the above to lines. You could drop
> such variable and just:
> 	return err != -ENOMEM;

Will fix.

>
> Please _do not_ repost just for the above, but please include such
> changes if you should repost for other reasons.

OK.


>
> Also try to include a detailed changelog in each patch after the tag
> area and a '---' separator, it will simplify the review process.

Will do.

Thanks.


>
> Thanks,
>
> Paolo
>
Jason Wang June 28, 2024, 2:19 a.m. UTC | #3
On Tue, Jun 18, 2024 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> Implement the logic of filling rq with XSK buffers.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/net/virtio_net.c | 68 ++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 66 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 2bbc715f22c6..2ac5668a94ce 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -355,6 +355,8 @@ struct receive_queue {
>
>                 /* xdp rxq used by xsk */
>                 struct xdp_rxq_info xdp_rxq;
> +
> +               struct xdp_buff **xsk_buffs;
>         } xsk;
>  };
>
> @@ -1032,6 +1034,53 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
>         }
>  }
>
> +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> +{
> +       sg->dma_address = addr;
> +       sg->length = len;
> +}
> +
> +static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> +                                  struct xsk_buff_pool *pool, gfp_t gfp)
> +{
> +       struct xdp_buff **xsk_buffs;
> +       dma_addr_t addr;
> +       u32 len, i;
> +       int err = 0;
> +       int num;
> +
> +       xsk_buffs = rq->xsk.xsk_buffs;
> +
> +       num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
> +       if (!num)
> +               return -ENOMEM;
> +
> +       len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
> +
> +       for (i = 0; i < num; ++i) {
> +               /* use the part of XDP_PACKET_HEADROOM as the virtnet hdr space */
> +               addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;

We had VIRTIO_XDP_HEADROOM; can we reuse it? Or, if it's redundant,
let's send a patch to switch to XDP_PACKET_HEADROOM.

Btw, the code assumes vi->hdr_len < xsk_pool_get_headroom(). It's
better to fail when enabling xsk if that doesn't hold.

Thanks
Xuan Zhuo June 28, 2024, 5:42 a.m. UTC | #4
On Fri, 28 Jun 2024 10:19:37 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Tue, Jun 18, 2024 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > Implement the logic of filling rq with XSK buffers.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/net/virtio_net.c | 68 ++++++++++++++++++++++++++++++++++++++--
> >  1 file changed, 66 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 2bbc715f22c6..2ac5668a94ce 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -355,6 +355,8 @@ struct receive_queue {
> >
> >                 /* xdp rxq used by xsk */
> >                 struct xdp_rxq_info xdp_rxq;
> > +
> > +               struct xdp_buff **xsk_buffs;
> >         } xsk;
> >  };
> >
> > @@ -1032,6 +1034,53 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
> >         }
> >  }
> >
> > +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > +{
> > +       sg->dma_address = addr;
> > +       sg->length = len;
> > +}
> > +
> > +static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> > +                                  struct xsk_buff_pool *pool, gfp_t gfp)
> > +{
> > +       struct xdp_buff **xsk_buffs;
> > +       dma_addr_t addr;
> > +       u32 len, i;
> > +       int err = 0;
> > +       int num;
> > +
> > +       xsk_buffs = rq->xsk.xsk_buffs;
> > +
> > +       num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
> > +       if (!num)
> > +               return -ENOMEM;
> > +
> > +       len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
> > +
> > +       for (i = 0; i < num; ++i) {
> > +               /* use the part of XDP_PACKET_HEADROOM as the virtnet hdr space */
> > +               addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
>
> We had VIRTIO_XDP_HEADROOM, can we reuse it? Or if it's redundant
> let's send a patch to switch to XDP_PACKET_HEADROOM.

Do you mean replacing it inside the comment?

I want to describe using part of the xsk headroom; the size of that headroom
is XDP_PACKET_HEADROOM.
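
That is, the buffer given to the device would look roughly like this (a
rough sketch of the intended layout, using the names from the patch; not
an exact picture of the pool internals):

	|<------ xsk headroom (>= XDP_PACKET_HEADROOM) ------>|
	| unused headroom      | virtio-net header (hdr_len)  | packet data ...
	                       ^                              ^
	                       addr                           xsk_buff_xdp_get_dma()

	addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len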

>
> Btw, the code assumes vi->hdr_len < xsk_pool_get_headroom(). It's
> better to fail if it's not true when enabling xsk.

It is ok.

Thanks.


>
> Thanks
>
Jason Wang July 1, 2024, 3:05 a.m. UTC | #5
On Fri, Jun 28, 2024 at 1:44 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Fri, 28 Jun 2024 10:19:37 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Tue, Jun 18, 2024 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > Implement the logic of filling rq with XSK buffers.
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > ---
> > >  drivers/net/virtio_net.c | 68 ++++++++++++++++++++++++++++++++++++++--
> > >  1 file changed, 66 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index 2bbc715f22c6..2ac5668a94ce 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -355,6 +355,8 @@ struct receive_queue {
> > >
> > >                 /* xdp rxq used by xsk */
> > >                 struct xdp_rxq_info xdp_rxq;
> > > +
> > > +               struct xdp_buff **xsk_buffs;
> > >         } xsk;
> > >  };
> > >
> > > @@ -1032,6 +1034,53 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
> > >         }
> > >  }
> > >
> > > +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > > +{
> > > +       sg->dma_address = addr;
> > > +       sg->length = len;
> > > +}
> > > +
> > > +static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> > > +                                  struct xsk_buff_pool *pool, gfp_t gfp)
> > > +{
> > > +       struct xdp_buff **xsk_buffs;
> > > +       dma_addr_t addr;
> > > +       u32 len, i;
> > > +       int err = 0;
> > > +       int num;
> > > +
> > > +       xsk_buffs = rq->xsk.xsk_buffs;
> > > +
> > > +       num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
> > > +       if (!num)
> > > +               return -ENOMEM;
> > > +
> > > +       len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
> > > +
> > > +       for (i = 0; i < num; ++i) {
> > > +               /* use the part of XDP_PACKET_HEADROOM as the virtnet hdr space */
> > > +               addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
> >
> > We had VIRTIO_XDP_HEADROOM, can we reuse it? Or if it's redundant
> > let's send a patch to switch to XDP_PACKET_HEADROOM.
>
> Do you mean replace it inside the comment?

I meant a patch to s/VIRTIO_XDP_HEADROOM/XDP_PACKET_HEADROOM/g.

>
> I want to describe use the headroom of xsk, the size of the headroom is
> XDP_PACKET_HEADROOM.
>
> >
> > Btw, the code assumes vi->hdr_len < xsk_pool_get_headroom(). It's
> > better to fail if it's not true when enabling xsk.
>
> It is ok.

I mean, do we need a check that fails xsk binding if vi->hdr_len >
xsk_pool_get_headroom(), or is that already guaranteed by the code?
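
If such a check is needed, a minimal sketch (placement and error code are
only illustrative) could be added in virtnet_xsk_pool_enable():

	/* The rx path steals vi->hdr_len bytes of the xsk headroom for the
	 * virtio-net header, so reject binding if it cannot fit there.
	 */
	if (vi->hdr_len > xsk_pool_get_headroom(pool))
		return -EINVAL;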

Thanks

>
> Thanks.
>
>
> >
> > Thanks
> >
>
Xuan Zhuo July 1, 2024, 8:34 a.m. UTC | #6
On Mon, 1 Jul 2024 11:05:33 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Fri, Jun 28, 2024 at 1:44 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Fri, 28 Jun 2024 10:19:37 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Tue, Jun 18, 2024 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > Implement the logic of filling rq with XSK buffers.
> > > >
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > ---
> > > >  drivers/net/virtio_net.c | 68 ++++++++++++++++++++++++++++++++++++++--
> > > >  1 file changed, 66 insertions(+), 2 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index 2bbc715f22c6..2ac5668a94ce 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -355,6 +355,8 @@ struct receive_queue {
> > > >
> > > >                 /* xdp rxq used by xsk */
> > > >                 struct xdp_rxq_info xdp_rxq;
> > > > +
> > > > +               struct xdp_buff **xsk_buffs;
> > > >         } xsk;
> > > >  };
> > > >
> > > > @@ -1032,6 +1034,53 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
> > > >         }
> > > >  }
> > > >
> > > > +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > > > +{
> > > > +       sg->dma_address = addr;
> > > > +       sg->length = len;
> > > > +}
> > > > +
> > > > +static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> > > > +                                  struct xsk_buff_pool *pool, gfp_t gfp)
> > > > +{
> > > > +       struct xdp_buff **xsk_buffs;
> > > > +       dma_addr_t addr;
> > > > +       u32 len, i;
> > > > +       int err = 0;
> > > > +       int num;
> > > > +
> > > > +       xsk_buffs = rq->xsk.xsk_buffs;
> > > > +
> > > > +       num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
> > > > +       if (!num)
> > > > +               return -ENOMEM;
> > > > +
> > > > +       len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
> > > > +
> > > > +       for (i = 0; i < num; ++i) {
> > > > +               /* use the part of XDP_PACKET_HEADROOM as the virtnet hdr space */
> > > > +               addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
> > >
> > > We had VIRTIO_XDP_HEADROOM, can we reuse it? Or if it's redundant
> > > let's send a patch to switch to XDP_PACKET_HEADROOM.
> >
> > Do you mean replace it inside the comment?
>
> I meant a patch to s/VIRTIO_XDP_HEADROOM/XDP_PACKET_HEADROOM/g.

I see.


>
> >
> > I want to describe use the headroom of xsk, the size of the headroom is
> > XDP_PACKET_HEADROOM.
> >
> > >
> > > Btw, the code assumes vi->hdr_len < xsk_pool_get_headroom(). It's
> > > better to fail if it's not true when enabling xsk.
> >
> > It is ok.
>
> I mean do we need a check to fail xsk binding if vi->hdr_len >
> xsk_pool_get_headroom() or it has been guaranteed by the code already.

YES.

Thanks.


>
> Thanks
>
> >
> > Thanks.
> >
> >
> > >
> > > Thanks
> > >
> >
>

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2bbc715f22c6..2ac5668a94ce 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -355,6 +355,8 @@  struct receive_queue {
 
 		/* xdp rxq used by xsk */
 		struct xdp_rxq_info xdp_rxq;
+
+		struct xdp_buff **xsk_buffs;
 	} xsk;
 };
 
@@ -1032,6 +1034,53 @@  static void check_sq_full_and_disable(struct virtnet_info *vi,
 	}
 }
 
+static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
+{
+	sg->dma_address = addr;
+	sg->length = len;
+}
+
+static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
+				   struct xsk_buff_pool *pool, gfp_t gfp)
+{
+	struct xdp_buff **xsk_buffs;
+	dma_addr_t addr;
+	u32 len, i;
+	int err = 0;
+	int num;
+
+	xsk_buffs = rq->xsk.xsk_buffs;
+
+	num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
+	if (!num)
+		return -ENOMEM;
+
+	len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
+
+	for (i = 0; i < num; ++i) {
+		/* use the part of XDP_PACKET_HEADROOM as the virtnet hdr space */
+		addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
+
+		sg_init_table(rq->sg, 1);
+		sg_fill_dma(rq->sg, addr, len);
+
+		err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
+		if (err)
+			goto err;
+	}
+
+	return num;
+
+err:
+	if (i)
+		err = i;
+
+	for (; i < num; ++i)
+		xsk_buff_free(xsk_buffs[i]);
+
+	return err;
+}
+
 static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -2206,6 +2255,11 @@  static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 	int err;
 	bool oom;
 
+	if (rq->xsk.pool) {
+		err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk.pool, gfp);
+		goto kick;
+	}
+
 	do {
 		if (vi->mergeable_rx_bufs)
 			err = add_recvbuf_mergeable(vi, rq, gfp);
@@ -2214,10 +2268,11 @@  static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 		else
 			err = add_recvbuf_small(vi, rq, gfp);
 
-		oom = err == -ENOMEM;
 		if (err)
 			break;
 	} while (rq->vq->num_free);
+
+kick:
 	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
 		unsigned long flags;
 
@@ -2226,6 +2281,7 @@  static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
 	}
 
+	oom = err == -ENOMEM;
 	return !oom;
 }
 
@@ -5050,7 +5106,7 @@  static int virtnet_xsk_pool_enable(struct net_device *dev,
 	struct receive_queue *rq;
 	struct device *dma_dev;
 	struct send_queue *sq;
-	int err;
+	int err, size;
 
 	/* In big_packets mode, xdp cannot work, so there is no need to
 	 * initialize xsk of rq.
@@ -5078,6 +5134,12 @@  static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (!dma_dev)
 		return -EPERM;
 
+	size = virtqueue_get_vring_size(rq->vq);
+
+	rq->xsk.xsk_buffs = kvcalloc(size, sizeof(*rq->xsk.xsk_buffs), GFP_KERNEL);
+	if (!rq->xsk.xsk_buffs)
+		return -ENOMEM;
+
 	err = xsk_pool_dma_map(pool, dma_dev, 0);
 	if (err)
 		goto err_xsk_map;
@@ -5112,6 +5174,8 @@  static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
 
 	xsk_pool_dma_unmap(pool, 0);
 
+	kvfree(rq->xsk.xsk_buffs);
+
 	return err;
 }